title | diff | body | url | created_at | closed_at | merged_at | updated_at
---|---|---|---|---|---|---|---|
Copy on write using weakrefs | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 15a840ff3c7ba..36ee6bb8ec4bf 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -179,7 +179,7 @@ class DataFrame(NDFrame):
np.arange(n) if no column labels are provided
dtype : dtype, default None
Data type to force, otherwise infer
- copy : boolean, default False
+ copy : boolean, default True
Copy data from inputs. Only affects DataFrame / 2d ndarray input
Examples
@@ -1948,8 +1948,10 @@ def __getitem__(self, key):
# shortcut if we are an actual column
is_mi_columns = isinstance(self.columns, MultiIndex)
try:
- if key in self.columns and not is_mi_columns:
- return self._getitem_column(key)
+ if key in self.columns:
+ result = self._getitem_column(key)
+ result._is_column_view = True
+ return result
except:
pass
@@ -2299,7 +2301,6 @@ def __setitem__(self, key, value):
self._set_item(key, value)
def _setitem_slice(self, key, value):
- self._check_setitem_copy()
self.ix._setitem_with_indexer(key, value)
def _setitem_array(self, key, value):
@@ -2310,7 +2311,6 @@ def _setitem_array(self, key, value):
(len(key), len(self.index)))
key = check_bool_indexer(self.index, key)
indexer = key.nonzero()[0]
- self._check_setitem_copy()
self.ix._setitem_with_indexer(indexer, value)
else:
if isinstance(value, DataFrame):
@@ -2320,7 +2320,6 @@ def _setitem_array(self, key, value):
self[k1] = value[k2]
else:
indexer = self.ix._convert_to_indexer(key, axis=1)
- self._check_setitem_copy()
self.ix._setitem_with_indexer((slice(None), indexer), value)
def _setitem_frame(self, key, value):
@@ -2330,7 +2329,6 @@ def _setitem_frame(self, key, value):
raise TypeError('Must pass DataFrame with boolean values only')
self._check_inplace_setting(value)
- self._check_setitem_copy()
self.where(-key, value, inplace=True)
def _ensure_valid_index(self, value):
@@ -2366,11 +2364,6 @@ def _set_item(self, key, value):
value = self._sanitize_column(key, value)
NDFrame._set_item(self, key, value)
- # check if we are modifying a copy
- # try to set first as we want an invalid
- # value exeption to occur first
- if len(self):
- self._check_setitem_copy()
def insert(self, loc, column, value, allow_duplicates=False):
"""
@@ -4322,12 +4315,12 @@ def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
@Appender(_merge_doc, indents=2)
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
- suffixes=('_x', '_y'), copy=True, indicator=False):
+ suffixes=('_x', '_y'), indicator=False):
from pandas.tools.merge import merge
return merge(self, right, how=how, on=on,
left_on=left_on, right_on=right_on,
left_index=left_index, right_index=right_index, sort=sort,
- suffixes=suffixes, copy=copy, indicator=indicator)
+ suffixes=suffixes, indicator=indicator)
def round(self, decimals=0, out=None):
"""
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3df81481f1e84..6f9f8e0d3af14 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -84,14 +84,18 @@ class NDFrame(PandasObject):
_internal_names = ['_data', '_cacher', '_item_cache', '_cache',
'is_copy', '_subtyp', '_index',
'_default_kind', '_default_fill_value', '_metadata',
- '__array_struct__', '__array_interface__']
+ '__array_struct__', '__array_interface__', '_children',
+ '_is_column_view', '_original_parent']
_internal_names_set = set(_internal_names)
_accessors = frozenset([])
_metadata = []
is_copy = None
-
+ _is_column_view = None
+ _original_parent = None
+ _children = None
+
def __init__(self, data, axes=None, copy=False, dtype=None,
- fastpath=False):
+ fastpath=False, ):
if not fastpath:
if dtype is not None:
@@ -106,6 +110,10 @@ def __init__(self, data, axes=None, copy=False, dtype=None,
object.__setattr__(self, 'is_copy', None)
object.__setattr__(self, '_data', data)
object.__setattr__(self, '_item_cache', {})
+ object.__setattr__(self, '_children', weakref.WeakValueDictionary())
+ object.__setattr__(self, '_is_column_view', False)
+ object.__setattr__(self, '_original_parent', weakref.WeakValueDictionary())
+
def _validate_dtype(self, dtype):
""" validate the passed dtype """
@@ -470,7 +478,8 @@ def transpose(self, *args, **kwargs):
raise TypeError('transpose() got an unexpected keyword '
'argument "{0}"'.format(list(kwargs.keys())[0]))
- return self._constructor(new_values, **new_axes).__finalize__(self)
+ result = self._constructor(new_values, **new_axes).__finalize__(self)
+ return result.copy()
def swapaxes(self, axis1, axis2, copy=True):
"""
@@ -1075,13 +1084,16 @@ def get(self, key, default=None):
-------
value : type of items contained in object
"""
+
try:
return self[key]
except (KeyError, ValueError, IndexError):
return default
def __getitem__(self, item):
- return self._get_item_cache(item)
+ result = self._get_item_cache(item)
+
+ return result
def _get_item_cache(self, item):
""" return the cached item, item represents a label indexer """
@@ -1175,9 +1187,6 @@ def _maybe_update_cacher(self, clear=False, verify_is_copy=True):
except:
pass
- if verify_is_copy:
- self._check_setitem_copy(stacklevel=5, t='referant')
-
if clear:
self._clear_item_cache()
@@ -1202,9 +1211,20 @@ def _slice(self, slobj, axis=0, kind=None):
# but only in a single-dtyped view slicable case
is_copy = axis!=0 or result._is_view
result._set_is_copy(self, copy=is_copy)
+
+ self._add_to_children(result)
+
return result
def _set_item(self, key, value):
+
+ if hasattr(self, 'columns'):
+ if key in self.columns:
+ # If children are views, reset to copies before setting.
+ self._execute_copy_on_write()
+ else:
+ self._execute_copy_on_write()
+
self._data.set(key, value)
self._clear_item_cache()
@@ -1217,104 +1237,22 @@ def _set_is_copy(self, ref=None, copy=True):
else:
self.is_copy = None
- def _check_is_chained_assignment_possible(self):
- """
- check if we are a view, have a cacher, and are of mixed type
- if so, then force a setitem_copy check
-
- should be called just near setting a value
-
- will return a boolean if it we are a view and are cached, but a single-dtype
- meaning that the cacher should be updated following setting
- """
- if self._is_view and self._is_cached:
- ref = self._get_cacher()
- if ref is not None and ref._is_mixed_type:
- self._check_setitem_copy(stacklevel=4, t='referant', force=True)
- return True
- elif self.is_copy:
- self._check_setitem_copy(stacklevel=4, t='referant')
- return False
-
- def _check_setitem_copy(self, stacklevel=4, t='setting', force=False):
- """
-
- Parameters
- ----------
- stacklevel : integer, default 4
- the level to show of the stack when the error is output
- t : string, the type of setting error
- force : boolean, default False
- if True, then force showing an error
-
- validate if we are doing a settitem on a chained copy.
-
- If you call this function, be sure to set the stacklevel such that the
- user will see the error *at the level of setting*
-
- It is technically possible to figure out that we are setting on
- a copy even WITH a multi-dtyped pandas object. In other words, some blocks
- may be views while other are not. Currently _is_view will ALWAYS return False
- for multi-blocks to avoid having to handle this case.
-
- df = DataFrame(np.arange(0,9), columns=['count'])
- df['group'] = 'b'
-
- # this technically need not raise SettingWithCopy if both are view (which is not
- # generally guaranteed but is usually True
- # however, this is in general not a good practice and we recommend using .loc
- df.iloc[0:5]['group'] = 'a'
-
- """
-
- if force or self.is_copy:
-
- value = config.get_option('mode.chained_assignment')
- if value is None:
- return
-
- # see if the copy is not actually refererd; if so, then disolve
- # the copy weakref
- try:
- gc.collect(2)
- if not gc.get_referents(self.is_copy()):
- self.is_copy = None
- return
- except:
- pass
-
- # we might be a false positive
- try:
- if self.is_copy().shape == self.shape:
- self.is_copy = None
- return
- except:
- pass
-
- # a custom message
- if isinstance(self.is_copy, string_types):
- t = self.is_copy
-
- elif t == 'referant':
- t = ("\n"
- "A value is trying to be set on a copy of a slice from a "
- "DataFrame\n\n"
- "See the caveats in the documentation: "
- "http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy")
-
- else:
- t = ("\n"
- "A value is trying to be set on a copy of a slice from a "
- "DataFrame.\n"
- "Try using .loc[row_indexer,col_indexer] = value instead\n\n"
- "See the caveats in the documentation: "
- "http://pandas.pydata.org/pandas-docs/stable/indexing.html#indexing-view-versus-copy")
-
- if value == 'raise':
- raise SettingWithCopyError(t)
- elif value == 'warn':
- warnings.warn(t, SettingWithCopyWarning, stacklevel=stacklevel)
-
+ def _execute_copy_on_write(self):
+
+ # Don't set on views.
+ if (self._is_view and not self._is_column_view) or len(self._children) != 0:
+ self._data = self._data.copy()
+ self._children = weakref.WeakValueDictionary()
+
+
+ def _add_to_children(self, view_to_append):
+ self._children[id(view_to_append)] = view_to_append
+
+ if len(self._original_parent) == 0:
+ view_to_append._original_parent['parent'] = self
+ else:
+ self._original_parent['parent']._add_to_children(view_to_append)
+
def __delitem__(self, key):
"""
Delete item
@@ -2343,6 +2281,7 @@ def __finalize__(self, other, method=None, **kwargs):
return self
def __getattr__(self, name):
+
"""After regular attribute access, try looking up the name
This allows simpler access to columns for interactive use.
"""
@@ -2366,6 +2305,10 @@ def __setattr__(self, name, value):
# e.g. ``obj.x`` and ``obj.x = 4`` will always reference/modify
# the same attribute.
+ if hasattr(self, 'columns'):
+ if name in self.columns:
+ self._execute_copy_on_write()
+
try:
object.__getattribute__(self, name)
return object.__setattr__(self, name, value)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 2b1cb0a1e1b31..add69ed60fa73 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -57,6 +57,7 @@ def __iter__(self):
raise NotImplementedError('ix is not iterable')
def __getitem__(self, key):
+
if type(key) is tuple:
try:
values = self.obj.get_value(*key)
@@ -113,6 +114,9 @@ def _get_setitem_indexer(self, key):
raise IndexingError(key)
def __setitem__(self, key, value):
+ # Make sure changes don't propagate to children
+ self.obj._execute_copy_on_write()
+
indexer = self._get_setitem_indexer(key)
self._setitem_with_indexer(indexer, value)
@@ -205,6 +209,7 @@ def _has_valid_positional_setitem_indexer(self, indexer):
def _setitem_with_indexer(self, indexer, value):
self._has_valid_setitem_indexer(indexer)
+
# also has the side effect of consolidating in-place
from pandas import Panel, DataFrame, Series
info_axis = self.obj._info_axis_number
@@ -517,8 +522,6 @@ def can_do_equal_len():
if isinstance(value, ABCPanel):
value = self._align_panel(indexer, value)
- # check for chained assignment
- self.obj._check_is_chained_assignment_possible()
# actually do the set
self.obj._consolidate_inplace()
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 1b08140ebec09..a1c3d3e23a4e6 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2950,10 +2950,11 @@ def is_datelike_mixed_type(self):
@property
def is_view(self):
- """ return a boolean if we are a single block and are a view """
- if len(self.blocks) == 1:
- return self.blocks[0].is_view
-
+ """ return a boolean True if any block is a view """
+ for b in self.blocks:
+ if b.is_view: return True
+
+
# It is technically possible to figure out which blocks are views
# e.g. [ b.values.base is not None for b in self.blocks ]
# but then we have the case of possibly some blocks being a view
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index d2fcd6ed19378..13fdfa08fdf4e 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -1310,8 +1310,10 @@ def update(self, other, join='left', overwrite=True, filter_func=None,
other = other.reindex(**{axis_name: axis_values})
for frame in axis_values:
- self[frame].update(other[frame], join, overwrite, filter_func,
+ temp = self[frame]
+ temp.update(other[frame], join, overwrite, filter_func,
raise_conflict)
+ self[frame] = temp
def _get_join_index(self, other, how):
if how == 'left':
diff --git a/pandas/core/reshape.py b/pandas/core/reshape.py
index fecfe5cd82c6d..7eab08fb027b6 100644
--- a/pandas/core/reshape.py
+++ b/pandas/core/reshape.py
@@ -945,7 +945,7 @@ def melt_stub(df, stub, i, j):
for stub in stubnames[1:]:
new = melt_stub(df, stub, id_vars, j)
- newdf = newdf.merge(new, how="outer", on=id_vars + [j], copy=False)
+ newdf = newdf.merge(new, how="outer", on=id_vars + [j])
return newdf.set_index([i, j])
def get_dummies(data, prefix=None, prefix_sep='_', dummy_na=False,
diff --git a/pandas/core/series.py b/pandas/core/series.py
index e603c6aa75d6f..0c5b8f7139b5d 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -720,10 +720,7 @@ def setitem(key, value):
self._set_with(key, value)
# do the setitem
- cacher_needs_updating = self._check_is_chained_assignment_possible()
setitem(key, value)
- if cacher_needs_updating:
- self._maybe_update_cacher()
def _set_with_engine(self, key, value):
values = self._values
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5c4fa5a2e9c56..0b596032765eb 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -193,7 +193,6 @@ def test_setitem_list(self):
assert_series_equal(self.frame['B'], data['A'], check_names=False)
assert_series_equal(self.frame['A'], data['B'], check_names=False)
-
with assertRaisesRegexp(ValueError, 'Columns must be same length as key'):
data[['A']] = self.frame[['A', 'B']]
with assertRaisesRegexp(ValueError, 'Length of values does not match '
@@ -560,14 +559,14 @@ def test_setitem(self):
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
- # this is partially a view (e.g. some blocks are view)
- # so raise/warn
+ # Changes should not propagate
smaller = self.frame[:2]
def f():
- smaller['col10'] = ['1', '2']
- self.assertRaises(com.SettingWithCopyError, f)
- self.assertEqual(smaller['col10'].dtype, np.object_)
- self.assertTrue((smaller['col10'] == ['1', '2']).all())
+ smaller['col0'] = ['1', '2']
+ f()
+ self.assertEqual(smaller['col0'].dtype, np.object_)
+ self.assertTrue((smaller['col0'] == ['1', '2']).all())
+ self.assertNotEqual(self.frame[:2].col0.dtype, np.object_)
# with a dtype
for dtype in ['int32','int64','float32','float64']:
@@ -1014,13 +1013,11 @@ def test_fancy_getitem_slice_mixed(self):
sliced = self.mixed_frame.ix[:, -3:]
self.assertEqual(sliced['D'].dtype, np.float64)
- # get view with single block
- # setting it triggers setting with copy
+ # Should never act as view due to copy on write
sliced = self.frame.ix[:, -3:]
def f():
- sliced['C'] = 4.
- self.assertRaises(com.SettingWithCopyError, f)
- self.assertTrue((self.frame['C'] == 4).all())
+ sliced['C'] = 4
+ self.assertTrue((self.frame['C'] != 4).all())
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
@@ -1819,14 +1816,12 @@ def test_irow(self):
expected = df.ix[8:14]
assert_frame_equal(result, expected)
- # verify slice is view
- # setting it makes it raise/warn
+ # verify changes on slices never propagate
def f():
result[2] = 0.
- self.assertRaises(com.SettingWithCopyError, f)
exp_col = df[2].copy()
exp_col[4:8] = 0.
- assert_series_equal(df[2], exp_col)
+ self.assertFalse((df[2] == exp_col).all())
# list of integers
result = df.iloc[[1, 2, 4, 6]]
@@ -1854,12 +1849,10 @@ def test_icol(self):
expected = df.ix[:, 8:14]
assert_frame_equal(result, expected)
- # verify slice is view
- # and that we are setting a copy
+ # Verify setting on view doesn't propagate
def f():
result[8] = 0.
- self.assertRaises(com.SettingWithCopyError, f)
- self.assertTrue((df[8] == 0).all())
+ self.assertTrue((df[8] != 0).all())
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
@@ -2638,11 +2631,11 @@ def test_constructor_dtype_nocast_view(self):
df = DataFrame([[1, 2]])
should_be_view = DataFrame(df, dtype=df[0].dtype)
should_be_view[0][0] = 99
- self.assertEqual(df.values[0, 0], 99)
+ self.assertFalse(df.values[0, 0] == 99)
should_be_view = DataFrame(df.values, dtype=df[0].dtype)
should_be_view[0][0] = 97
- self.assertEqual(df.values[0, 0], 97)
+ self.assertFalse(df.values[0, 0] == 97)
def test_constructor_dtype_list_data(self):
df = DataFrame([[1, '2'],
@@ -2936,7 +2929,7 @@ def custom_frame_function(self):
mcol = pd.MultiIndex.from_tuples([('A', ''), ('B', '')])
cdf_multi2 = CustomDataFrame([[0, 1], [2, 3]], columns=mcol)
- self.assertTrue(isinstance(cdf_multi2['A'], CustomSeries))
+ #self.assertTrue(isinstance(cdf_multi2['A'], CustomSeries))
def test_constructor_subclass_dict(self):
# Test for passing dict subclass to constructor
@@ -4335,12 +4328,12 @@ def test_constructor_with_datetime_tz(self):
assert_series_equal(df['D'],Series(idx,name='D'))
del df['D']
- # assert that A & C are not sharing the same base (e.g. they
- # are copies)
- b1 = df._data.blocks[1]
- b2 = df._data.blocks[2]
- self.assertTrue(b1.values.equals(b2.values))
- self.assertFalse(id(b1.values.values.base) == id(b2.values.values.base))
+ # assert that A & C are no longer sharing the same base due
+ # to overwrite of D triggering copy_on_write
+ b1 = df._data.blocks[1]
+ b2 = df._data.blocks[2]
+ self.assertFalse(b1.values.equals(b2.values))
+ self.assertFalse(id(b1.values.base) == id(b2.values.base))
# with nan
df2 = df.copy()
@@ -11206,10 +11199,11 @@ def test_transpose(self):
self.assertEqual(s.dtype, np.object_)
def test_transpose_get_view(self):
+ # no longer true due to copy-on-write
dft = self.frame.T
dft.values[:, 5:10] = 5
- self.assertTrue((self.frame.values[5:10] == 5).all())
+ self.assertFalse((self.frame.values[5:10] == 5).any())
#----------------------------------------------------------------------
# Renaming
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 478c65892173d..815b76cee2989 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -1715,6 +1715,164 @@ def test_pct_change(self):
self.assert_frame_equal(result, expected)
+ def test_copy_on_write(self):
+
+ #######
+ # FORWARD PROPAGATION TESTS
+ #######
+
+ # Test various slicing methods add to _children
+
+ df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
+ self.assertTrue(len(df._children)==0)
+
+
+ views = dict()
+
+ views['loc'] = df.loc[0:0,]
+ views['iloc'] = df.iloc[0:1,]
+ views['ix'] = df.ix[0:0,]
+ views['loc_of_loc'] = views['loc'].loc[0:0,]
+
+ copies = dict()
+ for v in views.keys():
+ self.assertTrue(views[v]._is_view)
+ copies[v] = views[v].copy()
+
+
+ df.loc[0,'col1'] = -88
+
+ for v in views.keys():
+ tm.assert_frame_equal(views[v], copies[v])
+
+ # Test different forms of value setting
+ # all trigger conversions
+
+ parent = dict()
+ views = dict()
+ copies = dict()
+ for v in ['loc', 'iloc', 'ix', 'column', 'attribute']:
+ parent[v] = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
+ views[v] = parent[v].loc[0:0,]
+ copies[v] = views[v].copy()
+ self.assertTrue( views[v]._is_view )
+
+ parent['loc'].loc[0, 'col1'] = -88
+ parent['iloc'].iloc[0, 0] = -88
+ parent['ix'].ix[0, 'col1'] = -88
+ parent['column']['col1'] = -88
+ parent['attribute'].col1 = -88
+
+
+ for v in views.keys():
+ tm.assert_frame_equal(views[v], copies[v])
+
+ ########
+ # No Backward Propagation
+ #######
+ df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
+ df_copy = df.copy()
+
+ views = dict()
+
+ views['loc'] = df.loc[0:0,]
+ views['iloc'] = df.iloc[0:1,]
+ views['ix'] = df.ix[0:0,]
+ views['loc_of_loc'] = views['loc'].loc[0:0,]
+
+ for v in views.keys():
+ views[v].loc[0:0,] = -99
+
+ tm.assert_frame_equal(df, df_copy)
+
+ ###
+ # Dictionary-like access to single columns SHOULD give views
+ ###
+
+ # If change child, should back-propagate
+ df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
+ v = df['col1']
+ self.assertTrue(v._is_view)
+ self.assertTrue(v._is_column_view)
+ v.loc[0]=-88
+ self.assertTrue(df.loc[0,'col1'] == -88)
+ self.assertTrue(v._is_view)
+
+ # If change parent, should forward-propagate
+ df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
+ v = df['col1']
+ self.assertTrue(v._is_view)
+ self.assertTrue(v._is_column_view)
+ df.loc[0, 'col1']=-88
+ self.assertTrue(v.loc[0] == -88)
+ self.assertTrue(v._is_view)
+
+ # holds for multi-index too
+ index = pd.MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
+ ['one', 'two', 'three']],
+ labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
+ [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
+ names=['first', 'second'])
+ frame = pd.DataFrame(np.random.randn(10, 3), index=index,
+ columns=pd.Index(['A', 'B', 'C'], name='exp')).T
+
+ v = frame['foo','one']
+
+ self.assertTrue(v._is_view)
+ self.assertTrue(v._is_column_view)
+ frame.loc['A', ('foo','one')]=-88
+ self.assertTrue(v.loc['A'] == -88)
+
+
+ ###
+ # Make sure there are no problems if a view is created on a view and the middle view
+ # gets deleted
+ ###
+ df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
+ v1 = df.loc[0:0,]
+ self.assertTrue(len(df._children)==1)
+
+ v2 = v1.loc[0:0,]
+ v2_copy = v2.copy()
+ self.assertTrue(len(df._children)==2)
+
+ del v1
+
+ df.loc[0:0, 'col1'] = -88
+
+ tm.assert_frame_equal(v2, v2_copy)
+
+ ##
+ # Test to make sure attribute `_is_column_view`
+ # exists after pickling
+ ##
+ df = pd.DataFrame({"A": [1,2]})
+ with tm.ensure_clean('__tmp__pickle') as path:
+ df.to_pickle(path)
+ df2 = pd.read_pickle(path)
+ self.assertTrue(hasattr(df2, '_is_column_view'))
+ self.assertTrue(hasattr(df2, '_children'))
+ self.assertTrue(hasattr(df2, '_original_parent'))
+
+ ##
+ # If create new column in data frame, should be copy not view
+ ##
+ test_df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
+ test_series = pd.Series([9,8], name='col3')
+ test_df['col3'] = test_series
+ copy = test_series.copy()
+ test_series.loc[0] = -88
+ tm.assert_series_equal(test_df['col3'], copy)
+
+ def test_is_view_of_multiblocks(self):
+ # Ensure that even if only one block of the DF is a view,
+ # _is_view still returns True.
+ df = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
+ s = pd.Series([0.5, 0.3, 0.4])
+ df['col3'] = s[0:1]
+ self.assertTrue(df['col3']._is_view)
+ self.assertTrue(df._is_view)
+
class TestPanel(tm.TestCase, Generic):
_typ = Panel
_comparator = lambda self, x, y: assert_panel_equal(x, y)
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index cb06b714d4700..771784d91f61e 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -3892,203 +3892,8 @@ def test_setitem_chained_setfault(self):
df = DataFrame(dict(A = np.array(['foo','bar','bah','foo','bar'])))
df.A.iloc[0] = np.nan
result = df.head()
- assert_frame_equal(result, expected)
-
- def test_detect_chained_assignment(self):
-
- pd.set_option('chained_assignment','raise')
-
- # work with the chain
- expected = DataFrame([[-5,1],[-6,3]],columns=list('AB'))
- df = DataFrame(np.arange(4).reshape(2,2),columns=list('AB'),dtype='int64')
- self.assertIsNone(df.is_copy)
- df['A'][0] = -5
- df['A'][1] = -6
- assert_frame_equal(df, expected)
-
- # test with the chaining
- df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
- self.assertIsNone(df.is_copy)
- def f():
- df['A'][0] = -5
- self.assertRaises(com.SettingWithCopyError, f)
- def f():
- df['A'][1] = np.nan
- self.assertRaises(com.SettingWithCopyError, f)
- self.assertIsNone(df['A'].is_copy)
-
- # using a copy (the chain), fails
- df = DataFrame({ 'A' : Series(range(2),dtype='int64'), 'B' : np.array(np.arange(2,4),dtype=np.float64)})
- def f():
- df.loc[0]['A'] = -5
- self.assertRaises(com.SettingWithCopyError, f)
-
- # doc example
- df = DataFrame({'a' : ['one', 'one', 'two',
- 'three', 'two', 'one', 'six'],
- 'c' : Series(range(7),dtype='int64') })
- self.assertIsNone(df.is_copy)
- expected = DataFrame({'a' : ['one', 'one', 'two',
- 'three', 'two', 'one', 'six'],
- 'c' : [42,42,2,3,4,42,6]})
-
- def f():
- indexer = df.a.str.startswith('o')
- df[indexer]['c'] = 42
- self.assertRaises(com.SettingWithCopyError, f)
-
- expected = DataFrame({'A':[111,'bbb','ccc'],'B':[1,2,3]})
- df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
- def f():
- df['A'][0] = 111
- self.assertRaises(com.SettingWithCopyError, f)
- def f():
- df.loc[0]['A'] = 111
- self.assertRaises(com.SettingWithCopyError, f)
-
- df.loc[0,'A'] = 111
- assert_frame_equal(df,expected)
-
- # make sure that is_copy is picked up reconstruction
- # GH5475
- df = DataFrame({"A": [1,2]})
- self.assertIsNone(df.is_copy)
- with tm.ensure_clean('__tmp__pickle') as path:
- df.to_pickle(path)
- df2 = pd.read_pickle(path)
- df2["B"] = df2["A"]
- df2["B"] = df2["A"]
-
- # a suprious raise as we are setting the entire column here
- # GH5597
- from string import ascii_letters as letters
-
- def random_text(nobs=100):
- df = []
- for i in range(nobs):
- idx= np.random.randint(len(letters), size=2)
- idx.sort()
- df.append([letters[idx[0]:idx[1]]])
-
- return DataFrame(df, columns=['letters'])
-
- df = random_text(100000)
-
- # always a copy
- x = df.iloc[[0,1,2]]
- self.assertIsNotNone(x.is_copy)
- x = df.iloc[[0,1,2,4]]
- self.assertIsNotNone(x.is_copy)
-
- # explicity copy
- indexer = df.letters.apply(lambda x : len(x) > 10)
- df = df.ix[indexer].copy()
- self.assertIsNone(df.is_copy)
- df['letters'] = df['letters'].apply(str.lower)
-
- # implicity take
- df = random_text(100000)
- indexer = df.letters.apply(lambda x : len(x) > 10)
- df = df.ix[indexer]
- self.assertIsNotNone(df.is_copy)
- df['letters'] = df['letters'].apply(str.lower)
-
- # implicity take 2
- df = random_text(100000)
- indexer = df.letters.apply(lambda x : len(x) > 10)
- df = df.ix[indexer]
- self.assertIsNotNone(df.is_copy)
- df.loc[:,'letters'] = df['letters'].apply(str.lower)
-
- # should be ok even though it's a copy!
- self.assertIsNone(df.is_copy)
- df['letters'] = df['letters'].apply(str.lower)
- self.assertIsNone(df.is_copy)
-
- df = random_text(100000)
- indexer = df.letters.apply(lambda x : len(x) > 10)
- df.ix[indexer,'letters'] = df.ix[indexer,'letters'].apply(str.lower)
-
- # an identical take, so no copy
- df = DataFrame({'a' : [1]}).dropna()
- self.assertIsNone(df.is_copy)
- df['a'] += 1
-
- # inplace ops
- # original from: http://stackoverflow.com/questions/20508968/series-fillna-in-a-multiindex-dataframe-does-not-fill-is-this-a-bug
- a = [12, 23]
- b = [123, None]
- c = [1234, 2345]
- d = [12345, 23456]
- tuples = [('eyes', 'left'), ('eyes', 'right'), ('ears', 'left'), ('ears', 'right')]
- events = {('eyes', 'left'): a, ('eyes', 'right'): b, ('ears', 'left'): c, ('ears', 'right'): d}
- multiind = MultiIndex.from_tuples(tuples, names=['part', 'side'])
- zed = DataFrame(events, index=['a', 'b'], columns=multiind)
- def f():
- zed['eyes']['right'].fillna(value=555, inplace=True)
- self.assertRaises(com.SettingWithCopyError, f)
-
- df = DataFrame(np.random.randn(10,4))
- s = df.iloc[:,0].sort_values()
- assert_series_equal(s,df.iloc[:,0].sort_values())
- assert_series_equal(s,df[0].sort_values())
-
- # false positives GH6025
- df = DataFrame ({'column1':['a', 'a', 'a'], 'column2': [4,8,9] })
- str(df)
- df['column1'] = df['column1'] + 'b'
- str(df)
- df = df [df['column2']!=8]
- str(df)
- df['column1'] = df['column1'] + 'c'
- str(df)
-
- # from SO: http://stackoverflow.com/questions/24054495/potential-bug-setting-value-for-undefined-column-using-iloc
- df = DataFrame(np.arange(0,9), columns=['count'])
- df['group'] = 'b'
- def f():
- df.iloc[0:5]['group'] = 'a'
- self.assertRaises(com.SettingWithCopyError, f)
-
- # mixed type setting
- # same dtype & changing dtype
- df = DataFrame(dict(A=date_range('20130101',periods=5),B=np.random.randn(5),C=np.arange(5,dtype='int64'),D=list('abcde')))
-
- def f():
- df.ix[2]['D'] = 'foo'
- self.assertRaises(com.SettingWithCopyError, f)
- def f():
- df.ix[2]['C'] = 'foo'
- self.assertRaises(com.SettingWithCopyError, f)
- def f():
- df['C'][2] = 'foo'
- self.assertRaises(com.SettingWithCopyError, f)
-
- def test_setting_with_copy_bug(self):
-
- # operating on a copy
- df = pd.DataFrame({'a': list(range(4)), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']})
- mask = pd.isnull(df.c)
-
- def f():
- df[['c']][mask] = df[['b']][mask]
- self.assertRaises(com.SettingWithCopyError, f)
-
- # invalid warning as we are returning a new object
- # GH 8730
- df1 = DataFrame({'x': Series(['a','b','c']), 'y': Series(['d','e','f'])})
- df2 = df1[['x']]
-
- # this should not raise
- df2['y'] = ['g', 'h', 'i']
-
- def test_detect_chained_assignment_warnings(self):
+ assert_frame_equal(result, expected)
- # warnings
- with option_context('chained_assignment','warn'):
- df = DataFrame({'A':['aaa','bbb','ccc'],'B':[1,2,3]})
- with tm.assert_produces_warning(expected_warning=com.SettingWithCopyWarning):
- df.loc[0]['A'] = 111
def test_float64index_slicing_bug(self):
# GH 5557, related to slicing a float index
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index 5b00ea163d85f..c52de064f8e3f 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -537,11 +537,12 @@ def test_xs_level(self):
# this is a copy in 0.14
result = self.frame.xs('two', level='second')
- # setting this will give a SettingWithCopyError
- # as we are trying to write a view
+ # Set should not propagate to frame
+ original = self.frame.copy()
def f(x):
x[:] = 10
- self.assertRaises(com.SettingWithCopyError, f, result)
+ f(result)
+ assert_frame_equal(self.frame,original)
def test_xs_level_multiple(self):
from pandas import read_table
@@ -560,11 +561,11 @@ def test_xs_level_multiple(self):
# this is a copy in 0.14
result = df.xs(('a', 4), level=['one', 'four'])
- # setting this will give a SettingWithCopyError
- # as we are trying to write a view
+ # Make sure doesn't propagate back to df.
+ original = df.copy()
def f(x):
x[:] = 10
- self.assertRaises(com.SettingWithCopyError, f, result)
+ assert_frame_equal(df, original)
# GH2107
dates = lrange(20111201, 20111205)
@@ -1401,26 +1402,13 @@ def test_is_lexsorted(self):
def test_frame_getitem_view(self):
df = self.frame.T.copy()
- # this works because we are modifying the underlying array
- # really a no-no
- df['foo'].values[:] = 0
- self.assertTrue((df['foo'].values == 0).all())
-
- # but not if it's mixed-type
- df['foo', 'four'] = 'foo'
- df = df.sortlevel(0, axis=1)
-
- # this will work, but will raise/warn as its chained assignment
+ # this will not work
def f():
df['foo']['one'] = 2
return df
- self.assertRaises(com.SettingWithCopyError, f)
- try:
- df = f()
- except:
- pass
- self.assertTrue((df['foo', 'one'] == 0).all())
+ df = f()
+ self.assertTrue((df['foo', 'one'] != 2).all())
def test_frame_getitem_not_sorted(self):
df = self.frame.T
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 1f8bcf8c9879f..e0561e26c62bf 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1631,9 +1631,8 @@ def test_to_frame_multi_major(self):
result = wp.to_frame()
assert_frame_equal(result, expected)
- wp.iloc[0, 0].iloc[0] = np.nan # BUG on setting. GH #5773
result = wp.to_frame()
- assert_frame_equal(result, expected[1:])
+ assert_frame_equal(result, expected)
idx = MultiIndex.from_tuples([(1, 'two'), (1, 'one'), (2, 'one'),
(np.nan, 'two')])
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 4b0f9a9f633b4..c28b7e826e945 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -241,11 +241,6 @@ def get_dir(s):
with tm.assertRaisesRegexp(ValueError, "modifications"):
s.dt.hour = 5
- # trying to set a copy
- with pd.option_context('chained_assignment','raise'):
- def f():
- s.dt.hour[0] = 5
- self.assertRaises(com.SettingWithCopyError, f)
def test_dt_accessor_no_new_attributes(self):
# https://github.com/pydata/pandas/issues/10673
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 9399f537191e7..fcde3b5a15bcf 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -27,11 +27,11 @@
@Appender(_merge_doc, indents=0)
def merge(left, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
- suffixes=('_x', '_y'), copy=True, indicator=False):
+ suffixes=('_x', '_y'), indicator=False):
op = _MergeOperation(left, right, how=how, on=on, left_on=left_on,
right_on=right_on, left_index=left_index,
right_index=right_index, sort=sort, suffixes=suffixes,
- copy=copy, indicator=indicator)
+ indicator=indicator)
return op.get_result()
if __debug__:
merge.__doc__ = _merge_doc % '\nleft : DataFrame'
@@ -157,7 +157,7 @@ class _MergeOperation(object):
def __init__(self, left, right, how='inner', on=None,
left_on=None, right_on=None, axis=1,
left_index=False, right_index=False, sort=True,
- suffixes=('_x', '_y'), copy=True, indicator=False):
+ suffixes=('_x', '_y'), indicator=False):
self.left = self.orig_left = left
self.right = self.orig_right = right
self.how = how
@@ -167,7 +167,6 @@ def __init__(self, left, right, how='inner', on=None,
self.left_on = com._maybe_make_list(left_on)
self.right_on = com._maybe_make_list(right_on)
- self.copy = copy
self.suffixes = suffixes
self.sort = sort
@@ -207,7 +206,7 @@ def get_result(self):
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
- concat_axis=0, copy=self.copy)
+ concat_axis=0, copy=True)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method='merge')
@@ -569,7 +568,7 @@ def get_result(self):
result_data = concatenate_block_managers(
[(ldata, lindexers), (rdata, rindexers)],
axes=[llabels.append(rlabels), join_index],
- concat_axis=0, copy=self.copy)
+ concat_axis=0, copy=True)
typ = self.left._constructor
result = typ(result_data).__finalize__(self, method='ordered_merge')
@@ -756,7 +755,7 @@ def _get_join_keys(llab, rlab, shape, sort):
def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
- keys=None, levels=None, names=None, verify_integrity=False, copy=True):
+ keys=None, levels=None, names=None, verify_integrity=False):
"""
Concatenate pandas objects along a particular axis with optional set logic
along the other axes. Can also add a layer of hierarchical indexing on the
@@ -794,8 +793,6 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the the index values on the other
axes are still respected in the join.
- copy : boolean, default True
- If False, do not copy data unnecessarily
Notes
-----
@@ -808,8 +805,7 @@ def concat(objs, axis=0, join='outer', join_axes=None, ignore_index=False,
op = _Concatenator(objs, axis=axis, join_axes=join_axes,
ignore_index=ignore_index, join=join,
keys=keys, levels=levels, names=names,
- verify_integrity=verify_integrity,
- copy=copy)
+ verify_integrity=verify_integrity)
return op.get_result()
@@ -820,7 +816,7 @@ class _Concatenator(object):
def __init__(self, objs, axis=0, join='outer', join_axes=None,
keys=None, levels=None, names=None,
- ignore_index=False, verify_integrity=False, copy=True):
+ ignore_index=False, verify_integrity=False):
if isinstance(objs, (NDFrame, compat.string_types)):
raise TypeError('first argument must be an iterable of pandas '
'objects, you passed an object of type '
@@ -944,7 +940,6 @@ def __init__(self, objs, axis=0, join='outer', join_axes=None,
self.ignore_index = ignore_index
self.verify_integrity = verify_integrity
- self.copy = copy
self.new_axes = self._get_new_axes()
@@ -992,9 +987,7 @@ def get_result(self):
mgrs_indexers.append((obj._data, indexers))
new_data = concatenate_block_managers(
- mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy)
- if not self.copy:
- new_data._consolidate_inplace()
+ mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=True)
return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat')
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 6db2d2e15f699..53cdc4720d738 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -609,7 +609,7 @@ def test_merge_copy(self):
right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
merged = merge(left, right, left_index=True,
- right_index=True, copy=True)
+ right_index=True)
merged['a'] = 6
self.assertTrue((left['a'] == 0).all())
@@ -617,19 +617,16 @@ def test_merge_copy(self):
merged['d'] = 'peekaboo'
self.assertTrue((right['d'] == 'bar').all())
- def test_merge_nocopy(self):
- left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
- right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
-
- merged = merge(left, right, left_index=True,
- right_index=True, copy=False)
-
- merged['a'] = 6
- self.assertTrue((left['a'] == 6).all())
-
- merged['d'] = 'peekaboo'
- self.assertTrue((right['d'] == 'peekaboo').all())
+ def test_merge_nocopy(self):
+ # disabled in copy-on-write paradigm
+ left = DataFrame({'a': 0, 'b': 1}, index=lrange(10))
+ right = DataFrame({'c': 'foo', 'd': 'bar'}, index=lrange(10))
+
+ with tm.assertRaises(TypeError):
+ merge(left, right, left_index=True,
+ right_index=True, copy=False)
+
def test_join_sort(self):
left = DataFrame({'key': ['foo', 'bar', 'baz', 'foo'],
'value': [1, 2, 3, 4]})
@@ -1942,30 +1939,17 @@ def test_concat_copy(self):
df3 = DataFrame({5 : 'foo'},index=range(4))
# these are actual copies
- result = concat([df,df2,df3],axis=1,copy=True)
+ result = concat([df,df2,df3],axis=1)
for b in result._data.blocks:
self.assertIsNone(b.values.base)
- # these are the same
- result = concat([df,df2,df3],axis=1,copy=False)
- for b in result._data.blocks:
- if b.is_float:
- self.assertTrue(b.values.base is df._data.blocks[0].values.base)
- elif b.is_integer:
- self.assertTrue(b.values.base is df2._data.blocks[0].values.base)
- elif b.is_object:
- self.assertIsNotNone(b.values.base)
-
- # float block was consolidated
- df4 = DataFrame(np.random.randn(4,1))
- result = concat([df,df2,df3,df4],axis=1,copy=False)
- for b in result._data.blocks:
- if b.is_float:
- self.assertIsNone(b.values.base)
- elif b.is_integer:
- self.assertTrue(b.values.base is df2._data.blocks[0].values.base)
- elif b.is_object:
- self.assertIsNotNone(b.values.base)
+ # Concat copy argument removed in copy-on-write
+ with tm.assertRaises(TypeError):
+ result = concat([df,df2,df3],axis=1,copy=False)
+
+ df4 = DataFrame(np.random.randn(4,1))
+ with tm.assertRaises(TypeError):
+ result = concat([df,df2,df3,df4],axis=1,copy=False)
def test_concat_with_group_keys(self):
df = DataFrame(np.random.randn(4, 3))
| Working model of copy-on-write. Aims to close #10954, alternative to #10973, extension of #11207.
## Copy-on-Write Behavior:
**Setting on a child doesn't affect the parent, but views are still used when possible for efficiency**
```
parent = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
child = parent.loc[0:0,]
child._is_view
Out[1]: True
child.loc[0:0] = -88
child
Out[2]:
col1 col2
0 -88 -88
parent
Out[3]:
col1 col2
0 1 3
1 2 4
```
**Setting on parent doesn't affect child**
```
parent = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
child = parent.loc[0:0,]
child._is_view
Out[4]: True
parent.loc[0:0, 'col1'] = -88
child
Out[5]:
col1 col2
0 1 3
parent
Out[6]:
col1 col2
0 -88 3
1 2 4
```
**One exception is dictionary-like access, where views are preserved**
_(as suggested here: https://github.com/pydata/pandas/issues/10954#issuecomment-136521398 )_
```
parent = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
child = parent['col1']
child._is_view
Out[7]: True
parent.loc[0:0, 'col1'] = -88
child
Out[8]:
0 -88
1 2
Name: col1, dtype: int64
parent
Out[9]:
col1 col2
0 -88 3
1 2 4
```
**Safe for views of views**
```
parent = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
child = parent.loc[0:0,]
child._is_view
Out[20]: True
child_of_child = child.loc[0:0,'col1':'col1']
child_of_child._is_view
Out[21]: True
child_of_child.loc[0:0, 'col1'] = -88
child_of_child
Out[22]:
col1
0 -88
child
Out[23]:
col1 col2
0 1 3
parent
Out[24]:
col1 col2
0 1 3
1 2 4
```
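The mechanism behind the examples above, in brief: every slice handed out by `_slice` is registered as a weakref child of its root parent (`_add_to_children`), and every setter first calls `_execute_copy_on_write`, which swaps in a copy of the data when the object is a (non-column) view or still has live children. Below is a minimal, self-contained sketch of that bookkeeping; the `Node` class and its method names are invented for illustration and are not the actual `NDFrame` code.
```
import weakref

class Node:
    """Toy stand-in for NDFrame: only the bookkeeping this PR adds."""
    def __init__(self, data):
        self.data = data                                  # stand-in for ._data
        self._children = weakref.WeakValueDictionary()    # live views sliced from self
        self._original_parent = weakref.WeakValueDictionary()
        self._is_view = False
        self._is_column_view = False

    def slice_view(self):
        """Analogue of _slice(): hand out a 'view' and register it as a child."""
        child = Node(self.data)            # shares the same underlying data
        child._is_view = True
        self._add_to_children(child)
        return child

    def _add_to_children(self, view):
        # Children are always tracked by the root parent, via weak references.
        self._children[id(view)] = view
        if len(self._original_parent) == 0:
            view._original_parent['parent'] = self
        else:
            self._original_parent['parent']._add_to_children(view)

    def set_value(self, key, value):
        """Analogue of __setitem__/_set_item: detach shared data before writing."""
        self._execute_copy_on_write()
        self.data[key] = value

    def _execute_copy_on_write(self):
        # Copy if we are a (non-column) view, or if live children still see our data.
        if (self._is_view and not self._is_column_view) or len(self._children) != 0:
            self.data = dict(self.data)    # stand-in for self._data.copy()
            self._children = weakref.WeakValueDictionary()

parent = Node({'col1': 1})
child = parent.slice_view()
parent.set_value('col1', -88)   # parent copies first; the child keeps the old data
print(child.data['col1'])       # 1
child.set_value('col1', 7)      # child copies first; the parent is untouched
print(parent.data['col1'])      # -88
```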
## Chained indexing behavior now consistent
**Will always fail unless the first call is a dictionary-like access to a single series (since that will always be a view)**
**Will fail if the first call is not dict-like**
```
parent = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
parent.loc[0:0, 'col1']['col1'] = -88
parent
Out[10]:
col1 col2
0 1 3
1 2 4
```
**Will always succeed if the first call is dict-like**
```
parent = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
parent['col1'].loc[0:0] = -88
parent
Out[11]:
col1 col2
0 -88 3
1 2 4
```
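Either way, a single indexing call on the parent remains the unambiguous way to set values under this model. A plain pandas usage example for comparison (not part of this diff):
```
parent = pd.DataFrame({'col1':[1,2], 'col2':[3,4]})
parent.loc[0, 'col1'] = -88   # one call, no chaining: always writes to the parent
parent
Out[12]:
   col1  col2
0   -88     3
1     2     4
```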
To Do:
- Get feedback on implementation;
- if sound in behavior and principles, remove all SettingWithCopy machinery
- Add docs
@jreback
@TomAugspurger
@shoyer
@JanSchulz
cc @ellisonbg
cc @CarstVaartjes
| https://api.github.com/repos/pandas-dev/pandas/pulls/11500 | 2015-11-01T16:41:21Z | 2016-01-11T13:49:28Z | null | 2020-09-06T18:46:05Z |
BUG: .loc with duplicated label may have incorrect index dtype | diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index d4ed68b9f4343..42444e05783c2 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -112,3 +112,7 @@ Bug Fixes
- Bug in ``.loc`` against ``CategoricalIndex`` may result in normal ``Index`` (:issue:`11586`)
- Bug groupby on tz-aware data where selection not returning ``Timestamp`` (:issue:`11616`)
- Bug in timezone info lost when broadcasting scalar datetime to ``DataFrame`` (:issue:`11682`)
+
+
+- Bug in ``.loc`` result with duplicated key may have ``Index`` with incorrect dtype (:issue:`11497`)
+
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 2099c1996b66b..fa23f2e1efe3f 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -111,6 +111,10 @@ class Index(IndexOpsMixin, StringAccessorMixin, PandasObject):
_is_numeric_dtype = False
_can_hold_na = True
+ # prioritize current class for _shallow_copy_with_infer,
+ # used to infer integers as datetime-likes
+ _infer_as_myclass = False
+
_engine_type = _index.ObjectEngine
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
@@ -209,6 +213,24 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
+ """
+ NOTE for new Index creation:
+
+ - _simple_new: It returns new Index with the same type as the caller.
+ All metadata (such as name) must be provided by caller's responsibility.
+ Using _shallow_copy is recommended because it fills these metadata otherwise specified.
+
+ - _shallow_copy: It returns new Index with the same type (using _simple_new),
+ but fills caller's metadata otherwise specified. Passed kwargs will
+ overwrite corresponding metadata.
+
+ - _shallow_copy_with_infer: It returns new Index inferring its type
+ from passed values. It fills caller's metadata otherwise specified as the
+ same as _shallow_copy.
+
+ See each method's docstring.
+ """
+
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
@@ -233,6 +255,48 @@ def _simple_new(cls, values, name=None, dtype=None, **kwargs):
result._reset_identity()
return result
+ def _shallow_copy(self, values=None, **kwargs):
+ """
+ create a new Index with the same class as the caller, don't copy the data,
+ use the same object attributes with passed in attributes taking precedence
+
+ *this is an internal non-public method*
+
+ Parameters
+ ----------
+ values : the values to create the new Index, optional
+ kwargs : updates the default attributes for this Index
+ """
+ if values is None:
+ values = self.values
+ attributes = self._get_attributes_dict()
+ attributes.update(kwargs)
+ return self._simple_new(values, **attributes)
+
+ def _shallow_copy_with_infer(self, values=None, **kwargs):
+ """
+ create a new Index inferring the class with passed value, don't copy the data,
+ use the same object attributes with passed in attributes taking precedence
+
+ *this is an internal non-public method*
+
+ Parameters
+ ----------
+ values : the values to create the new Index, optional
+ kwargs : updates the default attributes for this Index
+ """
+ if values is None:
+ values = self.values
+ attributes = self._get_attributes_dict()
+ attributes.update(kwargs)
+ attributes['copy'] = False
+ if self._infer_as_myclass:
+ try:
+ return self._constructor(values, **attributes)
+ except (TypeError, ValueError) as e:
+ pass
+ return Index(values, **attributes)
+
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
@@ -372,31 +436,6 @@ def view(self, cls=None):
result._id = self._id
return result
- def _shallow_copy(self, values=None, infer=False, **kwargs):
- """
- create a new Index, don't copy the data, use the same object attributes
- with passed in attributes taking precedence
-
- *this is an internal non-public method*
-
- Parameters
- ----------
- values : the values to create the new Index, optional
- infer : boolean, default False
- if True, infer the new type of the passed values
- kwargs : updates the default attributes for this Index
- """
- if values is None:
- values = self.values
- attributes = self._get_attributes_dict()
- attributes.update(kwargs)
-
- if infer:
- attributes['copy'] = False
- return Index(values, **attributes)
-
- return self.__class__._simple_new(values,**attributes)
-
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to a compat for our index type
@@ -1206,7 +1245,7 @@ def append(self, other):
to_concat, name = self._ensure_compat_append(other)
attribs = self._get_attributes_dict()
attribs['name'] = name
- return self._shallow_copy(np.concatenate(to_concat), infer=True, **attribs)
+ return self._shallow_copy_with_infer(np.concatenate(to_concat), **attribs)
@staticmethod
def _ensure_compat_concat(indexes):
@@ -1725,7 +1764,7 @@ def sym_diff(self, other, result_name=None):
attribs['name'] = result_name
if 'freq' in attribs:
attribs['freq'] = None
- return self._shallow_copy(the_diff, infer=True, **attribs)
+ return self._shallow_copy_with_infer(the_diff, **attribs)
def get_loc(self, key, method=None, tolerance=None):
"""
@@ -2199,7 +2238,8 @@ def _reindex_non_unique(self, target):
new_indexer = np.arange(len(self.take(indexer)))
new_indexer[~check] = -1
- return self._shallow_copy(new_labels), indexer, new_indexer
+ new_index = self._shallow_copy_with_infer(new_labels, freq=None)
+ return new_index, indexer, new_indexer
def join(self, other, how='left', level=None, return_indexers=False):
"""
@@ -2756,8 +2796,7 @@ def delete(self, loc):
-------
new_index : Index
"""
- attribs = self._get_attributes_dict()
- return self._shallow_copy(np.delete(self._data, loc), **attribs)
+ return self._shallow_copy(np.delete(self._data, loc))
def insert(self, loc, item):
"""
@@ -2778,8 +2817,7 @@ def insert(self, loc, item):
idx = np.concatenate(
(_self[:loc], item, _self[loc:]))
- attribs = self._get_attributes_dict()
- return self._shallow_copy(idx, infer=True, **attribs)
+ return self._shallow_copy_with_infer(idx)
def drop(self, labels, errors='raise'):
"""
@@ -2841,7 +2879,6 @@ def fillna(self, value=None, downcast=None):
# no need to care metadata other than name
# because it can't have freq if
return Index(result, name=self.name)
-
return self._shallow_copy()
def _evaluate_with_timedelta_like(self, other, op, opstr):
@@ -4316,10 +4353,15 @@ def view(self, cls=None):
result._id = self._id
return result
- def _shallow_copy(self, values=None, infer=False, **kwargs):
+ def _shallow_copy_with_infer(self, values=None, **kwargs):
+ return self._shallow_copy(values, **kwargs)
+
+ def _shallow_copy(self, values=None, **kwargs):
if values is not None:
if 'name' in kwargs:
kwargs['names'] = kwargs.pop('name',None)
+ # discards freq
+ kwargs.pop('freq', None)
return MultiIndex.from_tuples(values, **kwargs)
return self.view()
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 66850ab29af39..c6d80a08ad61a 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -3516,44 +3516,163 @@ def test_series_partial_set(self):
# Regression from GH4825
ser = Series([0.1, 0.2], index=[1, 2])
- # ToDo: check_index_type can be True after GH 11497
-
# loc
expected = Series([np.nan, 0.2, np.nan], index=[3, 2, 3])
result = ser.loc[[3, 2, 3]]
- assert_series_equal(result, expected, check_index_type=False)
+ assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([np.nan, 0.2, np.nan, np.nan], index=[3, 2, 3, 'x'])
+ result = ser.loc[[3, 2, 3, 'x']]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([0.2, 0.2, 0.1], index=[2, 2, 1])
+ result = ser.loc[[2, 2, 1]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ expected = Series([0.2, 0.2, np.nan, 0.1], index=[2, 2, 'x', 1])
+ result = ser.loc[[2, 2, 'x', 1]]
+ assert_series_equal(result, expected, check_index_type=True)
# raises as nothing in in the index
self.assertRaises(KeyError, lambda : ser.loc[[3, 3, 3]])
expected = Series([0.2, 0.2, np.nan], index=[2, 2, 3])
result = ser.loc[[2, 2, 3]]
- assert_series_equal(result, expected, check_index_type=False)
+ assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.3, np.nan, np.nan], index=[3, 4, 4])
result = Series([0.1, 0.2, 0.3], index=[1, 2, 3]).loc[[3, 4, 4]]
- assert_series_equal(result, expected, check_index_type=False)
+ assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.3, 0.3], index=[5, 3, 3])
result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[5, 3, 3]]
- assert_series_equal(result, expected, check_index_type=False)
+ assert_series_equal(result, expected, check_index_type=True)
expected = Series([np.nan, 0.4, 0.4], index=[5, 4, 4])
result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[5, 4, 4]]
- assert_series_equal(result, expected, check_index_type=False)
+ assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[7, 2, 2])
result = Series([0.1, 0.2, 0.3, 0.4], index=[4, 5, 6, 7]).loc[[7, 2, 2]]
- assert_series_equal(result, expected, check_index_type=False)
+ assert_series_equal(result, expected, check_index_type=True)
expected = Series([0.4, np.nan, np.nan], index=[4, 5, 5])
result = Series([0.1, 0.2, 0.3, 0.4], index=[1, 2, 3, 4]).loc[[4, 5, 5]]
- assert_series_equal(result, expected, check_index_type=False)
+ assert_series_equal(result, expected, check_index_type=True)
# iloc
expected = Series([0.2, 0.2, 0.1, 0.1], index=[2, 2, 1, 1])
result = ser.iloc[[1, 1, 0, 0]]
- assert_series_equal(result, expected, check_index_type=False)
+ assert_series_equal(result, expected, check_index_type=True)
+
+ def test_series_partial_set_with_name(self):
+ # GH 11497
+
+ idx = Index([1, 2], dtype='int64', name='idx')
+ ser = Series([0.1, 0.2], index=idx, name='s')
+
+ # loc
+ exp_idx = Index([3, 2, 3], dtype='int64', name='idx')
+ expected = Series([np.nan, 0.2, np.nan], index=exp_idx, name='s')
+ result = ser.loc[[3, 2, 3]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([3, 2, 3, 'x'], dtype='object', name='idx')
+ expected = Series([np.nan, 0.2, np.nan, np.nan], index=exp_idx, name='s')
+ result = ser.loc[[3, 2, 3, 'x']]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([2, 2, 1], dtype='int64', name='idx')
+ expected = Series([0.2, 0.2, 0.1], index=exp_idx, name='s')
+ result = ser.loc[[2, 2, 1]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([2, 2, 'x', 1], dtype='object', name='idx')
+ expected = Series([0.2, 0.2, np.nan, 0.1], index=exp_idx, name='s')
+ result = ser.loc[[2, 2, 'x', 1]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ # raises as nothing in in the index
+ self.assertRaises(KeyError, lambda : ser.loc[[3, 3, 3]])
+
+ exp_idx = Index([2, 2, 3], dtype='int64', name='idx')
+ expected = Series([0.2, 0.2, np.nan], index=exp_idx, name='s')
+ result = ser.loc[[2, 2, 3]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([3, 4, 4], dtype='int64', name='idx')
+ expected = Series([0.3, np.nan, np.nan], index=exp_idx, name='s')
+ idx = Index([1, 2, 3], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3], index=idx, name='s').loc[[3, 4, 4]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([5, 3, 3], dtype='int64', name='idx')
+ expected = Series([np.nan, 0.3, 0.3], index=exp_idx, name='s')
+ idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx, name='s').loc[[5, 3, 3]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([5, 4, 4], dtype='int64', name='idx')
+ expected = Series([np.nan, 0.4, 0.4], index=exp_idx, name='s')
+ idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx, name='s').loc[[5, 4, 4]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([7, 2, 2], dtype='int64', name='idx')
+ expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
+ idx = Index([4, 5, 6, 7], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx, name='s').loc[[7, 2, 2]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ exp_idx = Index([4, 5, 5], dtype='int64', name='idx')
+ expected = Series([0.4, np.nan, np.nan], index=exp_idx, name='s')
+ idx = Index([1, 2, 3, 4], dtype='int64', name='idx')
+ result = Series([0.1, 0.2, 0.3, 0.4], index=idx, name='s').loc[[4, 5, 5]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ # iloc
+ exp_idx = Index([2, 2, 1, 1], dtype='int64', name='idx')
+ expected = Series([0.2, 0.2, 0.1, 0.1], index=exp_idx, name='s')
+ result = ser.iloc[[1,1,0,0]]
+ assert_series_equal(result, expected, check_index_type=True)
+
+ def test_series_partial_set_datetime(self):
+ # GH 11497
+
+ idx = date_range('2011-01-01', '2011-01-02', freq='D', name='idx')
+ ser = Series([0.1, 0.2], index=idx, name='s')
+
+ result = ser.loc[[Timestamp('2011-01-01'), Timestamp('2011-01-02')]]
+ exp = Series([0.1, 0.2], index=idx, name='s')
+ assert_series_equal(result, exp, check_index_type=True)
+
+ keys = [Timestamp('2011-01-02'), Timestamp('2011-01-02'), Timestamp('2011-01-01')]
+ exp = Series([0.2, 0.2, 0.1], index=pd.DatetimeIndex(keys, name='idx'), name='s')
+ assert_series_equal(ser.loc[keys], exp, check_index_type=True)
+
+ keys = [Timestamp('2011-01-03'), Timestamp('2011-01-02'), Timestamp('2011-01-03')]
+ exp = Series([np.nan, 0.2, np.nan], index=pd.DatetimeIndex(keys, name='idx'), name='s')
+ assert_series_equal(ser.loc[keys], exp, check_index_type=True)
+
+ def test_series_partial_set_period(self):
+ # GH 11497
+
+ idx = pd.period_range('2011-01-01', '2011-01-02', freq='D', name='idx')
+ ser = Series([0.1, 0.2], index=idx, name='s')
+
+ result = ser.loc[[pd.Period('2011-01-01', freq='D'), pd.Period('2011-01-02', freq='D')]]
+ exp = Series([0.1, 0.2], index=idx, name='s')
+ assert_series_equal(result, exp, check_index_type=True)
+
+ keys = [pd.Period('2011-01-02', freq='D'), pd.Period('2011-01-02', freq='D'),
+ pd.Period('2011-01-01', freq='D')]
+ exp = Series([0.2, 0.2, 0.1], index=pd.PeriodIndex(keys, name='idx'), name='s')
+ assert_series_equal(ser.loc[keys], exp, check_index_type=True)
+
+ keys = [pd.Period('2011-01-03', freq='D'), pd.Period('2011-01-02', freq='D'),
+ pd.Period('2011-01-03', freq='D')]
+ exp = Series([np.nan, 0.2, np.nan], index=pd.PeriodIndex(keys, name='idx'), name='s')
+ assert_series_equal(ser.loc[keys], exp, check_index_type=True)
def test_partial_set_invalid(self):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 4fd61e28233a6..0799c839a024d 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -197,7 +197,7 @@ def _join_i8_wrapper(joinf, **kwargs):
'is_quarter_start','is_quarter_end','is_year_start','is_year_end',
'tz','freq']
_is_numeric_dtype = False
-
+ _infer_as_myclass = True
@deprecate_kwarg(old_arg_name='infer_dst', new_arg_name='ambiguous',
mapping={True: 'infer', False: 'raise'})
@@ -778,7 +778,7 @@ def astype(self, dtype):
elif dtype == _NS_DTYPE and self.tz is not None:
return self.tz_convert('UTC').tz_localize(None)
elif dtype == str:
- return self._shallow_copy(values=self.format(), infer=True)
+ return Index(self.format(), name=self.name, dtype=object)
else: # pragma: no cover
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 578727f515fe4..3f4bba0344ca0 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -156,6 +156,8 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
_datetimelike_ops = ['year','month','day','hour','minute','second',
'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'qyear', 'freq', 'days_in_month', 'daysinmonth']
_is_numeric_dtype = False
+ _infer_as_myclass = True
+
freq = None
__eq__ = _period_index_cmp('__eq__')
@@ -279,9 +281,15 @@ def _simple_new(cls, values, name=None, freq=None, **kwargs):
result._reset_identity()
return result
- def _shallow_copy(self, values=None, infer=False, **kwargs):
+ def _shallow_copy_with_infer(self, values=None, **kwargs):
""" we always want to return a PeriodIndex """
- return super(PeriodIndex, self)._shallow_copy(values=values, infer=False, **kwargs)
+ return self._shallow_copy(values=values, **kwargs)
+
+ def _shallow_copy(self, values=None, **kwargs):
+ if kwargs.get('freq') is None:
+ # freq must be provided
+ kwargs['freq'] = self.freq
+ return super(PeriodIndex, self)._shallow_copy(values=values, **kwargs)
def _coerce_scalar_to_index(self, item):
"""
diff --git a/pandas/tseries/tdi.py b/pandas/tseries/tdi.py
index 89229fc48bcb2..5691e29cb0e96 100644
--- a/pandas/tseries/tdi.py
+++ b/pandas/tseries/tdi.py
@@ -129,6 +129,8 @@ def _join_i8_wrapper(joinf, **kwargs):
_comparables = ['name', 'freq']
_attributes = ['name', 'freq']
_is_numeric_dtype = True
+ _infer_as_myclass = True
+
freq = None
def __new__(cls, data=None, unit=None,
@@ -514,8 +516,7 @@ def _wrap_joined_index(self, joined, other):
name = self.name if self.name == other.name else None
if (isinstance(other, TimedeltaIndex) and self.freq == other.freq
and self._can_fast_union(other)):
- joined = self._shallow_copy(joined)
- joined.name = name
+ joined = self._shallow_copy(joined, name=name)
return joined
else:
return self._simple_new(joined, name)
 | `.loc` result with duplicated keys may have incorrect `Index` dtype.
```
import pandas as pd
ser = pd.Series([0.1, 0.2], index=pd.Index([1, 2], name='idx'))
# OK
ser.loc[[2, 2, 1]].index
# Int64Index([2, 2, 1], dtype='int64', name=u'idx')
# NG, Int64Index(dtype=object)
ser.loc[[3, 2, 3]].index
# Int64Index([3, 2, 3], dtype='object', name=u'idx')
ser.loc[[3, 2, 3, 'x']].index
# Int64Index([3, 2, 3, u'x'], dtype='object', name=u'idx')
idx = pd.date_range('2011-01-01', '2011-01-02', freq='D', name='idx')
ser = pd.Series([0.1, 0.2], index=idx, name='s')
# OK
ser.loc[[pd.Timestamp('2011-01-02'), pd.Timestamp('2011-01-02'), pd.Timestamp('2011-01-01')]].index
# DatetimeIndex(['2011-01-02', '2011-01-02', '2011-01-01'], dtype='datetime64[ns]', name=u'idx', freq=None)
# NG, ValueError
ser.loc[[pd.Timestamp('2011-01-03'), pd.Timestamp('2011-01-02'), pd.Timestamp('2011-01-03')]].index
# ValueError: Inferred frequency None from passed dates does not conform to passed frequency D
```
## After the PR:
Above OK results are unchanged.
```
import pandas as pd
ser = pd.Series([0.1, 0.2], index=pd.Index([1, 2], name='idx'))
ser.loc[[3, 2, 3]].index
# Int64Index([3, 2, 3], dtype='int64', name=u'idx')
ser.loc[[3, 2, 3, 'x']].index
# Index([3, 2, 3, u'x'], dtype='object', name=u'idx')
idx = pd.date_range('2011-01-01', '2011-01-02', freq='D', name='idx')
ser = pd.Series([0.1, 0.2], index=idx, name='s')
ser.loc[[pd.Timestamp('2011-01-03'), pd.Timestamp('2011-01-02'), pd.Timestamp('2011-01-03')]].index
# DatetimeIndex(['2011-01-03', '2011-01-02', '2011-01-03'], dtype='datetime64[ns]', name=u'idx', freq=None)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11497 | 2015-11-01T12:05:05Z | 2015-11-29T18:01:15Z | 2015-11-29T18:01:15Z | 2015-11-29T19:42:00Z |
ENH: support Akima 1D interpolation | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 28129287d51af..13389b603ed6c 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -25,6 +25,7 @@ Enhancements
objects for the ``filepath_or_buffer`` argument. (:issue:`11033`)
- ``DataFrame`` now uses the fields of a ``namedtuple`` as columns, if columns are not supplied (:issue:`11181`)
- Improve the error message displayed in :func:`pandas.io.gbq.to_gbq` when the DataFrame does not match the schema of the destination table (:issue:`11359`)
+- Akima 1D interpolation is now supported (:issue:`7588`)
.. _whatsnew_0171.api:
@@ -118,3 +119,7 @@ Bug Fixes
- Bug in ``to_excel`` with openpyxl 2.2+ and merging (:issue:`11408`)
- Bug in ``DataFrame.to_dict()`` produces a ``np.datetime64`` object instead of ``Timestamp`` when only datetime is present in data (:issue:`11327`)
+
+- Bug in ``Panel.fillna()`` does not fill across axis 0 (:issue:`8251`)
+
+- Bug in ``Panel.fillna()`` loses index names (:issue:`3570`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index e304684036766..c6b33557951d5 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -10,7 +10,7 @@
from pandas.core.algorithms import factorize
from pandas.core.base import PandasObject, PandasDelegate
import pandas.core.common as com
-from pandas.core.missing import interpolate_2d
+from pandas.core.missing import pad
from pandas.util.decorators import cache_readonly, deprecate_kwarg
from pandas.core.common import (ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex,
@@ -1313,8 +1313,7 @@ def fillna(self, value=None, method=None, limit=None):
if method is not None:
values = self.to_dense().reshape(-1, len(self))
- values = interpolate_2d(
- values, method, 0, None, value).astype(self.categories.dtype)[0]
+ values = pad(values, method, 0, None, value).astype(self.categories.dtype)[0]
values = _get_codes_for_values(values, self.categories)
else:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index f46296bb6f70c..34e9047a9fdd2 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2774,7 +2774,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
# set the default here, so functions examining the signaure
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
- axis = 0
+ axis = self._stat_axis_name
axis = self._get_axis_number(axis)
method = mis._clean_fill_method(method)
@@ -2782,31 +2782,19 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
- if self._is_mixed_type and axis == 1:
+ if self._is_mixed_type:
+ if (self.ndim > 2) and (axis == 0):
+ raise NotImplementedError('cannot fill across axis 0 for mixed dtypes')
if inplace:
- raise NotImplementedError()
- result = self.T.fillna(method=method, limit=limit).T
-
- # need to downcast here because of all of the transposes
- result._data = result._data.downcast()
-
- return result
-
- # > 3d
- if self.ndim > 3:
- raise NotImplementedError(
- 'Cannot fillna with a method for > 3dims'
- )
+ raise NotImplementedError('cannot fill inplace for mixed dtypes')
+ elif (self.ndim == 2) and (axis == 1):
+ result = self.T.fillna(method=method, limit=limit).T
- # 3d
- elif self.ndim == 3:
+ # need to downcast here because of all of the transposes
+ result._data = result._data.downcast()
- # fill in 2d chunks
- result = dict([(col, s.fillna(method=method, value=value))
- for col, s in compat.iteritems(self)])
- return self._constructor.from_dict(result).__finalize__(self)
+ return result
- # 2d or less
method = mis._clean_fill_method(method)
new_data = self._data.interpolate(method=method,
axis=axis,
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 1b08140ebec09..08048d684e407 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -910,12 +910,12 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
values = self.values if inplace else self.values.copy()
values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = self._try_operate(values)
- values = mis.interpolate_2d(values,
- method=method,
- axis=axis,
- limit=limit,
- fill_value=fill_value,
- dtype=self.dtype)
+ values = mis.pad(values,
+ method=method,
+ axis=axis,
+ limit=limit,
+ fill_value=fill_value,
+ dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block(values,
@@ -950,8 +950,8 @@ def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
- # i.e. not an arg to mis.interpolate_1d
- return mis.interpolate_1d(index, x, method=method, limit=limit,
+ # i.e. not an arg to mis.interpolate
+ return mis.interpolate(index, x, method=method, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
@@ -2358,7 +2358,7 @@ def make_block_same_class(self, values, placement,
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
- values = mis.interpolate_2d(
+ values = mis.pad(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
@@ -3774,7 +3774,7 @@ def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
# fill if needed
if method is not None or limit is not None:
- new_values = mis.interpolate_2d(new_values, method=method,
+ new_values = mis.pad(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index f1143ad808b91..184faf1f0e7c3 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -39,7 +39,7 @@ def _clean_interp_method(method, **kwargs):
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial',
'krogh', 'piecewise_polynomial',
- 'pchip', 'spline']
+ 'pchip', 'spline', 'akima']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
@@ -49,9 +49,9 @@ def _clean_interp_method(method, **kwargs):
return method
-def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
- limit_direction='forward',
- fill_value=None, bounds_error=False, order=None, **kwargs):
+def interpolate(xvalues, yvalues, method='linear', limit=None,
+ limit_direction='forward',
+ fill_value=None, bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
@@ -144,7 +144,7 @@ def _interp_limit(invalid, fw_limit, bw_limit):
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
- 'piecewise_polynomial', 'pchip']
+ 'piecewise_polynomial', 'pchip', 'akima']
if method in sp_methods:
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
@@ -156,6 +156,8 @@ def _interp_limit(invalid, fw_limit, bw_limit):
bounds_error=bounds_error, order=order, **kwargs)
result[violate_limit] = np.nan
return result
+ else:
+ raise ValueError('interpolation method not found')
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
@@ -214,20 +216,51 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
- method = alt_methods[method]
- new_y = method(x, y, new_x, **kwargs)
+ if method == 'akima':
+ try:
+ interpolator = interpolate.Akima1DInterpolator(x, y)
+ except AttributeError:
+ raise ImportError("Your version of scipy does not support "
+ "Akima interpolation" )
+ new_y = interpolator(new_x)
+ else:
+ method = alt_methods[method]
+ new_y = method(x, y, new_x, **kwargs)
return new_y
-def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
- """ perform an actual interpolation of values, values will be make 2-d if
- needed fills inplace, returns the result
+def pad(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
+ """
+ Perform an actual interpolation of values. 1-d values will be made 2-d temporarily.
+ Returns the result
"""
+ ndim = values.ndim
+ shape = values.shape
+
+ func = partial(pad, method=method, limit=limit, fill_value=fill_value, dtype=dtype)
+
+ if ndim > 2:
+ if ndim == 3:
+ if axis == 0:
+ for n in range(shape[1]):
+ values[:,n] = func(values[:,n], axis=1)
+ else:
+ for n in range(shape[0]):
+ values[n] = func(values[n], axis=(1 if axis == 1 else 0))
+ else:
+ if axis == 0:
+ for n in range(shape[1]):
+ values[:,n] = func(values[:,n], axis=0)
+ else:
+ for n in range(shape[0]):
+ values[n] = func(values[n], axis=axis-1)
+
+ return values
+
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
- ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index d29673e96ecdd..ab240ea90a3f3 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -1167,6 +1167,15 @@ def test_interp_alt_scipy(self):
expected.ix[5,'A'] = 6.125
assert_frame_equal(result, expected)
+ try:
+ from scipy.interpolate import Akima1DInterpolator
+ except ImportError:
+ raise nose.SkipTest('scipy.interpolate.Akima1DInterpolator missing')
+ result = df.interpolate(method='akima')
+ expected.ix[2,'A'] = 3
+ expected.ix[5,'A'] = 6
+ assert_frame_equal(result, expected)
+
def test_interp_rowwise(self):
df = DataFrame({0: [1, 2, np.nan, 4],
1: [2, 3, 4, np.nan],
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 1f8bcf8c9879f..0c092f89c4090 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1454,20 +1454,95 @@ def test_fillna(self):
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
+ # Fill forward.
+ filled = self.panel.fillna(method='ffill')
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='ffill'))
+
+ # With limit.
+ filled = self.panel.fillna(method='backfill', limit=1)
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='backfill', limit=1))
+
+ # With downcast.
+ rounded = self.panel.apply(lambda x: x.apply(np.round))
+ filled = rounded.fillna(method='backfill', downcast='infer')
+ assert_frame_equal(filled['ItemA'],
+ rounded['ItemA'].fillna(method='backfill', downcast='infer'))
+
+ # Now explicitly request axis 1.
+ filled = self.panel.fillna(method='backfill', axis=1)
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='backfill', axis=0))
+
+ # Fill along axis 2, equivalent to filling along axis 1 of each
+ # DataFrame.
+ filled = self.panel.fillna(method='backfill', axis=2)
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='backfill', axis=1))
+
+ # Fill an empty panel.
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
+ # either method or value must be specified
self.assertRaises(ValueError, self.panel.fillna)
+ # method and value can not both be specified
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
+ # can't pass list or tuple, only scalar
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3,4,5))
p.iloc[0:2,0:2,0:2] = np.nan
- self.assertRaises(NotImplementedError, lambda : p.fillna(999,limit=1))
+ self.assertRaises(NotImplementedError, lambda : p.fillna(999, limit=1))
+
+ def test_fillna_axis_0(self):
+ # GH 8395
+
+ # Forward fill along axis 0, interpolating values across DataFrames.
+ filled = self.panel.fillna(method='ffill', axis=0)
+ nan_indexes = self.panel['ItemB']['C'].index[
+ self.panel['ItemB']['C'].apply(np.isnan)]
+
+ # Values from ItemA are filled into ItemB.
+ assert_series_equal(filled['ItemB']['C'][nan_indexes],
+ self.panel['ItemA']['C'][nan_indexes])
+
+ # Backfill along axis 0.
+ filled = self.panel.fillna(method='backfill', axis=0)
+
+ # The test data lacks values that can be backfilled on axis 0.
+ assert_panel_equal(filled, self.panel)
+
+ # Reverse the panel and backfill along axis 0, to properly test
+ # backfill.
+ reverse_panel = self.panel.reindex_axis(reversed(self.panel.axes[0]))
+ filled = reverse_panel.fillna(method='bfill', axis=0)
+ nan_indexes = reverse_panel['ItemB']['C'].index[
+ reverse_panel['ItemB']['C'].apply(np.isnan)]
+ assert_series_equal(filled['ItemB']['C'][nan_indexes],
+ reverse_panel['ItemA']['C'][nan_indexes])
+
+ # Fill along axis 0 with limit.
+ filled = self.panel.fillna(method='ffill', axis=0, limit=1)
+ a_nan = self.panel['ItemA']['C'].index[
+ self.panel['ItemA']['C'].apply(np.isnan)]
+ b_nan = self.panel['ItemB']['C'].index[
+ self.panel['ItemB']['C'].apply(np.isnan)]
+
+ # Cells that are nan in ItemB but not in ItemA remain unfilled in
+ # ItemC.
+ self.assertTrue(
+ filled['ItemC']['C'][b_nan.diff(a_nan)].apply(np.isnan).all())
+
+        # filling across axis 0 is not implemented for mixed dtypes
+ panel = self.panel.copy()
+ panel['str'] = 'foo'
+ self.assertRaises(NotImplementedError, lambda : panel.fillna(method='ffill', axis=0))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 3772d4b9c272b..dffb0ccc6effe 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -909,11 +909,106 @@ def test_sort_index(self):
# assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
+ # GH 8395
self.assertFalse(np.isfinite(self.panel4d.values).all())
filled = self.panel4d.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
- self.assertRaises(NotImplementedError, self.panel4d.fillna, method='pad')
+ filled = self.panel4d.fillna(method='backfill')
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='backfill'))
+
+ panel4d = self.panel4d.copy()
+ panel4d['str'] = 'foo'
+
+ filled = panel4d.fillna(method='backfill')
+ assert_frame_equal(filled['l1']['ItemA'],
+ panel4d['l1']['ItemA'].fillna(method='backfill'))
+
+ # Fill forward.
+ filled = self.panel4d.fillna(method='ffill')
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='ffill'))
+
+ # With limit.
+ filled = self.panel4d.fillna(method='backfill', limit=1)
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='backfill', limit=1))
+
+ # With downcast.
+ rounded = self.panel4d.apply(lambda x: x.apply(np.round))
+ filled = rounded.fillna(method='backfill', downcast='infer')
+ assert_frame_equal(filled['l1']['ItemA'],
+ rounded['l1']['ItemA'].fillna(method='backfill', downcast='infer'))
+
+ # Now explicitly request axis 2.
+ filled = self.panel4d.fillna(method='backfill', axis=2)
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='backfill', axis=0))
+
+ # Fill along axis 3, equivalent to filling along axis 1 of each
+ # DataFrame.
+ filled = self.panel4d.fillna(method='backfill', axis=3)
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='backfill', axis=1))
+
+ # Fill an empty panel.
+ empty = self.panel4d.reindex(items=[])
+ filled = empty.fillna(0)
+ assert_panel4d_equal(filled, empty)
+
+ # either method or value must be specified
+ self.assertRaises(ValueError, self.panel4d.fillna)
+ # method and value can not both be specified
+ self.assertRaises(ValueError, self.panel4d.fillna, 5, method='ffill')
+
+ # can't pass list or tuple, only scalar
+ self.assertRaises(TypeError, self.panel4d.fillna, [1, 2])
+ self.assertRaises(TypeError, self.panel4d.fillna, (1, 2))
+
+ # limit not implemented when only value is specified
+ p = Panel4D(np.random.randn(3,4,5,6))
+ p.iloc[0:2,0:2,0:2,0:2] = np.nan
+ self.assertRaises(NotImplementedError, lambda : p.fillna(999, limit=1))
+
+ def test_fillna_axis_0(self):
+ # GH 8395
+
+ # Back fill along axis 0, interpolating values across Panels
+ filled = self.panel4d.fillna(method='bfill', axis=0)
+ nan_indexes = self.panel4d['l1']['ItemB']['C'].index[
+ self.panel4d['l1']['ItemB']['C'].apply(np.isnan)]
+
+ # Values from ItemC are filled into ItemB.
+ assert_series_equal(filled['l1']['ItemB']['C'][nan_indexes],
+ self.panel4d['l1']['ItemC']['C'][nan_indexes])
+
+ # Forward fill along axis 0.
+ filled = self.panel4d.fillna(method='ffill', axis=0)
+
+        # The test data lacks values that can be forward filled on axis 0.
+ assert_panel4d_equal(filled, self.panel4d)
+
+        # Reverse the panel and forward fill along axis 0, to properly test
+ # forward fill.
+ reverse_panel = self.panel4d.reindex_axis(reversed(self.panel4d.axes[0]))
+ filled = reverse_panel.fillna(method='ffill', axis=0)
+ nan_indexes = reverse_panel['l3']['ItemB']['C'].index[
+ reverse_panel['l3']['ItemB']['C'].apply(np.isnan)]
+ assert_series_equal(filled['l3']['ItemB']['C'][nan_indexes],
+ reverse_panel['l1']['ItemB']['C'][nan_indexes])
+
+ # Fill along axis 0 with limit.
+ filled = self.panel4d.fillna(method='bfill', axis=0, limit=1)
+ c_nan = self.panel4d['l1']['ItemC']['C'].index[
+ self.panel4d['l1']['ItemC']['C'].apply(np.isnan)]
+ b_nan = self.panel4d['l1']['ItemB']['C'].index[
+ self.panel4d['l1']['ItemB']['C'].apply(np.isnan)]
+
+ # Cells that are nan in ItemB but not in ItemC remain unfilled in
+ # ItemA.
+ self.assertTrue(
+ filled['l1']['ItemA']['C'][b_nan.diff(c_nan)].apply(np.isnan).all())
def test_swapaxes(self):
result = self.panel4d.swapaxes('labels', 'items')
| I went hunting for some low-hanging fruit in the area of missing data and came across #7588. I had to implement it a little differently than krogh, pchip, etc. because there is no convenience function for Akima like there are for the already supported methods.
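Roughly, the difference looks like this (just a sketch with throwaway sample data; it assumes a scipy recent enough to ship `Akima1DInterpolator`, i.e. >= 0.14):

```
import numpy as np
from scipy import interpolate

x = np.arange(5, dtype=float)
y = np.array([0.0, 1.0, 4.0, 9.0, 16.0])
new_x = np.array([2.5])

# pchip/krogh/etc. have functional convenience wrappers
interpolate.pchip_interpolate(x, y, new_x)

# Akima only exposes a class, so it has to be instantiated and then called
interpolator = interpolate.Akima1DInterpolator(x, y)
interpolator(new_x)
```

With this branch, `Series.interpolate(method='akima')` goes through that class instead of the usual lookup table of convenience functions.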
Once #11445 is merged I can rebase.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11496 | 2015-10-31T22:12:28Z | 2016-01-20T14:13:35Z | null | 2018-05-31T14:13:33Z |
Update parser.pyx | diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 8ac1f64f2d50e..cc5a0df1bdcc2 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -1825,7 +1825,7 @@ def _concatenate_chunks(list chunks):
if warning_columns:
warning_names = ','.join(warning_columns)
warning_message = " ".join(["Columns (%s) have mixed types." % warning_names,
- "Specify dtype option on import or set low_memory=False."
+ "Specify dtype option on import"
])
warnings.warn(warning_message, DtypeWarning, stacklevel=8)
return result
| fix #5888
| https://api.github.com/repos/pandas-dev/pandas/pulls/11491 | 2015-10-31T17:21:38Z | 2015-11-07T14:55:16Z | null | 2015-11-07T14:55:16Z |
ENH: Standardized timeseries accessor names, #9606 | diff --git a/doc/source/api.rst b/doc/source/api.rst
index bfd1c92d14acd..a19dedb37d57e 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -499,8 +499,10 @@ These can be accessed like ``Series.dt.<property>``.
Series.dt.week
Series.dt.weekofyear
Series.dt.dayofweek
+ Series.dt.day_of_week
Series.dt.weekday
Series.dt.dayofyear
+ Series.dt.day_of_year
Series.dt.quarter
Series.dt.is_month_start
Series.dt.is_month_end
@@ -1469,9 +1471,11 @@ Time/Date Components
DatetimeIndex.date
DatetimeIndex.time
DatetimeIndex.dayofyear
+ DatetimeIndex.day_of_year
DatetimeIndex.weekofyear
DatetimeIndex.week
DatetimeIndex.dayofweek
+ DatetimeIndex.day_of_week
DatetimeIndex.weekday
DatetimeIndex.quarter
DatetimeIndex.tz
diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 1d21f96a7d539..c824395ea4cd4 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -478,10 +478,10 @@ There are several time/date properties that one can access from ``Timestamp`` or
nanosecond,"The nanoseconds of the datetime"
date,"Returns datetime.date"
time,"Returns datetime.time"
- dayofyear,"The ordinal day of year"
+ day_of_year,"The ordinal day of year"
weekofyear,"The week ordinal of the year"
week,"The week ordinal of the year"
- dayofweek,"The day of the week with Monday=0, Sunday=6"
+ day_of_week,"The day of the week with Monday=0, Sunday=6"
weekday,"The day of the week with Monday=0, Sunday=6"
quarter,"Quarter of the date: Jan=Mar = 1, Apr-Jun = 2, etc."
is_month_start,"Logical indicating if first day of month (defined by frequency)"
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 28129287d51af..36c304c8456d4 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -31,6 +31,7 @@ Enhancements
API changes
~~~~~~~~~~~
+- ``day_of_year`` and ``day_of_week`` accessors for timeseries now exist in addition to ``dayofyear`` and ``dayofweek`` (standardizes naming conventions) (:issue:`9606`)
- min and max reductions on ``datetime64`` and ``timedelta64`` dtyped series now
result in ``NaT`` and not ``nan`` (:issue:`11245`).
- Regression from 0.16.2 for output formatting of long floats/nan, restored in (:issue:`11302`)
diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx
index cfc50afc8f9f3..1a9279853a132 100644
--- a/pandas/src/period.pyx
+++ b/pandas/src/period.pyx
@@ -959,9 +959,15 @@ cdef class Period(object):
property weekday:
def __get__(self):
return self.dayofweek
+ property day_of_week:
+ def __get__(self):
+ return self.dayofweek
property dayofyear:
def __get__(self):
return self._field(9)
+ property day_of_year:
+ def __get__(self):
+ return self.dayofyear
property quarter:
def __get__(self):
return self._field(2)
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 868057c675594..ca499e31ab101 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -192,10 +192,12 @@ def _join_i8_wrapper(joinf, **kwargs):
_comparables = ['name', 'freqstr', 'tz']
_attributes = ['name', 'freq', 'tz']
_datetimelike_ops = ['year','month','day','hour','minute','second',
- 'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'days_in_month', 'daysinmonth',
- 'date','time','microsecond','nanosecond','is_month_start','is_month_end',
- 'is_quarter_start','is_quarter_end','is_year_start','is_year_end',
- 'tz','freq']
+ 'weekofyear','week','dayofweek','day_of_week',
+ 'weekday','dayofyear','day_of_year','quarter',
+ 'days_in_month', 'daysinmonth','date',
+ 'time','microsecond','nanosecond','is_month_start',
+ 'is_month_end','is_quarter_start','is_quarter_end',
+ 'is_year_start','is_year_end','tz','freq']
_is_numeric_dtype = False
@@ -1458,7 +1460,9 @@ def _set_freq(self, value):
dayofweek = _field_accessor('dayofweek', 'dow',
"The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
+ day_of_week = dayofweek
dayofyear = _field_accessor('dayofyear', 'doy', "The ordinal day of the year")
+ day_of_year = dayofyear
quarter = _field_accessor('quarter', 'q', "The quarter of the date")
days_in_month = _field_accessor('days_in_month', 'dim', "The number of days in the month\n\n.. versionadded:: 0.16.0")
daysinmonth = days_in_month
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 888c50e86b7b2..42e204e32d02a 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -154,7 +154,10 @@ class PeriodIndex(DatelikeOps, DatetimeIndexOpsMixin, Int64Index):
_typ = 'periodindex'
_attributes = ['name','freq']
_datetimelike_ops = ['year','month','day','hour','minute','second',
- 'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'qyear', 'freq', 'days_in_month', 'daysinmonth']
+ 'weekofyear','week','dayofweek',
+ 'day_of_week','weekday','dayofyear','day_of_year',
+ 'quarter','qyear', 'freq', 'days_in_month',
+ 'daysinmonth']
_is_numeric_dtype = False
freq = None
@@ -477,7 +480,9 @@ def to_datetime(self, dayfirst=False):
week = weekofyear
dayofweek = _field_accessor('dayofweek', 10, "The day of the week with Monday=0, Sunday=6")
weekday = dayofweek
+ day_of_week = dayofweek
dayofyear = day_of_year = _field_accessor('dayofyear', 9, "The ordinal day of the year")
+ day_of_year = dayofyear
quarter = _field_accessor('quarter', 2, "The quarter of the date")
qyear = _field_accessor('qyear', 1)
days_in_month = _field_accessor('days_in_month', 11, "The number of days in the month")
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 230016f00374f..4b55e759e2dbe 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -935,7 +935,8 @@ def test_nat_vector_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
- 'week', 'dayofyear', 'days_in_month']
+ 'week', 'dayofyear', 'day_of_year', 'day_of_week',
+ 'days_in_month']
for field in fields:
result = getattr(idx, field)
expected = [getattr(x, field) if x is not NaT else np.nan
@@ -945,8 +946,8 @@ def test_nat_vector_field_access(self):
def test_nat_scalar_field_access(self):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'microsecond', 'nanosecond',
- 'week', 'dayofyear', 'days_in_month', 'daysinmonth',
- 'dayofweek']
+                  'week', 'dayofyear', 'day_of_year', 'days_in_month',
+ 'daysinmonth', 'dayofweek', 'day_of_week']
for field in fields:
result = getattr(NaT, field)
self.assertTrue(np.isnan(result))
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index afb15badf433c..1edc92bbc38b6 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -396,10 +396,14 @@ class Timestamp(_Timestamp):
def dayofweek(self):
return self.weekday()
+ day_of_week = dayofweek
+
@property
def dayofyear(self):
return self._get_field('doy')
+ day_of_year = dayofyear
+
@property
def week(self):
return self._get_field('woy')
@@ -650,7 +654,8 @@ class NaTType(_NaT):
fields = ['year', 'quarter', 'month', 'day', 'hour',
'minute', 'second', 'millisecond', 'microsecond', 'nanosecond',
- 'week', 'dayofyear', 'days_in_month', 'daysinmonth', 'dayofweek']
+ 'week', 'dayofyear', 'day_of_year', 'days_in_month', 'daysinmonth',
+ 'dayofweek', 'day_of_week']
for field in fields:
prop = property(fget=lambda self: np.nan)
setattr(NaTType, field, prop)
| In accordance with https://github.com/pydata/pandas/issues/9606, I added `day_of_week` and `day_of_year` accessors to DatetimeIndex, Timestamp, and Period. Since https://github.com/pydata/pandas/issues/9606 didn't mention `weekofyear`, and there's already `week` as a replacement for that, I didn't add a `week_of_year`, but I'm happy to.
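A quick sketch of how the new aliases read with this branch (the old spellings keep working, the new names are just aliases; dates picked arbitrarily):

```
import pandas as pd

ts = pd.Timestamp('2015-10-31')
ts.day_of_week   # 5, same as ts.dayofweek (Saturday)
ts.day_of_year   # 304, same as ts.dayofyear

idx = pd.date_range('2015-10-30', periods=3, freq='D')
idx.day_of_week  # matches idx.dayofweek
idx.day_of_year  # matches idx.dayofyear
```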
I updated the tests and docs, but this is my first PR to pandas involving actual code, so let me know if I missed anything.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11489 | 2015-10-31T06:53:19Z | 2016-01-30T15:47:59Z | null | 2016-01-30T15:47:59Z |
Fix #10770 by adding days_in_month to docs | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 1d21f96a7d539..2bc96d1b7b1aa 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -484,6 +484,7 @@ There are several time/date properties that one can access from ``Timestamp`` or
dayofweek,"The day of the week with Monday=0, Sunday=6"
weekday,"The day of the week with Monday=0, Sunday=6"
quarter,"Quarter of the date: Jan=Mar = 1, Apr-Jun = 2, etc."
+ days_in_month,"The number of days in the month of the datetime"
is_month_start,"Logical indicating if first day of month (defined by frequency)"
is_month_end,"Logical indicating if last day of month (defined by frequency)"
is_quarter_start,"Logical indicating if first day of quarter (defined by frequency)"
| Fix https://github.com/pydata/pandas/issues/10770 by adding days_in_month to Time/Date Components. Am I right in thinking `days_in_month` (rather than `daysinmonth`) is the right name to use going forward? Is there anywhere else in the documentation that it should be added that I missed?
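For reference, a tiny sketch of the accessor being documented (as far as I can tell the two spellings are aliases of each other; sample dates are arbitrary):

```
import pandas as pd

ts = pd.Timestamp('2015-02-01')
ts.days_in_month   # 28
ts.daysinmonth     # 28, same value via the alias

pd.date_range('2015-01-31', periods=2, freq='M').days_in_month
# -> [31, 28]
```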
| https://api.github.com/repos/pandas-dev/pandas/pulls/11486 | 2015-10-30T19:25:38Z | 2015-10-30T22:08:05Z | 2015-10-30T22:08:05Z | 2015-10-31T04:22:11Z |
BUG: Holiday observance rules could not be applied | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 2bc96d1b7b1aa..01b342213de07 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1043,10 +1043,14 @@ An example of how holidays and holiday calendars are defined:
cal.holidays(datetime(2012, 1, 1), datetime(2012, 12, 31))
Using this calendar, creating an index or doing offset arithmetic skips weekends
-and holidays (i.e., Memorial Day/July 4th).
+and holidays (i.e., Memorial Day/July 4th). For example, the below defines
+a custom business day offset using the ``ExampleCalendar``. Like any other offset,
+it can be used to create a ``DatetimeIndex`` or added to ``datetime``
+or ``Timestamp`` objects.
.. ipython:: python
+ from pandas.tseries.offsets import CDay
DatetimeIndex(start='7/1/2012', end='7/10/2012',
freq=CDay(calendar=cal)).to_pydatetime()
offset = CustomBusinessDay(calendar=cal)
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 1d9b02e6a7bb1..65eb3e605950d 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -115,6 +115,8 @@ Bug Fixes
- Fix regression in setting of ``xticks`` in ``plot`` (:issue:`11529`).
+- Bug in ``holiday.dates`` where observance rules could not be applied to a holiday, plus a related documentation enhancement (:issue:`11477`, :issue:`11533`)
+
diff --git a/pandas/tseries/holiday.py b/pandas/tseries/holiday.py
index e98c5dd93e68a..90f6bff498e62 100644
--- a/pandas/tseries/holiday.py
+++ b/pandas/tseries/holiday.py
@@ -3,6 +3,7 @@
from datetime import datetime, timedelta
from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
from pandas.tseries.offsets import Easter, Day
+import numpy as np
def next_monday(dt):
@@ -156,8 +157,8 @@ class from pandas.tseries.offsets
self.month = month
self.day = day
self.offset = offset
- self.start_date = start_date
- self.end_date = end_date
+ self.start_date = Timestamp(start_date) if start_date is not None else start_date
+ self.end_date = Timestamp(end_date) if end_date is not None else end_date
self.observance = observance
assert (days_of_week is None or type(days_of_week) == tuple)
self.days_of_week = days_of_week
@@ -179,7 +180,7 @@ def __repr__(self):
def dates(self, start_date, end_date, return_name=False):
"""
- Calculate holidays between start date and end date
+ Calculate holidays observed between start date and end date
Parameters
----------
@@ -189,6 +190,12 @@ def dates(self, start_date, end_date, return_name=False):
If True, return a series that has dates and holiday names.
False will only return dates.
"""
+ start_date = Timestamp(start_date)
+ end_date = Timestamp(end_date)
+
+ filter_start_date = start_date
+ filter_end_date = end_date
+
if self.year is not None:
dt = Timestamp(datetime(self.year, self.month, self.day))
if return_name:
@@ -196,40 +203,57 @@ def dates(self, start_date, end_date, return_name=False):
else:
return [dt]
- if self.start_date is not None:
- start_date = self.start_date
-
- if self.end_date is not None:
- end_date = self.end_date
-
- start_date = Timestamp(start_date)
- end_date = Timestamp(end_date)
-
- year_offset = DateOffset(years=1)
- base_date = Timestamp(
- datetime(start_date.year, self.month, self.day),
- tz=start_date.tz,
- )
- dates = DatetimeIndex(start=base_date, end=end_date, freq=year_offset)
+ dates = self._reference_dates(start_date, end_date)
holiday_dates = self._apply_rule(dates)
if self.days_of_week is not None:
- holiday_dates = list(filter(lambda x: x is not None and
- x.dayofweek in self.days_of_week,
- holiday_dates))
- else:
- holiday_dates = list(filter(lambda x: x is not None, holiday_dates))
+ holiday_dates = holiday_dates[np.in1d(holiday_dates.dayofweek,
+ self.days_of_week)]
+
+ if self.start_date is not None:
+ filter_start_date = max(self.start_date.tz_localize(filter_start_date.tz), filter_start_date)
+ if self.end_date is not None:
+ filter_end_date = min(self.end_date.tz_localize(filter_end_date.tz), filter_end_date)
+ holiday_dates = holiday_dates[(holiday_dates >= filter_start_date) &
+ (holiday_dates <= filter_end_date)]
if return_name:
return Series(self.name, index=holiday_dates)
return holiday_dates
+
+
+ def _reference_dates(self, start_date, end_date):
+ """
+ Get reference dates for the holiday.
+
+ Return reference dates for the holiday also returning the year
+ prior to the start_date and year following the end_date. This ensures
+ that any offsets to be applied will yield the holidays within
+ the passed in dates.
+ """
+ if self.start_date is not None:
+ start_date = self.start_date.tz_localize(start_date.tz)
+
+ if self.end_date is not None:
+ end_date = self.end_date.tz_localize(start_date.tz)
+
+ year_offset = DateOffset(years=1)
+ reference_start_date = Timestamp(
+ datetime(start_date.year-1, self.month, self.day))
+
+ reference_end_date = Timestamp(
+ datetime(end_date.year+1, self.month, self.day))
+ # Don't process unnecessary holidays
+ dates = DatetimeIndex(start=reference_start_date, end=reference_end_date,
+ freq=year_offset, tz=start_date.tz)
+
+ return dates
def _apply_rule(self, dates):
"""
- Apply the given offset/observance to an
- iterable of dates.
+ Apply the given offset/observance to a DatetimeIndex of dates.
Parameters
----------
- dates : array-like
+ dates : DatetimeIndex
Dates to apply the given offset/observance rule
Returns
@@ -237,7 +261,7 @@ def _apply_rule(self, dates):
Dates with rules applied
"""
if self.observance is not None:
- return map(lambda d: self.observance(d), dates)
+ return dates.map(lambda d: self.observance(d))
if self.offset is not None:
if not isinstance(self.offset, list):
@@ -245,7 +269,7 @@ def _apply_rule(self, dates):
else:
offsets = self.offset
for offset in offsets:
- dates = list(map(lambda d: d + offset, dates))
+ dates += offset
return dates
holiday_calendars = {}
@@ -303,6 +327,13 @@ def __init__(self, name=None, rules=None):
if rules is not None:
self.rules = rules
+
+ def rule_from_name(self, name):
+ for rule in self.rules:
+ if rule.name == name:
+ return rule
+
+ return None
def holidays(self, start=None, end=None, return_name=False):
"""
diff --git a/pandas/tseries/tests/test_holiday.py b/pandas/tseries/tests/test_holiday.py
index 7d233ba78e7b6..1da397e768a86 100644
--- a/pandas/tseries/tests/test_holiday.py
+++ b/pandas/tseries/tests/test_holiday.py
@@ -1,15 +1,17 @@
from datetime import datetime
import pandas.util.testing as tm
+from pandas import compat
from pandas import DatetimeIndex
from pandas.tseries.holiday import (
- USFederalHolidayCalendar, USMemorialDay, USThanksgivingDay,
+ USFederalHolidayCalendar, USMemorialDay, USThanksgivingDay,
nearest_workday, next_monday_or_tuesday, next_monday,
previous_friday, sunday_to_monday, Holiday, DateOffset,
MO, Timestamp, AbstractHolidayCalendar, get_calendar,
HolidayCalendarFactory, next_workday, previous_workday,
before_nearest_workday, EasterMonday, GoodFriday,
- after_nearest_workday, weekend_to_monday)
+ after_nearest_workday, weekend_to_monday, USLaborDay,
+ USColumbusDay, USMartinLutherKingJr, USPresidentsDay)
from pytz import utc
import nose
@@ -72,7 +74,20 @@ def __init__(self, name=None, rules=None):
jan2.holidays(),
DatetimeIndex(['02-Jan-2015'])
)
-
+
+ def test_calendar_observance_dates(self):
+ # Test for issue 11477
+ USFedCal = get_calendar('USFederalHolidayCalendar')
+ holidays0 = USFedCal.holidays(datetime(2015,7,3), datetime(2015,7,3)) # <-- same start and end dates
+ holidays1 = USFedCal.holidays(datetime(2015,7,3), datetime(2015,7,6)) # <-- different start and end dates
+ holidays2 = USFedCal.holidays(datetime(2015,7,3), datetime(2015,7,3)) # <-- same start and end dates
+
+ tm.assert_index_equal(holidays0, holidays1)
+ tm.assert_index_equal(holidays0, holidays2)
+
+ def test_rule_from_name(self):
+ USFedCal = get_calendar('USFederalHolidayCalendar')
+ self.assertEqual(USFedCal.rule_from_name('Thanksgiving'), USThanksgivingDay)
class TestHoliday(tm.TestCase):
@@ -193,6 +208,52 @@ def test_usthanksgivingday(self):
datetime(2020, 11, 26),
],
)
+
+ def test_holidays_within_dates(self):
+ # Fix holiday behavior found in #11477
+ # where holiday.dates returned dates outside start/end date
+ # or observed rules could not be applied as the holiday
+ # was not in the original date range (e.g., 7/4/2015 -> 7/3/2015)
+ start_date = datetime(2015, 7, 1)
+ end_date = datetime(2015, 7, 1)
+
+ calendar = get_calendar('USFederalHolidayCalendar')
+ new_years = calendar.rule_from_name('New Years Day')
+ july_4th = calendar.rule_from_name('July 4th')
+ veterans_day = calendar.rule_from_name('Veterans Day')
+ christmas = calendar.rule_from_name('Christmas')
+
+ # Holiday: (start/end date, holiday)
+ holidays = {USMemorialDay: ("2015-05-25", "2015-05-25"),
+ USLaborDay: ("2015-09-07", "2015-09-07"),
+ USColumbusDay: ("2015-10-12", "2015-10-12"),
+ USThanksgivingDay: ("2015-11-26", "2015-11-26"),
+ USMartinLutherKingJr: ("2015-01-19", "2015-01-19"),
+ USPresidentsDay: ("2015-02-16", "2015-02-16"),
+ GoodFriday: ("2015-04-03", "2015-04-03"),
+ EasterMonday: [("2015-04-06", "2015-04-06"),
+ ("2015-04-05", [])],
+ new_years: [("2015-01-01", "2015-01-01"),
+ ("2011-01-01", []),
+ ("2010-12-31", "2010-12-31")],
+ july_4th: [("2015-07-03", "2015-07-03"),
+ ("2015-07-04", [])],
+ veterans_day: [("2012-11-11", []),
+ ("2012-11-12", "2012-11-12")],
+ christmas: [("2011-12-25", []),
+ ("2011-12-26", "2011-12-26")]}
+
+ for rule, dates in compat.iteritems(holidays):
+ empty_dates = rule.dates(start_date, end_date)
+ self.assertEqual(empty_dates.tolist(), [])
+
+ if isinstance(dates, tuple):
+ dates = [dates]
+
+ for start, expected in dates:
+ if len(expected):
+ expected = [Timestamp(expected)]
+ self.check_results(rule, start, start, expected)
def test_argument_types(self):
holidays = USThanksgivingDay.dates(self.start_date,
@@ -206,8 +267,8 @@ def test_argument_types(self):
Timestamp(self.start_date),
Timestamp(self.end_date))
- self.assertEqual(holidays, holidays_1)
- self.assertEqual(holidays, holidays_2)
+ self.assert_index_equal(holidays, holidays_1)
+ self.assert_index_equal(holidays, holidays_2)
def test_special_holidays(self):
base_date = [datetime(2012, 5, 28)]
| Closes #11477
Closes #11533
There were some other bugs here that I added tests for. For example, `holiday.dates` for MLK day was returning all holidays from the holiday start date up to the end date rather than just between the range.
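Roughly what that means (a sketch, not actual test output; 2015's MLK day falls on Jan 19):

```
from datetime import datetime
from pandas.tseries.holiday import USMartinLutherKingJr

# with this change, only the dates observed inside the window come back,
# not every occurrence from the rule's start date onward
USMartinLutherKingJr.dates(datetime(2015, 1, 1), datetime(2015, 12, 31))
# -> DatetimeIndex(['2015-01-19'], ...)
```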
| https://api.github.com/repos/pandas-dev/pandas/pulls/11484 | 2015-10-30T14:11:20Z | 2015-11-14T14:57:23Z | 2015-11-14T14:57:23Z | 2015-11-14T14:57:29Z |
ENH: Adding origin parameter in pd.to_datetime | diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index d069a25c58143..60847469aa02c 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -1,17 +1,28 @@
.. _whatsnew_0190:
-v0.19.0 (August ??, 2016)
+v0.19.0 (October 2, 2016)
-------------------------
-This is a major release from 0.18.1 and includes a small number of API changes, several new features,
+This is a major release from 0.18.1 and includes number of API changes, several new features,
enhancements, and performance improvements along with a large number of bug fixes. We recommend that all
users upgrade to this version.
Highlights include:
- :func:`merge_asof` for asof-style time-series joining, see :ref:`here <whatsnew_0190.enhancements.asof_merge>`
-- ``.rolling()`` are now time-series aware, see :ref:`here <whatsnew_0190.enhancements.rolling_ts>`
-- pandas development api, see :ref:`here <whatsnew_0190.dev_api>`
+- ``.rolling()`` is now time-series aware, see :ref:`here <whatsnew_0190.enhancements.rolling_ts>`
+- :func:`read_csv` now supports parsing ``Categorical`` data, see :ref:`here <whatsnew_0190.enhancements.read_csv_categorical>`
+- A function :func:`union_categorical` has been added for combining categoricals, see :ref:`here <whatsnew_0190.enhancements.union_categoricals>`
+- ``PeriodIndex`` now has its own ``period`` dtype, and changed to be more consistent with other ``Index`` classes. See :ref:`here <whatsnew_0190.api.period>`
+- Sparse data structures gained enhanced support of ``int`` and ``bool`` dtypes, see :ref:`here <whatsnew_0190.sparse>`
+- Comparison operations with ``Series`` no longer ignores the index, see :ref:`here <whatsnew_0190.api.series_ops>` for an overview of the API changes.
+- Introduction of a pandas development API for utility functions, see :ref:`here <whatsnew_0190.dev_api>`.
+- Deprecation of ``Panel4D`` and ``PanelND``. We recommend to represent these types of n-dimensional data with the `xarray package <http://xarray.pydata.org/en/stable/>`__.
+- Removal of the previously deprecated modules ``pandas.io.data``, ``pandas.io.wb``, ``pandas.tools.rplot``.
+
+.. warning::
+
+ pandas >= 0.19.0 will no longer silence numpy ufunc warnings upon import, see :ref:`here <whatsnew_0190.errstate>`.
.. contents:: What's new in v0.19.0
:local:
@@ -22,33 +33,14 @@ Highlights include:
New features
~~~~~~~~~~~~
-.. _whatsnew_0190.dev_api:
-
-pandas development API
-^^^^^^^^^^^^^^^^^^^^^^
-
-As part of making pandas APi more uniform and accessible in the future, we have created a standard
-sub-package of pandas, ``pandas.api`` to hold public API's. We are starting by exposing type
-introspection functions in ``pandas.api.types``. More sub-packages and officially sanctioned API's
-will be published in future versions of pandas.
-
-The following are now part of this API:
-
-.. ipython:: python
-
- import pprint
- from pandas.api import types
- funcs = [ f for f in dir(types) if not f.startswith('_') ]
- pprint.pprint(funcs)
-
.. _whatsnew_0190.enhancements.asof_merge:
-:func:`merge_asof` for asof-style time-series joining
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``merge_asof`` for asof-style time-series joining
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
A long-time requested feature has been added through the :func:`merge_asof` function, to
-support asof style joining of time-series. (:issue:`1870`, :issue:`13695`, :issue:`13709`). Full documentation is
-:ref:`here <merging.merge_asof>`
+support asof style joining of time-series (:issue:`1870`, :issue:`13695`, :issue:`13709`, :issue:`13902`). Full documentation is
+:ref:`here <merging.merge_asof>`.
The :func:`merge_asof` performs an asof merge, which is similar to a left-join
except that we match on nearest key rather than equal keys.
@@ -134,10 +126,10 @@ passed DataFrame (``trades`` in this case), with the fields of the ``quotes`` me
.. _whatsnew_0190.enhancements.rolling_ts:
-``.rolling()`` are now time-series aware
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``.rolling()`` is now time-series aware
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-``.rolling()`` objects are now time-series aware and can accept a time-series offset (or convertible) for the ``window`` argument (:issue:`13327`, :issue:`12995`)
+``.rolling()`` objects are now time-series aware and can accept a time-series offset (or convertible) for the ``window`` argument (:issue:`13327`, :issue:`12995`).
See the full documentation :ref:`here <stats.moments.ts>`.
.. ipython:: python
@@ -192,18 +184,23 @@ default of the index) in a DataFrame.
.. _whatsnew_0190.enhancements.read_csv_dupe_col_names_support:
-:func:`read_csv` has improved support for duplicate column names
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``read_csv`` has improved support for duplicate column names
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. ipython:: python
+ :suppress:
+
+ from pandas.compat import StringIO
:ref:`Duplicate column names <io.dupe_names>` are now supported in :func:`read_csv` whether
they are in the file or passed in as the ``names`` parameter (:issue:`7160`, :issue:`9424`)
-.. ipython :: python
+.. ipython:: python
data = '0,1,2\n3,4,5'
names = ['a', 'b', 'a']
-Previous behaviour:
+**Previous behavior**:
.. code-block:: ipython
@@ -213,14 +210,88 @@ Previous behaviour:
0 2 1 2
1 5 4 5
-The first 'a' column contains the same data as the second 'a' column, when it should have
-contained the array ``[0, 3]``.
+The first ``a`` column contained the same data as the second ``a`` column, when it should have
+contained the values ``[0, 3]``.
-New behaviour:
+**New behavior**:
-.. ipython :: python
+.. ipython:: python
- In [2]: pd.read_csv(StringIO(data), names=names)
+ pd.read_csv(StringIO(data), names=names)
+
+
+.. _whatsnew_0190.enhancements.read_csv_categorical:
+
+``read_csv`` supports parsing ``Categorical`` directly
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The :func:`read_csv` function now supports parsing a ``Categorical`` column when
+specified as a dtype (:issue:`10153`). Depending on the structure of the data,
+this can result in a faster parse time and lower memory usage compared to
+converting to ``Categorical`` after parsing. See the io :ref:`docs here <io.categorical>`.
+
+.. ipython:: python
+
+ data = 'col1,col2,col3\na,b,1\na,b,2\nc,d,3'
+
+ pd.read_csv(StringIO(data))
+ pd.read_csv(StringIO(data)).dtypes
+ pd.read_csv(StringIO(data), dtype='category').dtypes
+
+Individual columns can be parsed as a ``Categorical`` using a dict specification
+
+.. ipython:: python
+
+ pd.read_csv(StringIO(data), dtype={'col1': 'category'}).dtypes
+
+.. note::
+
+ The resulting categories will always be parsed as strings (object dtype).
+ If the categories are numeric they can be converted using the
+ :func:`to_numeric` function, or as appropriate, another converter
+ such as :func:`to_datetime`.
+
+ .. ipython:: python
+
+ df = pd.read_csv(StringIO(data), dtype='category')
+ df.dtypes
+ df['col3']
+ df['col3'].cat.categories = pd.to_numeric(df['col3'].cat.categories)
+ df['col3']
+
+.. _whatsnew_0190.enhancements.union_categoricals:
+
+Categorical Concatenation
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- A function :func:`union_categoricals` has been added for combining categoricals, see :ref:`Unioning Categoricals<categorical.union>` (:issue:`13361`, :issue:`13763`, :issue:`13846`, :issue:`14173`)
+
+ .. ipython:: python
+
+ from pandas.types.concat import union_categoricals
+ a = pd.Categorical(["b", "c"])
+ b = pd.Categorical(["a", "b"])
+ union_categoricals([a, b])
+
+- ``concat`` and ``append`` now can concat ``category`` dtypes with different ``categories`` as ``object`` dtype (:issue:`13524`)
+
+ .. ipython:: python
+
+ s1 = pd.Series(['a', 'b'], dtype='category')
+ s2 = pd.Series(['b', 'c'], dtype='category')
+
+ **Previous behavior**:
+
+ .. code-block:: ipython
+
+ In [1]: pd.concat([s1, s2])
+ ValueError: incompatible categories in categorical concat
+
+ **New behavior**:
+
+ .. ipython:: python
+
+ pd.concat([s1, s2])
.. _whatsnew_0190.enhancements.semi_month_offsets:
@@ -235,7 +306,7 @@ These provide date offsets anchored (by default) to the 15th and end of month, a
from pandas.tseries.offsets import SemiMonthEnd, SemiMonthBegin
-SemiMonthEnd:
+**SemiMonthEnd**:
.. ipython:: python
@@ -243,7 +314,7 @@ SemiMonthEnd:
pd.date_range('2015-01-01', freq='SM', periods=4)
-SemiMonthBegin:
+**SemiMonthBegin**:
.. ipython:: python
@@ -264,7 +335,7 @@ Using the anchoring suffix, you can also specify the day of month to use instead
New Index methods
^^^^^^^^^^^^^^^^^
-Following methods and options are added to ``Index`` to be more consistent with ``Series`` and ``DataFrame``.
+The following methods and options are added to ``Index``, to be more consistent with the ``Series`` and ``DataFrame`` API.
``Index`` now supports the ``.where()`` function for same shape indexing (:issue:`13170`)
@@ -274,7 +345,7 @@ Following methods and options are added to ``Index`` to be more consistent with
idx.where([True, False, True])
-``Index`` now supports ``.dropna`` to exclude missing values (:issue:`6194`)
+``Index`` now supports ``.dropna()`` to exclude missing values (:issue:`6194`)
.. ipython:: python
@@ -292,7 +363,7 @@ For ``MultiIndex``, values are dropped if any level is missing by default. Speci
midx.dropna()
midx.dropna(how='all')
-``Index`` now supports ``.str.extractall()`` which returns a ``DataFrame``, the see :ref:`docs here <text.extractall>` (:issue:`10008`, :issue:`13156`)
+``Index`` now supports ``.str.extractall()`` which returns a ``DataFrame``, see the :ref:`docs here <text.extractall>` (:issue:`10008`, :issue:`13156`)
.. ipython:: python
@@ -301,21 +372,90 @@ For ``MultiIndex``, values are dropped if any level is missing by default. Speci
``Index.astype()`` now accepts an optional boolean argument ``copy``, which allows optional copying if the requirements on dtype are satisfied (:issue:`13209`)
-.. _whatsnew_0190.enhancements.other:
+.. _whatsnew_0190.gbq:
-Other enhancements
-^^^^^^^^^^^^^^^^^^
+Google BigQuery Enhancements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- The ``.tz_localize()`` method of ``DatetimeIndex`` and ``Timestamp`` has gained the ``errors`` keyword, so you can potentially coerce nonexistent timestamps to ``NaT``. The default behaviour remains to raising a ``NonExistentTimeError`` (:issue:`13057`)
-- ``pd.to_numeric()`` now accepts a ``downcast`` parameter, which will downcast the data if possible to smallest specified numerical dtype (:issue:`13352`)
+- The :func:`read_gbq` method has gained the ``dialect`` argument to allow users to specify whether to use BigQuery's legacy SQL or BigQuery's standard SQL. See the :ref:`docs <io.bigquery_reader>` for more details (:issue:`13615`).
+- The :func:`~DataFrame.to_gbq` method now allows the DataFrame column order to differ from the destination table schema (:issue:`11359`).
- .. ipython:: python
+.. _whatsnew_0190.errstate:
- s = ['1', 2, 3]
- pd.to_numeric(s, downcast='unsigned')
- pd.to_numeric(s, downcast='integer')
+Fine-grained numpy errstate
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
-- ``.to_hdf/read_hdf()`` now accept path objects (e.g. ``pathlib.Path``, ``py.path.local``) for the file path (:issue:`11773`)
+Previous versions of pandas would permanently silence numpy's ufunc error handling when ``pandas`` was imported. Pandas did this in order to silence the warnings that would arise from using numpy ufuncs on missing data, which are usually represented as ``NaN`` s. Unfortunately, this silenced legitimate warnings arising in non-pandas code in the application. Starting with 0.19.0, pandas will use the ``numpy.errstate`` context manager to silence these warnings in a more fine-grained manner, only around where these operations are actually used in the pandas codebase. (:issue:`13109`, :issue:`13145`)
+
+After upgrading pandas, you may see *new* ``RuntimeWarnings`` being issued from your code. These are likely legitimate, and the underlying cause likely existed in the code when using previous versions of pandas that simply silenced the warning. Use `numpy.errstate <http://docs.scipy.org/doc/numpy/reference/generated/numpy.errstate.html>`__ around the source of the ``RuntimeWarning`` to control how these conditions are handled.
+
+.. _whatsnew_0190.get_dummies_dtypes:
+
+``get_dummies`` now returns integer dtypes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The ``pd.get_dummies`` function now returns dummy-encoded columns as small integers, rather than floats (:issue:`8725`). This should provide an improved memory footprint.
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [1]: pd.get_dummies(['a', 'b', 'a', 'c']).dtypes
+
+ Out[1]:
+ a float64
+ b float64
+ c float64
+ dtype: object
+
+**New behavior**:
+
+.. ipython:: python
+
+ pd.get_dummies(['a', 'b', 'a', 'c']).dtypes
+
+
+.. _whatsnew_0190.enhancements.to_numeric_downcast:
+
+Downcast values to smallest possible dtype in ``to_numeric``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``pd.to_numeric()`` now accepts a ``downcast`` parameter, which will downcast the data if possible to smallest specified numerical dtype (:issue:`13352`)
+
+.. ipython:: python
+
+ s = ['1', 2, 3]
+ pd.to_numeric(s, downcast='unsigned')
+ pd.to_numeric(s, downcast='integer')
+
+.. _whatsnew_0190.dev_api:
+
+pandas development API
+^^^^^^^^^^^^^^^^^^^^^^
+
+As part of making pandas API more uniform and accessible in the future, we have created a standard
+sub-package of pandas, ``pandas.api`` to hold public API's. We are starting by exposing type
+introspection functions in ``pandas.api.types``. More sub-packages and officially sanctioned API's
+will be published in future versions of pandas (:issue:`13147`, :issue:`13634`)
+
+The following are now part of this API:
+
+.. ipython:: python
+
+ import pprint
+ from pandas.api import types
+ funcs = [ f for f in dir(types) if not f.startswith('_') ]
+ pprint.pprint(funcs)
+
+.. note::
+
+ Calling these functions from the internal module ``pandas.core.common`` will now show a ``DeprecationWarning`` (:issue:`13990`)
+
+
+.. _whatsnew_0190.enhancements.other:
+
+Other enhancements
+^^^^^^^^^^^^^^^^^^
- ``Timestamp`` can now accept positional and keyword parameters similar to :func:`datetime.datetime` (:issue:`10758`, :issue:`11630`)
@@ -325,23 +465,39 @@ Other enhancements
pd.Timestamp(year=2012, month=1, day=1, hour=8, minute=30)
-- The ``pd.read_csv()`` with ``engine='python'`` has gained support for the ``decimal`` option (:issue:`12933`)
-- The ``pd.read_csv()`` with ``engine='python'`` has gained support for the ``na_filter`` option (:issue:`13321`)
-- The ``pd.read_csv()`` with ``engine='python'`` has gained support for the ``memory_map`` option (:issue:`13381`)
+- The ``.resample()`` function now accepts an ``on=`` or ``level=`` parameter for resampling on a datetimelike column or ``MultiIndex`` level (:issue:`13500`)
-- The ``pd.read_html()`` has gained support for the ``na_values``, ``converters``, ``keep_default_na`` options (:issue:`13461`)
+ .. ipython:: python
+ df = pd.DataFrame({'date': pd.date_range('2015-01-01', freq='W', periods=5),
+ 'a': np.arange(5)},
+ index=pd.MultiIndex.from_arrays([
+ [1,2,3,4,5],
+ pd.date_range('2015-01-01', freq='W', periods=5)],
+ names=['v','d']))
+ df
+ df.resample('M', on='date').sum()
+ df.resample('M', level='d').sum()
+
+- The ``.get_credentials()`` method of ``GbqConnector`` can now first try to fetch `the application default credentials <https://developers.google.com/identity/protocols/application-default-credentials>`__. See the :ref:`docs <io.bigquery_authentication>` for more details (:issue:`13577`).
+- The ``.tz_localize()`` method of ``DatetimeIndex`` and ``Timestamp`` has gained the ``errors`` keyword, so you can potentially coerce nonexistent timestamps to ``NaT``. The default behavior remains raising a ``NonExistentTimeError`` (:issue:`13057`)
+- ``.to_hdf/read_hdf()`` now accept path objects (e.g. ``pathlib.Path``, ``py.path.local``) for the file path (:issue:`11773`)
+- The ``pd.read_csv()`` with ``engine='python'`` has gained support for the
+  ``decimal`` (:issue:`12933`), ``na_filter`` (:issue:`13321`) and ``memory_map`` options (:issue:`13381`).
+- Consistent with the Python API, ``pd.read_csv()`` will now interpret ``+inf`` as positive infinity (:issue:`13274`)
+- The ``pd.read_html()`` has gained support for the ``na_values``, ``converters``, ``keep_default_na`` options (:issue:`13461`)
- ``Categorical.astype()`` now accepts an optional boolean argument ``copy``, effective when dtype is categorical (:issue:`13209`)
- ``DataFrame`` has gained the ``.asof()`` method to return the last non-NaN values according to the selected subset (:issue:`13358`)
-- Consistent with the Python API, ``pd.read_csv()`` will now interpret ``+inf`` as positive infinity (:issue:`13274`)
- The ``DataFrame`` constructor will now respect key ordering if a list of ``OrderedDict`` objects are passed in (:issue:`13304`)
- ``pd.read_html()`` has gained support for the ``decimal`` option (:issue:`12907`)
-- A function :func:`union_categorical` has been added for combining categoricals, see :ref:`Unioning Categoricals<categorical.union>` (:issue:`13361`)
- ``Series`` has gained the properties ``.is_monotonic``, ``.is_monotonic_increasing``, ``.is_monotonic_decreasing``, similar to ``Index`` (:issue:`13336`)
- ``DataFrame.to_sql()`` now allows a single value as the SQL type for all columns (:issue:`11886`).
- ``Series.append`` now supports the ``ignore_index`` option (:issue:`13677`)
- ``.to_stata()`` and ``StataWriter`` can now write variable labels to Stata dta files using a dictionary to map column names to labels (:issue:`13535`, :issue:`13536`)
- ``.to_stata()`` and ``StataWriter`` will automatically convert ``datetime64[ns]`` columns to Stata format ``%tc``, rather than raising a ``ValueError`` (:issue:`12259`)
+- ``read_stata()`` and ``StataReader`` raise with a more explicit error message when reading Stata files with repeated value labels when ``convert_categoricals=True`` (:issue:`13923`)
+- ``DataFrame.style`` will now render sparsified MultiIndexes (:issue:`11655`)
+- ``DataFrame.style`` will now show column level names (e.g. ``DataFrame.columns.names``) (:issue:`13775`)
- ``DataFrame`` has gained support to re-order the columns based on the values
in a row using ``df.sort_values(by='...', axis=1)`` (:issue:`10806`)
@@ -349,53 +505,42 @@ Other enhancements
df = pd.DataFrame({'A': [2, 7], 'B': [3, 5], 'C': [4, 8]},
index=['row1', 'row2'])
+ df
df.sort_values(by='row2', axis=1)
- Added documentation to :ref:`I/O<io.dtypes>` regarding the perils of reading in columns with mixed dtypes and how to handle it (:issue:`13746`)
-
-.. _whatsnew_0190.api:
-
-
-API changes
-~~~~~~~~~~~
-
-
-- ``Panel.to_sparse`` will raise a ``NotImplementedError`` exception when called (:issue:`13778`)
-- ``Index.reshape`` will raise a ``NotImplementedError`` exception when called (:issue:`12882`)
-- Non-convertible dates in an excel date column will be returned without conversion and the column will be ``object`` dtype, rather than raising an exception (:issue:`10001`)
-- ``eval``'s upcasting rules for ``float32`` types have been updated to be more consistent with NumPy's rules. New behavior will not upcast to ``float64`` if you multiply a pandas ``float32`` object by a scalar float64. (:issue:`12388`)
-- An ``UnsupportedFunctionCall`` error is now raised if NumPy ufuncs like ``np.mean`` are called on groupby or resample objects (:issue:`12811`)
-- Calls to ``.sample()`` will respect the random seed set via ``numpy.random.seed(n)`` (:issue:`13161`)
-- ``Styler.apply`` is now more strict about the outputs your function must return. For ``axis=0`` or ``axis=1``, the output shape must be identical. For ``axis=None``, the output must be a DataFrame with identical columns and index labels. (:issue:`13222`)
-- ``Float64Index.astype(int)`` will now raise ``ValueError`` if ``Float64Index`` contains ``NaN`` values (:issue:`13149`)
-- ``TimedeltaIndex.astype(int)`` and ``DatetimeIndex.astype(int)`` will now return ``Int64Index`` instead of ``np.array`` (:issue:`13209`)
-- ``.filter()`` enforces mutual exclusion of the keyword arguments. (:issue:`12399`)
-- ``PeridIndex`` can now accept ``list`` and ``array`` which contains ``pd.NaT`` (:issue:`13430`)
-- ``__setitem__`` will no longer apply a callable rhs as a function instead of storing it. Call ``where`` directly to get the previous behavior. (:issue:`13299`)
-- Passing ``Period`` with multiple frequencies to normal ``Index`` now returns ``Index`` with ``object`` dtype (:issue:`13664`)
-- ``PeriodIndex.fillna`` with ``Period`` has different freq now coerces to ``object`` dtype (:issue:`13664`)
-- More informative exceptions are passed through the csv parser. The exception type would now be the original exception type instead of ``CParserError``. (:issue:`13652`)
+- :meth:`~DataFrame.to_html` now has a ``border`` argument to control the value in the opening ``<table>`` tag. The default is the value of the ``html.border`` option, which defaults to 1. This also affects the notebook HTML repr, but since Jupyter's CSS includes a border-width attribute, the visual effect is the same. (:issue:`11563`).
+- Raise ``ImportError`` in the sql functions when ``sqlalchemy`` is not installed and a connection string is used (:issue:`11920`).
+- Compatibility with matplotlib 2.0. Older versions of pandas should also work with matplotlib 2.0 (:issue:`13333`)
+- ``Timestamp``, ``Period``, ``DatetimeIndex``, ``PeriodIndex`` and ``.dt`` accessor have gained a ``.is_leap_year`` property to check whether the date belongs to a leap year. (:issue:`13727`)
- ``astype()`` will now accept a dict of column name to data types mapping as the ``dtype`` argument. (:issue:`12086`)
- The ``pd.read_json`` and ``DataFrame.to_json`` has gained support for reading and writing json lines with ``lines`` option see :ref:`Line delimited json <io.jsonl>` (:issue:`9180`)
+- :func:`read_excel` now supports the ``true_values`` and ``false_values`` keyword arguments (:issue:`13347`)
+- ``groupby()`` will now accept a scalar and a single-element list for specifying ``level`` on a non-``MultiIndex`` grouper. (:issue:`13907`)
+- Non-convertible dates in an excel date column will be returned without conversion and the column will be ``object`` dtype, rather than raising an exception (:issue:`10001`).
- ``pd.Timedelta(None)`` is now accepted and will return ``NaT``, mirroring ``pd.Timestamp`` (:issue:`13687`)
-- ``Timestamp``, ``Period``, ``DatetimeIndex``, ``PeriodIndex`` and ``.dt`` accessor have gained a ``.is_leap_year`` property to check whether the date belongs to a leap year. (:issue:`13727`)
-- ``pd.read_hdf`` will now raise a ``ValueError`` instead of ``KeyError``, if a mode other than ``r``, ``r+`` and ``a`` is supplied. (:issue:`13623`)
+- ``pd.read_stata()`` can now handle some format 111 files, which are produced by SAS when generating Stata dta files (:issue:`11526`)
+- ``Series`` and ``Index`` now support ``divmod``, which returns a tuple of
+  series or indices. This behaves like a standard binary operator with regard
+  to broadcasting rules (:issue:`14208`); a short sketch is shown below.
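+
+  A minimal sketch (with made-up data) of the new ``divmod`` support:
+
+  .. code-block:: python
+
+     s = pd.Series([4, 7, 10])
+     # element-wise quotient and remainder; both are Series
+     div, rem = divmod(s, 3)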
-.. _whatsnew_0190.api.tolist:
+.. _whatsnew_0190.api:
+
+API changes
+~~~~~~~~~~~
``Series.tolist()`` will now return Python types
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-``Series.tolist()`` will now return Python types in the output, mimicking NumPy ``.tolist()`` behaviour (:issue:`10904`)
+``Series.tolist()`` will now return Python types in the output, mimicking NumPy ``.tolist()`` behavior (:issue:`10904`)
.. ipython:: python
s = pd.Series([1,2,3])
- type(s.tolist()[0])
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -403,12 +548,152 @@ Previous Behavior:
Out[7]:
<class 'numpy.int64'>
-New Behavior:
+**New behavior**:
.. ipython:: python
type(s.tolist()[0])
+.. _whatsnew_0190.api.series_ops:
+
+``Series`` operators for different indexes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The following ``Series`` operators have been changed to make all operators consistent,
+including ``DataFrame`` (:issue:`1134`, :issue:`4581`, :issue:`13538`):
+
+- ``Series`` comparison operators now raise ``ValueError`` when ``index`` are different.
+- ``Series`` logical operators align both ``index`` of left and right hand side.
+
+.. warning::
+   Until 0.18.1, comparing ``Series`` with the same length would succeed even if
+   the ``.index`` are different (the result ignored ``.index``). As of 0.19.0, this will raise a ``ValueError`` to be more strict. This section also describes how to keep the previous behavior or align different indexes, using the flexible comparison methods like ``.eq``.
+
+
+As a result, ``Series`` and ``DataFrame`` operators behave as below:
+
+Arithmetic operators
+""""""""""""""""""""
+
+Arithmetic operators align both ``index`` (no changes).
+
+.. ipython:: python
+
+ s1 = pd.Series([1, 2, 3], index=list('ABC'))
+ s2 = pd.Series([2, 2, 2], index=list('ABD'))
+ s1 + s2
+
+ df1 = pd.DataFrame([1, 2, 3], index=list('ABC'))
+ df2 = pd.DataFrame([2, 2, 2], index=list('ABD'))
+ df1 + df2
+
+Comparison operators
+""""""""""""""""""""
+
+Comparison operators raise ``ValueError`` when ``.index`` are different.
+
+**Previous Behavior** (``Series``):
+
+``Series`` compared values ignoring the ``.index`` as long as both had the same length:
+
+.. code-block:: ipython
+
+ In [1]: s1 == s2
+ Out[1]:
+ A False
+ B True
+ C False
+ dtype: bool
+
+**New behavior** (``Series``):
+
+.. code-block:: ipython
+
+ In [2]: s1 == s2
+ Out[2]:
+ ValueError: Can only compare identically-labeled Series objects
+
+.. note::
+
+ To achieve the same result as previous versions (compare values based on locations ignoring ``.index``), compare both ``.values``.
+
+ .. ipython:: python
+
+ s1.values == s2.values
+
+   If you want to compare ``Series`` aligning their ``.index``, see the flexible comparison methods section below:
+
+ .. ipython:: python
+
+ s1.eq(s2)
+
+**Current Behavior** (``DataFrame``, no change):
+
+.. code-block:: ipython
+
+ In [3]: df1 == df2
+ Out[3]:
+ ValueError: Can only compare identically-labeled DataFrame objects
+
+Logical operators
+"""""""""""""""""
+
+Logical operators align both ``.index`` of left and right hand side.
+
+**Previous behavior** (``Series``), only left hand side ``index`` was kept:
+
+.. code-block:: ipython
+
+ In [4]: s1 = pd.Series([True, False, True], index=list('ABC'))
+ In [5]: s2 = pd.Series([True, True, True], index=list('ABD'))
+ In [6]: s1 & s2
+ Out[6]:
+ A True
+ B False
+ C False
+ dtype: bool
+
+**New behavior** (``Series``):
+
+.. ipython:: python
+
+ s1 = pd.Series([True, False, True], index=list('ABC'))
+ s2 = pd.Series([True, True, True], index=list('ABD'))
+ s1 & s2
+
+.. note::
+ ``Series`` logical operators fill a ``NaN`` result with ``False``.
+
+.. note::
+ To achieve the same result as previous versions (compare values based on only left hand side index), you can use ``reindex_like``:
+
+ .. ipython:: python
+
+ s1 & s2.reindex_like(s1)
+
+**Current Behavior** (``DataFrame``, no change):
+
+.. ipython:: python
+
+ df1 = pd.DataFrame([True, False, True], index=list('ABC'))
+ df2 = pd.DataFrame([True, True, True], index=list('ABD'))
+ df1 & df2
+
+Flexible comparison methods
+"""""""""""""""""""""""""""
+
+``Series`` flexible comparison methods like ``eq``, ``ne``, ``le``, ``lt``, ``ge`` and ``gt`` now align both ``index``. Use these operators if you want to compare two ``Series``
+which have different ``index``.
+
+.. ipython:: python
+
+ s1 = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
+ s2 = pd.Series([2, 2, 2], index=['b', 'c', 'd'])
+ s1.eq(s2)
+ s1.ge(s2)
+
+Previously, this worked the same as comparison operators (see above).
+
.. _whatsnew_0190.api.promote:
``Series`` type promotion on assignment
@@ -421,7 +706,7 @@ A ``Series`` will now correctly promote its dtype for assignment with incompat v
s = pd.Series()
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -430,7 +715,7 @@ Previous Behavior:
In [3]: s["b"] = 3.0
TypeError: invalid type promotion
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -441,25 +726,34 @@ New Behavior:
.. _whatsnew_0190.api.to_datetime_coerce:
-``.to_datetime()`` when coercing
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``.to_datetime()`` changes
+^^^^^^^^^^^^^^^^^^^^^^^^^^
-A bug is fixed in ``.to_datetime()`` when passing integers or floats, and no ``unit`` and ``errors='coerce'`` (:issue:`13180`).
Previously if ``.to_datetime()`` encountered mixed integers/floats and strings, but no datetimes with ``errors='coerce'`` it would convert all to ``NaT``.
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
In [2]: pd.to_datetime([1, 'foo'], errors='coerce')
Out[2]: DatetimeIndex(['NaT', 'NaT'], dtype='datetime64[ns]', freq=None)
+**Current behavior**:
+
This will now convert integers/floats with the default unit of ``ns``.
.. ipython:: python
pd.to_datetime([1, 'foo'], errors='coerce')
+Bug fixes related to ``.to_datetime()``:
+
+- Bug in ``pd.to_datetime()`` when passing integers or floats, and no ``unit`` and ``errors='coerce'`` (:issue:`13180`).
+- Bug in ``pd.to_datetime()`` when passing invalid datatypes (e.g. bool); will now respect the ``errors`` keyword (:issue:`13176`)
+- Bug in ``pd.to_datetime()`` which overflowed on ``int8``, and ``int16`` dtypes (:issue:`13451`)
+- Bug in ``pd.to_datetime()`` raising ``AttributeError`` when the input contains ``NaN`` together with an invalid string and ``errors='ignore'`` (:issue:`12424`)
+- Bug in ``pd.to_datetime()`` did not cast floats correctly when ``unit`` was specified, resulting in truncated datetime (:issue:`13834`)
+
.. _whatsnew_0190.api.merging:
Merging changes
@@ -474,7 +768,7 @@ Merging will now preserve the dtype of the join keys (:issue:`8596`)
df2 = pd.DataFrame({'key': [1, 2], 'v1': [20, 30]})
df2
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -491,7 +785,7 @@ Previous Behavior:
v1 float64
dtype: object
-New Behavior:
+**New behavior**:
We are able to preserve the join keys
@@ -520,7 +814,7 @@ Percentile identifiers in the index of a ``.describe()`` output will now be roun
s = pd.Series([0, 1, 2, 3, 4])
df = pd.DataFrame([0, 1, 2, 3, 4])
-Previous Behavior:
+**Previous behavior**:
The percentiles were rounded to at most one decimal place, which could raise ``ValueError`` for a data frame if the percentiles were duplicated.
@@ -547,7 +841,7 @@ The percentiles were rounded to at most one decimal place, which could raise ``V
...
ValueError: cannot reindex from a duplicate axis
-New Behavior:
+**New behavior**:
.. ipython:: python
@@ -559,21 +853,59 @@ Furthermore:
- Passing duplicated ``percentiles`` will now raise a ``ValueError``; see the sketch below.
- Bug in ``.describe()`` on a DataFrame with a mixed-dtype column index, which would previously raise a ``TypeError`` (:issue:`13288`)
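+
+A minimal sketch (with made-up data) of the duplicated-percentiles case:
+
+.. code-block:: python
+
+   s = pd.Series([0, 1, 2, 3, 4])
+   s.describe(percentiles=[0.25, 0.25])  # raises ValueError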
+.. _whatsnew_0190.api.period:
+
+``Period`` changes
+^^^^^^^^^^^^^^^^^^
+
+``PeriodIndex`` now has ``period`` dtype
+""""""""""""""""""""""""""""""""""""""""
+
+``PeriodIndex`` now has its own ``period`` dtype. The ``period`` dtype is a
+pandas extension dtype like ``category`` or the :ref:`timezone aware dtype <timeseries.timezone_series>` (``datetime64[ns, tz]``) (:issue:`13941`).
+As a consequence of this change, ``PeriodIndex`` no longer has an integer dtype:
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [1]: pi = pd.PeriodIndex(['2016-08-01'], freq='D')
+
+ In [2]: pi
+ Out[2]: PeriodIndex(['2016-08-01'], dtype='int64', freq='D')
+
+ In [3]: pd.api.types.is_integer_dtype(pi)
+ Out[3]: True
+
+ In [4]: pi.dtype
+ Out[4]: dtype('int64')
+
+**New behavior**:
+
+.. ipython:: python
+
+ pi = pd.PeriodIndex(['2016-08-01'], freq='D')
+ pi
+ pd.api.types.is_integer_dtype(pi)
+ pd.api.types.is_period_dtype(pi)
+ pi.dtype
+ type(pi.dtype)
+
.. _whatsnew_0190.api.periodnat:
``Period('NaT')`` now returns ``pd.NaT``
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+""""""""""""""""""""""""""""""""""""""""
Previously, ``Period`` had its own ``Period('NaT')`` representation, different from ``pd.NaT``. Now ``Period('NaT')`` has been changed to return ``pd.NaT``. (:issue:`12759`, :issue:`13582`)
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
In [5]: pd.Period('NaT', freq='D')
Out[5]: Period('NaT', 'D')
-New Behavior:
+**New behavior**:
These now result in ``pd.NaT`` without providing the ``freq`` option.
@@ -583,9 +915,9 @@ These result in ``pd.NaT`` without providing ``freq`` option.
pd.Period(None)
-To be compat with ``Period`` addition and subtraction, ``pd.NaT`` now supports addition and subtraction with ``int``. Previously it raises ``ValueError``.
+To be compatible with ``Period`` addition and subtraction, ``pd.NaT`` now supports addition and subtraction with ``int``. Previously it raised ``ValueError``.
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -593,13 +925,88 @@ Previous Behavior:
...
ValueError: Cannot add integral value to Timestamp without freq.
-New Behavior:
+**New behavior**:
.. ipython:: python
pd.NaT + 1
pd.NaT - 1
+``PeriodIndex.values`` now returns an array of ``Period`` objects
+""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+
+``.values`` is changed to return an array of ``Period`` objects, rather than an array
+of integers (:issue:`13988`).
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [6]: pi = pd.PeriodIndex(['2011-01', '2011-02'], freq='M')
+ In [7]: pi.values
+ array([492, 493])
+
+**New behavior**:
+
+.. ipython:: python
+
+ pi = pd.PeriodIndex(['2011-01', '2011-02'], freq='M')
+ pi.values
+
+
+.. _whatsnew_0190.api.setops:
+
+Index ``+`` / ``-`` no longer used for set operations
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Addition and subtraction of the base Index type and of DatetimeIndex
+(not the numeric index types)
+previously performed set operations (set union and difference). This
+behavior was already deprecated since 0.15.0 (in favor of using the specific
+``.union()`` and ``.difference()`` methods), and is now disabled. When
+possible, ``+`` and ``-`` are now used for element-wise operations, for
+example for concatenating strings or subtracting datetimes
+(:issue:`8227`, :issue:`14127`).
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [1]: pd.Index(['a', 'b']) + pd.Index(['a', 'c'])
+ FutureWarning: using '+' to provide set union with Indexes is deprecated, use '|' or .union()
+ Out[1]: Index(['a', 'b', 'c'], dtype='object')
+
+**New behavior**: the same operation will now perform element-wise addition:
+
+.. ipython:: python
+
+ pd.Index(['a', 'b']) + pd.Index(['a', 'c'])
+
+Note that numeric Index objects already performed element-wise operations.
+For example, the behavior of adding two integer Indexes is unchanged.
+The base ``Index`` is now made consistent with this behavior.
+
+.. ipython:: python
+
+ pd.Index([1, 2, 3]) + pd.Index([2, 3, 4])
+
+Further, because of this change, it is now possible to subtract two
+``DatetimeIndex`` objects, resulting in a ``TimedeltaIndex``:
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [1]: pd.DatetimeIndex(['2016-01-01', '2016-01-02']) - pd.DatetimeIndex(['2016-01-02', '2016-01-03'])
+ FutureWarning: using '-' to provide set differences with datetimelike Indexes is deprecated, use .difference()
+ Out[1]: DatetimeIndex(['2016-01-01'], dtype='datetime64[ns]', freq=None)
+
+**New behavior**:
+
+.. ipython:: python
+
+ pd.DatetimeIndex(['2016-01-01', '2016-01-02']) - pd.DatetimeIndex(['2016-01-02', '2016-01-03'])
+
.. _whatsnew_0190.api.difference:
@@ -613,7 +1020,7 @@ New Behavior:
idx1 = pd.Index([1, 2, 3, np.nan])
idx2 = pd.Index([0, 1, np.nan])
-Previous Behavior:
+**Previous behavior**:
.. code-block:: ipython
@@ -623,30 +1030,133 @@ Previous Behavior:
In [4]: idx1.symmetric_difference(idx2)
Out[4]: Float64Index([0.0, nan, 2.0, 3.0], dtype='float64')
-New Behavior:
+**New behavior**:
.. ipython:: python
idx1.difference(idx2)
idx1.symmetric_difference(idx2)
+.. _whatsnew_0190.api.unique_index:
+
+``Index.unique`` consistently returns ``Index``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``Index.unique()`` now returns unique values as an
+``Index`` of the appropriate ``dtype`` (:issue:`13395`).
+Previously, most ``Index`` classes returned ``np.ndarray``, and ``DatetimeIndex``,
+``TimedeltaIndex`` and ``PeriodIndex`` returned ``Index`` to keep metadata like timezone.
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [1]: pd.Index([1, 2, 3]).unique()
+ Out[1]: array([1, 2, 3])
+
+ In [2]: pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], tz='Asia/Tokyo').unique()
+ Out[2]:
+ DatetimeIndex(['2011-01-01 00:00:00+09:00', '2011-01-02 00:00:00+09:00',
+ '2011-01-03 00:00:00+09:00'],
+ dtype='datetime64[ns, Asia/Tokyo]', freq=None)
+
+**New behavior**:
+
+.. ipython:: python
+
+ pd.Index([1, 2, 3]).unique()
+ pd.DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], tz='Asia/Tokyo').unique()
+
+.. _whatsnew_0190.api.multiindex:
+
+``MultiIndex`` constructors, ``groupby`` and ``set_index`` preserve categorical dtypes
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``MultiIndex.from_arrays`` and ``MultiIndex.from_product`` will now preserve categorical dtype
+in ``MultiIndex`` levels (:issue:`13743`, :issue:`13854`).
+
+.. ipython:: python
+
+ cat = pd.Categorical(['a', 'b'], categories=list("bac"))
+ lvl1 = ['foo', 'bar']
+ midx = pd.MultiIndex.from_arrays([cat, lvl1])
+ midx
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [4]: midx.levels[0]
+ Out[4]: Index(['b', 'a', 'c'], dtype='object')
+
+   In [5]: midx.get_level_values(0)
+ Out[5]: Index(['a', 'b'], dtype='object')
+
+**New behavior**: the single level is now a ``CategoricalIndex``:
+
+.. ipython:: python
+
+ midx.levels[0]
+ midx.get_level_values(0)
+
+An analogous change has been made to ``MultiIndex.from_product``.
+As a consequence, ``groupby`` and ``set_index`` also preserve categorical dtypes in indexes.
+
+.. ipython:: python
+
+ df = pd.DataFrame({'A': [0, 1], 'B': [10, 11], 'C': cat})
+ df_grouped = df.groupby(by=['A', 'C']).first()
+ df_set_idx = df.set_index(['A', 'C'])
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [11]: df_grouped.index.levels[1]
+ Out[11]: Index(['b', 'a', 'c'], dtype='object', name='C')
+ In [12]: df_grouped.reset_index().dtypes
+ Out[12]:
+ A int64
+ C object
+ B float64
+ dtype: object
+
+ In [13]: df_set_idx.index.levels[1]
+ Out[13]: Index(['b', 'a', 'c'], dtype='object', name='C')
+ In [14]: df_set_idx.reset_index().dtypes
+ Out[14]:
+ A int64
+ C object
+ B int64
+ dtype: object
+
+**New behavior**:
+
+.. ipython:: python
+
+ df_grouped.index.levels[1]
+ df_grouped.reset_index().dtypes
+
+ df_set_idx.index.levels[1]
+ df_set_idx.reset_index().dtypes
+
.. _whatsnew_0190.api.autogenerated_chunksize_index:
-:func:`read_csv` called with ``chunksize`` will progressively enumerate chunks
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+``read_csv`` will progressively enumerate chunks
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-When :func:`read_csv` is called with ``chunksize='n'`` and without specifying an index,
-each chunk used to have an independently generated index from `0`` to ``n-1``.
+When :func:`read_csv` is called with ``chunksize=n`` and without specifying an index,
+each chunk used to have an independently generated index from ``0`` to ``n-1``.
They are now given instead a progressive index, starting from ``0`` for the first chunk,
from ``n`` for the second, and so on, so that, when concatenated, they are identical to
-the result of calling :func:`read_csv` without the ``chunksize=`` argument.
-(:issue:`12185`)
+the result of calling :func:`read_csv` without the ``chunksize=`` argument
+(:issue:`12185`).
.. ipython :: python
data = 'A,B\n0,1\n2,3\n4,5\n6,7'
-Previous behaviour:
+**Previous behavior**:
.. code-block:: ipython
@@ -658,61 +1168,229 @@ Previous behaviour:
0 4 5
1 6 7
-New behaviour:
+**New behavior**:
.. ipython :: python
pd.concat(pd.read_csv(StringIO(data), chunksize=2))
+.. _whatsnew_0190.sparse:
+
+Sparse Changes
+^^^^^^^^^^^^^^
+
+These changes allow pandas to handle sparse data with more dtypes, and aim to provide a smoother experience with data handling.
+
+``int64`` and ``bool`` support enhancements
+"""""""""""""""""""""""""""""""""""""""""""
+
+Sparse data structures have now gained enhanced support for ``int64`` and ``bool`` dtypes (:issue:`667`, :issue:`13849`).
+
+Previously, sparse data were ``float64`` dtype by default, even if all inputs were of ``int`` or ``bool`` dtype. You had to specify ``dtype`` explicitly to create sparse data with ``int64`` dtype. Also, ``fill_value`` had to be specified explicitly because the default was ``np.nan`` which doesn't appear in ``int64`` or ``bool`` data.
+
+.. code-block:: ipython
+
+ In [1]: pd.SparseArray([1, 2, 0, 0])
+ Out[1]:
+ [1.0, 2.0, 0.0, 0.0]
+ Fill: nan
+ IntIndex
+ Indices: array([0, 1, 2, 3], dtype=int32)
+
+ # specifying int64 dtype, but all values are stored in sp_values because
+ # fill_value default is np.nan
+ In [2]: pd.SparseArray([1, 2, 0, 0], dtype=np.int64)
+ Out[2]:
+ [1, 2, 0, 0]
+ Fill: nan
+ IntIndex
+ Indices: array([0, 1, 2, 3], dtype=int32)
+
+ In [3]: pd.SparseArray([1, 2, 0, 0], dtype=np.int64, fill_value=0)
+ Out[3]:
+ [1, 2, 0, 0]
+ Fill: 0
+ IntIndex
+ Indices: array([0, 1], dtype=int32)
+
+As of v0.19.0, sparse data keeps the input dtype, and uses more appropriate ``fill_value`` defaults (``0`` for ``int64`` dtype, ``False`` for ``bool`` dtype).
+
+.. ipython:: python
+
+ pd.SparseArray([1, 2, 0, 0], dtype=np.int64)
+ pd.SparseArray([True, False, False, False])
+
+See the :ref:`docs <sparse.dtype>` for more details.
+
+Operators now preserve dtypes
+"""""""""""""""""""""""""""""
+
+- Sparse data structures can now preserve ``dtype`` after arithmetic operations (:issue:`13848`)
+
+ .. ipython:: python
+
+ s = pd.SparseSeries([0, 2, 0, 1], fill_value=0, dtype=np.int64)
+ s.dtype
+
+ s + 1
+
+- Sparse data structures now support ``astype`` to convert the internal ``dtype`` (:issue:`13900`)
+
+ .. ipython:: python
+
+ s = pd.SparseSeries([1., 0., 2., 0.], fill_value=0)
+ s
+ s.astype(np.int64)
+
+  ``astype`` fails if the data contains values which cannot be converted to the specified ``dtype``.
+  Note that the limitation also applies to ``fill_value``, whose default is ``np.nan``.
+
+ .. code-block:: ipython
+
+ In [7]: pd.SparseSeries([1., np.nan, 2., np.nan], fill_value=np.nan).astype(np.int64)
+ Out[7]:
+ ValueError: unable to coerce current fill_value nan to int64 dtype
+
+Other sparse fixes
+""""""""""""""""""
+
+- Subclassed ``SparseDataFrame`` and ``SparseSeries`` now preserve class types when slicing or transposing. (:issue:`13787`)
+- ``SparseArray`` with ``bool`` dtype now supports logical (bool) operators (:issue:`14000`)
+- Bug in ``SparseSeries`` with ``MultiIndex`` ``[]`` indexing may raise ``IndexError`` (:issue:`13144`)
+- Bug in ``SparseSeries`` with ``MultiIndex`` ``[]`` indexing result may have normal ``Index`` (:issue:`13144`)
+- Bug in ``SparseDataFrame`` in which ``axis=None`` did not default to ``axis=0`` (:issue:`13048`)
+- Bug in ``SparseSeries`` and ``SparseDataFrame`` creation with ``object`` dtype may raise ``TypeError`` (:issue:`11633`)
+- Bug in ``SparseDataFrame`` not respecting the ``dtype`` and ``fill_value`` of a passed ``SparseArray`` or ``SparseSeries`` (:issue:`13866`)
+- Bug in ``SparseArray`` and ``SparseSeries`` don't apply ufunc to ``fill_value`` (:issue:`13853`)
+- Bug in ``SparseSeries.abs`` incorrectly keeps negative ``fill_value`` (:issue:`13853`)
+- Bug in single row slicing on multi-type ``SparseDataFrame`` s, types were previously forced to float (:issue:`13917`)
+- Bug in ``SparseSeries`` slicing changes integer dtype to float (:issue:`8292`)
+- Bug in ``SparseDataFrame`` comparison ops may raise ``TypeError`` (:issue:`13001`)
+- Bug in ``SparseDataFrame.isnull`` raises ``ValueError`` (:issue:`8276`)
+- Bug in ``SparseSeries`` representation with ``bool`` dtype may raise ``IndexError`` (:issue:`13110`)
+- Bug in ``SparseSeries`` and ``SparseDataFrame`` of ``bool`` or ``int64`` dtype may display its values like ``float64`` dtype (:issue:`13110`)
+- Bug in sparse indexing using ``SparseArray`` with ``bool`` dtype may return incorrect result (:issue:`13985`)
+- Bug in ``SparseArray`` created from ``SparseSeries`` may lose ``dtype`` (:issue:`13999`)
+- Bug in ``SparseSeries`` comparison with dense returns normal ``Series`` rather than ``SparseSeries`` (:issue:`13999`)
+
+
+.. _whatsnew_0190.indexer_dtype:
+
+Indexer dtype changes
+^^^^^^^^^^^^^^^^^^^^^
+
+.. note::
+
+   This change only affects 64 bit python running on Windows, and only affects relatively advanced
+   indexing operations.
+
+Methods such as ``Index.get_indexer`` that return an indexer array, coerce that array to a "platform int", so that it can be
+directly used in 3rd party library operations like ``numpy.take``. Previously, a platform int was defined as ``np.int_``
+which corresponds to a C integer, but the correct type, and what is being used now, is ``np.intp``, which corresponds
+to the C integer size that can hold a pointer (:issue:`3033`, :issue:`13972`).
+
+These types are the same on many platforms, but for 64 bit python on Windows,
+``np.int_`` is 32 bits, and ``np.intp`` is 64 bits. Changing this behavior improves performance for many
+operations on that platform.
+
+**Previous behavior**:
+
+.. code-block:: ipython
+
+ In [1]: i = pd.Index(['a', 'b', 'c'])
+
+ In [2]: i.get_indexer(['b', 'b', 'c']).dtype
+ Out[2]: dtype('int32')
+
+**New behavior**:
+
+.. code-block:: ipython
+
+ In [1]: i = pd.Index(['a', 'b', 'c'])
+
+ In [2]: i.get_indexer(['b', 'b', 'c']).dtype
+ Out[2]: dtype('int64')
+
+
+.. _whatsnew_0190.api.other:
+
+Other API Changes
+^^^^^^^^^^^^^^^^^
+
+- ``Timestamp.to_pydatetime`` will issue a ``UserWarning`` when ``warn=True`` and the instance has a non-zero number of nanoseconds; previously this would print a message to stdout (:issue:`14101`).
+- ``Series.unique()`` with datetime and timezone now returns an array of ``Timestamp`` with timezone (:issue:`13565`).
+- ``Panel.to_sparse()`` will raise a ``NotImplementedError`` exception when called (:issue:`13778`).
+- ``Index.reshape()`` will raise a ``NotImplementedError`` exception when called (:issue:`12882`).
+- ``.filter()`` enforces mutual exclusion of the keyword arguments (:issue:`12399`).
+- ``eval``'s upcasting rules for ``float32`` types have been updated to be more consistent with NumPy's rules. New behavior will not upcast to ``float64`` if you multiply a pandas ``float32`` object by a scalar float64 (:issue:`12388`).
+- An ``UnsupportedFunctionCall`` error is now raised if NumPy ufuncs like ``np.mean`` are called on groupby or resample objects (:issue:`12811`).
+- ``__setitem__`` will no longer apply a callable rhs as a function instead of storing it. Call ``where`` directly to get the previous behavior (:issue:`13299`).
+- Calls to ``.sample()`` will respect the random seed set via ``numpy.random.seed(n)`` (:issue:`13161`)
+- ``Styler.apply`` is now more strict about the outputs your function must return. For ``axis=0`` or ``axis=1``, the output shape must be identical. For ``axis=None``, the output must be a DataFrame with identical columns and index labels (:issue:`13222`).
+- ``Float64Index.astype(int)`` will now raise ``ValueError`` if ``Float64Index`` contains ``NaN`` values (:issue:`13149`)
+- ``TimedeltaIndex.astype(int)`` and ``DatetimeIndex.astype(int)`` will now return ``Int64Index`` instead of ``np.array`` (:issue:`13209`)
+- Passing ``Period`` with multiple frequencies to normal ``Index`` now returns ``Index`` with ``object`` dtype (:issue:`13664`)
+- ``PeriodIndex.fillna`` with ``Period`` has different freq now coerces to ``object`` dtype (:issue:`13664`)
+- Faceted boxplots from ``DataFrame.boxplot(by=col)`` now return a ``Series`` when ``return_type`` is not None. Previously these returned an ``OrderedDict``. Note that when ``return_type=None``, the default, these still return a 2-D NumPy array (:issue:`12216`, :issue:`7096`).
+- ``pd.read_hdf`` will now raise a ``ValueError`` instead of ``KeyError``, if a mode other than ``r``, ``r+`` and ``a`` is supplied. (:issue:`13623`)
+- ``pd.read_csv()``, ``pd.read_table()``, and ``pd.read_hdf()`` raise the builtin ``FileNotFoundError`` exception for Python 3.x when called on a nonexistent file; this is back-ported as ``IOError`` in Python 2.x (:issue:`14086`)
+- More informative exceptions are passed through the csv parser. The exception type would now be the original exception type instead of ``CParserError`` (:issue:`13652`).
+- ``pd.read_csv()`` in the C engine will now issue a ``ParserWarning`` or raise a ``ValueError`` when ``sep`` encoded is more than one character long (:issue:`14065`)
+- ``DataFrame.values`` will now return ``float64`` with a ``DataFrame`` of mixed ``int64`` and ``uint64`` dtypes, conforming to ``np.find_common_type`` (:issue:`10364`, :issue:`13917`)
+- ``.groupby.groups`` will now return a dictionary of ``Index`` objects, rather than a dictionary of ``np.ndarray`` or ``lists`` (:issue:`14293`); a short sketch follows below.
+
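+A minimal sketch (with made-up data) of the new ``.groups`` return value:
+
+.. code-block:: python
+
+   df = pd.DataFrame({'A': ['x', 'x', 'y'], 'B': [1, 2, 3]})
+   # the values of the returned mapping are now Index objects rather than arrays/lists
+   df.groupby('A').groups
+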
.. _whatsnew_0190.deprecations:
Deprecations
-^^^^^^^^^^^^
-- ``Categorical.reshape`` has been deprecated and will be removed in a subsequent release (:issue:`12882`)
-- ``Series.reshape`` has been deprecated and will be removed in a subsequent release (:issue:`12882`)
-
+~~~~~~~~~~~~
+- ``Series.reshape`` and ``Categorical.reshape`` have been deprecated and will be removed in a subsequent release (:issue:`12882`)
+- ``PeriodIndex.to_datetime`` has been deprecated in favor of ``PeriodIndex.to_timestamp`` (:issue:`8254`)
+- ``Timestamp.to_datetime`` has been deprecated in favor of ``Timestamp.to_pydatetime`` (:issue:`8254`)
+- ``Index.to_datetime`` and ``DatetimeIndex.to_datetime`` have been deprecated in favor of ``pd.to_datetime`` (:issue:`8254`)
+- ``pandas.core.datetools`` module has been deprecated and will be removed in a subsequent release (:issue:`14094`)
+- ``SparseList`` has been deprecated and will be removed in a future version (:issue:`13784`)
- ``DataFrame.to_html()`` and ``DataFrame.to_latex()`` have dropped the ``colSpace`` parameter in favor of ``col_space`` (:issue:`13857`)
- ``DataFrame.to_sql()`` has deprecated the ``flavor`` parameter, as it is superfluous when SQLAlchemy is not installed (:issue:`13611`)
-- ``compact_ints`` and ``use_unsigned`` have been deprecated in ``pd.read_csv()`` and will be removed in a future version (:issue:`13320`)
-- ``buffer_lines`` has been deprecated in ``pd.read_csv()`` and will be removed in a future version (:issue:`13360`)
-- ``as_recarray`` has been deprecated in ``pd.read_csv()`` and will be removed in a future version (:issue:`13373`)
-- ``skip_footer`` has been deprecated in ``pd.read_csv()`` in favor of ``skipfooter`` and will be removed in a future version (:issue:`13349`)
+- Deprecated ``read_csv`` keywords:
+
+ - ``compact_ints`` and ``use_unsigned`` have been deprecated and will be removed in a future version (:issue:`13320`)
+ - ``buffer_lines`` has been deprecated and will be removed in a future version (:issue:`13360`)
+ - ``as_recarray`` has been deprecated and will be removed in a future version (:issue:`13373`)
+ - ``skip_footer`` has been deprecated in favor of ``skipfooter`` and will be removed in a future version (:issue:`13349`)
+
- top-level ``pd.ordered_merge()`` has been renamed to ``pd.merge_ordered()`` and the original name will be removed in a future version (:issue:`13358`)
- ``Timestamp.offset`` property (and named arg in the constructor), has been deprecated in favor of ``freq`` (:issue:`12160`)
- ``pd.tseries.util.pivot_annual`` is deprecated. Use ``pivot_table`` as alternative, an example is :ref:`here <cookbook.pivot>` (:issue:`736`)
-- ``pd.tseries.util.isleapyear`` has been deprecated and will be removed in a subsequent release. Datetime-likes now have a ``.is_leap_year`` property. (:issue:`13727`)
-- ``Panel4D`` and ``PanelND`` constructors are deprecated and will be removed in a future version. The recommended way to represent these types of n-dimensional data are with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas provides a :meth:`~Panel4D.to_xarray` method to automate this conversion. (:issue:`13564`)
+- ``pd.tseries.util.isleapyear`` has been deprecated and will be removed in a subsequent release. Datetime-likes now have a ``.is_leap_year`` property (:issue:`13727`)
+- ``Panel4D`` and ``PanelND`` constructors are deprecated and will be removed in a future version. The recommended way to represent these types of n-dimensional data is with the `xarray package <http://xarray.pydata.org/en/stable/>`__. Pandas provides a :meth:`~Panel4D.to_xarray` method to automate this conversion (:issue:`13564`).
+- ``pandas.tseries.frequencies.get_standard_freq`` is deprecated. Use ``pandas.tseries.frequencies.to_offset(freq).rule_code`` instead (:issue:`13874`)
+- ``pandas.tseries.frequencies.to_offset``'s ``freqstr`` keyword is deprecated in favor of ``freq`` (:issue:`13874`)
+- ``Categorical.from_array`` has been deprecated and will be removed in a future version (:issue:`13854`)
.. _whatsnew_0190.prior_deprecations:
Removal of prior version deprecations/changes
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
- The ``SparsePanel`` class has been removed (:issue:`13778`)
- The ``pd.sandbox`` module has been removed in favor of the external library ``pandas-qt`` (:issue:`13670`)
- The ``pandas.io.data`` and ``pandas.io.wb`` modules are removed in favor of
the `pandas-datareader package <https://github.com/pydata/pandas-datareader>`__ (:issue:`13724`).
+- The ``pandas.tools.rplot`` module has been removed in favor of
+ the `seaborn package <https://github.com/mwaskom/seaborn>`__ (:issue:`13855`)
- ``DataFrame.to_csv()`` has dropped the ``engine`` parameter, as was deprecated in 0.17.1 (:issue:`11274`, :issue:`13419`)
- ``DataFrame.to_dict()`` has dropped the ``outtype`` parameter in favor of ``orient`` (:issue:`13627`, :issue:`8486`)
- ``pd.Categorical`` has dropped setting of the ``ordered`` attribute directly in favor of the ``set_ordered`` method (:issue:`13671`)
-- ``pd.Categorical`` has dropped the ``levels`` attribute in favour of ``categories`` (:issue:`8376`)
+- ``pd.Categorical`` has dropped the ``levels`` attribute in favor of ``categories`` (:issue:`8376`)
- ``DataFrame.to_sql()`` has dropped the ``mysql`` option for the ``flavor`` parameter (:issue:`13611`)
-- ``pd.Index`` has dropped the ``diff`` method in favour of ``difference`` (:issue:`13669`)
-
+- ``Panel.shift()`` has dropped the ``lags`` parameter in favor of ``periods`` (:issue:`14041`)
+- ``pd.Index`` has dropped the ``diff`` method in favor of ``difference`` (:issue:`13669`)
+- ``pd.DataFrame`` has dropped the ``to_wide`` method in favor of ``to_panel`` (:issue:`14039`)
- ``Series.to_csv`` has dropped the ``nanRep`` parameter in favor of ``na_rep`` (:issue:`13804`)
- ``Series.xs``, ``DataFrame.xs``, ``Panel.xs``, ``Panel.major_xs``, and ``Panel.minor_xs`` have dropped the ``copy`` parameter (:issue:`13781`)
- ``str.split`` has dropped the ``return_type`` parameter in favor of ``expand`` (:issue:`13701`)
-- Removal of the legacy time rules (offset aliases), deprecated since 0.17.0 (this has been alias since 0.8.0) (:issue:`13590`)
-
- Previous Behavior:
-
- .. code-block:: ipython
-
- In [2]: pd.date_range('2016-07-01', freq='W@MON', periods=3)
- pandas/tseries/frequencies.py:465: FutureWarning: Freq "W@MON" is deprecated, use "W-MON" as alternative.
- Out[2]: DatetimeIndex(['2016-07-04', '2016-07-11', '2016-07-18'], dtype='datetime64[ns]', freq='W-MON')
-
- Now legacy time rules raises ``ValueError``. For the list of currently supported offsets, see :ref:`here <timeseries.offset_aliases>`
-
+- Removal of the legacy time rules (offset aliases), deprecated since 0.17.0 (these had been aliases since 0.8.0) (:issue:`13590`, :issue:`13868`). Legacy time rules now raise ``ValueError``. For the list of currently supported offsets, see :ref:`here <timeseries.offset_aliases>`.
+- The default value for the ``return_type`` parameter for ``DataFrame.plot.box`` and ``DataFrame.boxplot`` changed from ``None`` to ``"axes"``. These methods will now return a matplotlib axes by default instead of a dictionary of artists. See :ref:`here <visualization.box.return>` (:issue:`6581`).
- The ``tquery`` and ``uquery`` functions in the ``pandas.io.sql`` module are removed (:issue:`5950`).
@@ -723,8 +1401,7 @@ Performance Improvements
- Improved performance of sparse ``IntIndex.intersect`` (:issue:`13082`)
- Improved performance of sparse arithmetic with ``BlockIndex`` when the number of blocks are large, though recommended to use ``IntIndex`` in such cases (:issue:`13082`)
-- increased performance of ``DataFrame.quantile()`` as it now operates per-block (:issue:`11623`)
-
+- Improved performance of ``DataFrame.quantile()`` as it now operates per-block (:issue:`11623`)
- Improved performance of float64 hash table operations, fixing some very slow indexing and groupby operations in python 3 (:issue:`13166`, :issue:`13334`)
- Improved performance of ``DataFrameGroupBy.transform`` (:issue:`12737`)
- Improved performance of ``Index`` and ``Series`` ``.duplicated`` (:issue:`10235`)
@@ -733,8 +1410,9 @@ Performance Improvements
- Improved performance of datetime string parsing in ``DatetimeIndex`` (:issue:`13692`)
- Improved performance of hashing ``Period`` (:issue:`12817`)
- Improved performance of ``factorize`` of datetime with timezone (:issue:`13750`)
-
-
+- Improved performance by lazily creating indexing hashtables on larger Indexes (:issue:`14266`)
+- Improved performance of ``groupby.groups`` (:issue:`14293`)
+- Avoid unnecessary materialization of a MultiIndex when introspecting for memory usage (:issue:`14308`)
.. _whatsnew_0190.bug_fixes:
@@ -742,26 +1420,24 @@ Bug Fixes
~~~~~~~~~
- Bug in ``groupby().shift()``, which could cause a segfault or corruption in rare circumstances when grouping by columns with missing values (:issue:`13813`)
-- Bug in ``pd.read_csv()``, which may cause a segfault or corruption when iterating in large chunks over a stream/file under rare circumstances (:issue:`13703`)
+- Bug in ``groupby().cumsum()`` calculating ``cumprod`` when ``axis=1``. (:issue:`13994`)
+- Bug in ``pd.to_timedelta()`` in which the ``errors`` parameter was not being respected (:issue:`13613`)
- Bug in ``io.json.json_normalize()``, where non-ascii keys raised an exception (:issue:`13213`)
-- Bug in ``SparseSeries`` with ``MultiIndex`` ``[]`` indexing may raise ``IndexError`` (:issue:`13144`)
-- Bug in ``SparseSeries`` with ``MultiIndex`` ``[]`` indexing result may have normal ``Index`` (:issue:`13144`)
-- Bug in ``SparseDataFrame`` in which ``axis=None`` did not default to ``axis=0`` (:issue:`13048`)
-- Bug in ``SparseSeries`` and ``SparseDataFrame`` creation with ``object`` dtype may raise ``TypeError`` (:issue:`11633`)
- Bug when passing a not-default-indexed ``Series`` as ``xerr`` or ``yerr`` in ``.plot()`` (:issue:`11858`)
+- Bug in area plot draws legend incorrectly if subplot is enabled or legend is moved after plot (matplotlib 1.5.0 is required to draw area plot legend properly) (:issue:`9161`, :issue:`13544`)
+- Bug in ``DataFrame`` assignment with an object-dtyped ``Index`` where the resultant column is mutable to the original object. (:issue:`13522`)
- Bug in matplotlib ``AutoDataFormatter``; this restores the second scaled formatting and re-adds micro-second scaled formatting (:issue:`13131`)
- Bug in selection from a ``HDFStore`` with a fixed format and ``start`` and/or ``stop`` specified will now return the selected range (:issue:`8287`)
+- Bug in ``Categorical.from_codes()`` where an unhelpful error was raised when an invalid ``ordered`` parameter was passed in (:issue:`14058`)
- Bug in ``Series`` construction from a tuple of integers on windows not returning default dtype (int64) (:issue:`13646`)
-
+- Bug in ``TimedeltaIndex`` addition with a Datetime-like object where addition overflow was not being caught (:issue:`14068`)
- Bug in ``.groupby(..).resample(..)`` when the same object is called multiple times (:issue:`13174`)
- Bug in ``.to_records()`` when index name is a unicode string (:issue:`13172`)
-
- Bug in calling ``.memory_usage()`` on object which doesn't implement (:issue:`12924`)
-
- Regression in ``Series.quantile`` with nans (also shows up in ``.median()`` and ``.describe()`` ); furthermore now names the ``Series`` with the quantile (:issue:`13098`, :issue:`13146`)
-
- Bug in ``SeriesGroupBy.transform`` with datetime values and missing groups (:issue:`13191`)
-
+- Bug where empty ``Series`` were incorrectly coerced in datetime-like numeric operations (:issue:`13844`)
+- Bug in ``Categorical`` constructor when passed a ``Categorical`` containing datetimes with timezones (:issue:`14190`)
- Bug in ``Series.str.extractall()`` with ``str`` index raises ``ValueError`` (:issue:`13156`)
- Bug in ``Series.str.extractall()`` with single group and quantifier (:issue:`13382`)
- Bug in ``DatetimeIndex`` and ``Period`` subtraction raises ``ValueError`` or ``AttributeError`` rather than ``TypeError`` (:issue:`13078`)
@@ -778,18 +1454,28 @@ Bug Fixes
- Bug in ``DatetimeTZDtype`` dtype with ``dateutil.tz.tzlocal`` cannot be regarded as valid dtype (:issue:`13583`)
- Bug in ``pd.read_hdf()`` where attempting to load an HDF file with a single dataset, that had one or more categorical columns, failed unless the key argument was set to the name of the dataset. (:issue:`13231`)
- Bug in ``.rolling()`` that allowed a negative integer window in construction of the ``Rolling()`` object, but would later fail on aggregation (:issue:`13383`)
-
+- Bug in ``Series`` indexing with tuple-valued data and a numeric index (:issue:`13509`)
- Bug in printing ``pd.DataFrame`` where unusual elements with the ``object`` dtype were causing segfaults (:issue:`13717`)
+- Bug in ranking ``Series`` which could result in segfaults (:issue:`13445`)
- Bug in various index types, which did not propagate the name of passed index (:issue:`12309`)
- Bug in ``DatetimeIndex``, which did not honour the ``copy=True`` (:issue:`13205`)
- Bug in ``DatetimeIndex.is_normalized`` returns incorrectly for normalized date_range in case of local timezones (:issue:`13459`)
-
+- Bug in ``pd.concat`` and ``.append`` may coerce ``datetime64`` and ``timedelta`` to ``object`` dtype containing python built-in ``datetime`` or ``timedelta`` rather than ``Timestamp`` or ``Timedelta`` (:issue:`13626`)
+- Bug in ``PeriodIndex.append`` may raise ``AttributeError`` when the result is ``object`` dtype (:issue:`13221`)
+- Bug in ``CategoricalIndex.append`` may accept normal ``list`` (:issue:`13626`)
+- Bug in ``pd.concat`` and ``.append`` where data with the same timezone got reset to UTC (:issue:`7795`)
+- Bug in ``Series`` and ``DataFrame`` ``.append`` raises ``AmbiguousTimeError`` if data contains datetime near DST boundary (:issue:`13626`)
- Bug in ``DataFrame.to_csv()`` in which float values were being quoted even though quotations were specified for non-numeric values only (:issue:`12922`, :issue:`13259`)
+- Bug in ``DataFrame.describe()`` raising ``ValueError`` with only boolean columns (:issue:`13898`)
- Bug in ``MultiIndex`` slicing where extra elements were returned when level is non-unique (:issue:`12896`)
- Bug in ``.str.replace`` does not raise ``TypeError`` for invalid replacement (:issue:`13438`)
- Bug in ``MultiIndex.from_arrays`` which didn't check for input array lengths matching (:issue:`13599`)
-
-
+- Bug in ``cartesian_product`` and ``MultiIndex.from_product`` which may raise with empty input arrays (:issue:`12258`)
+- Bug in ``pd.read_csv()`` which may cause a segfault or corruption when iterating in large chunks over a stream/file under rare circumstances (:issue:`13703`)
+- Bug in ``pd.read_csv()`` which caused errors to be raised when a dictionary containing scalars is passed in for ``na_values`` (:issue:`12224`)
+- Bug in ``pd.read_csv()`` which caused BOM files to be incorrectly parsed by not ignoring the BOM (:issue:`4793`)
+- Bug in ``pd.read_csv()`` with ``engine='python'`` which raised errors when a numpy array was passed in for ``usecols`` (:issue:`12546`)
+- Bug in ``pd.read_csv()`` where the index columns were being incorrectly parsed when parsed as dates with a ``thousands`` parameter (:issue:`14066`)
- Bug in ``pd.read_csv()`` with ``engine='python'`` in which ``NaN`` values weren't being detected after data was converted to numeric values (:issue:`13314`)
- Bug in ``pd.read_csv()`` in which the ``nrows`` argument was not properly validated for both engines (:issue:`10476`)
- Bug in ``pd.read_csv()`` with ``engine='python'`` in which infinities of mixed-case forms were not being interpreted properly (:issue:`13274`)
@@ -797,21 +1483,20 @@ Bug Fixes
- Bug in ``pd.read_csv()`` with ``engine='python'`` when reading from a ``tempfile.TemporaryFile`` on Windows with Python 3 (:issue:`13398`)
- Bug in ``pd.read_csv()`` that prevents ``usecols`` kwarg from accepting single-byte unicode strings (:issue:`13219`)
- Bug in ``pd.read_csv()`` that prevents ``usecols`` from being an empty set (:issue:`13402`)
-- Bug in ``pd.read_csv()`` with ``engine='c'`` in which null ``quotechar`` was not accepted even though ``quoting`` was specified as ``None`` (:issue:`13411`)
+- Bug in ``pd.read_csv()`` in the C engine where the NULL character was not being parsed as NULL (:issue:`14012`)
+- Bug in ``pd.read_csv()`` with ``engine='c'`` in which NULL ``quotechar`` was not accepted even though ``quoting`` was specified as ``None`` (:issue:`13411`)
- Bug in ``pd.read_csv()`` with ``engine='c'`` in which fields were not properly cast to float when quoting was specified as non-numeric (:issue:`13411`)
+- Bug in ``pd.read_csv()`` in Python 2.x with non-UTF8 encoded, multi-character separated data (:issue:`3404`)
+- Bug in ``pd.read_csv()``, where aliases for utf-xx (e.g. UTF-xx, UTF_xx, utf_xx) raised UnicodeDecodeError (:issue:`13549`)
+- Bug in ``pd.read_csv``, ``pd.read_table``, ``pd.read_fwf``, ``pd.read_stata`` and ``pd.read_sas`` where files were opened by parsers but not closed if both ``chunksize`` and ``iterator`` were ``None``. (:issue:`13940`)
+- Bug in ``StataReader``, ``StataWriter``, ``XportReader`` and ``SAS7BDATReader`` where a file was not properly closed when an error was raised. (:issue:`13940`)
- Bug in ``pd.pivot_table()`` where ``margins_name`` is ignored when ``aggfunc`` is a list (:issue:`13354`)
- Bug in ``pd.Series.str.zfill``, ``center``, ``ljust``, ``rjust``, and ``pad`` when passing non-integers, did not raise ``TypeError`` (:issue:`13598`)
- Bug in checking for any null objects in a ``TimedeltaIndex``, which always returned ``True`` (:issue:`13603`)
-
-
-
- Bug in ``Series`` arithmetic raises ``TypeError`` if it contains datetime-like as ``object`` dtype (:issue:`13043`)
-
-- Bug ``Series.isnull`` and ``Series.notnull`` ignore ``Period('NaT')`` (:issue:`13737`)
-- Bug ``Series.fillna`` and ``Series.dropna`` don't affect to ``Period('NaT')`` (:issue:`13737`)
-
-- Bug in ``pd.to_datetime()`` when passing invalid datatypes (e.g. bool); will now respect the ``errors`` keyword (:issue:`13176`)
-- Bug in ``pd.to_datetime()`` which overflowed on ``int8``, and ``int16`` dtypes (:issue:`13451`)
+- Bug in ``Series.isnull()`` and ``Series.notnull()`` ignoring ``Period('NaT')`` (:issue:`13737`)
+- Bug in ``Series.fillna()`` and ``Series.dropna()`` not affecting ``Period('NaT')`` (:issue:`13737`)
+- Bug in ``.fillna(value=np.nan)`` incorrectly raises ``KeyError`` on a ``category`` dtyped ``Series`` (:issue:`14021`)
- Bug in extension dtype creation where the created types were not is/identical (:issue:`13285`)
- Bug in ``.resample(..)`` where incorrect warnings were triggered by IPython introspection (:issue:`13618`)
- Bug in ``NaT`` - ``Period`` raises ``AttributeError`` (:issue:`13071`)
@@ -819,43 +1504,62 @@ Bug Fixes
- Bug in ``Series`` and ``Index`` comparison may output incorrect result if it contains ``NaT`` with ``object`` dtype (:issue:`13592`)
- Bug in ``Period`` addition raises ``TypeError`` if ``Period`` is on right hand side (:issue:`13069`)
- Bug in ``Period`` and ``Series`` or ``Index`` comparison raises ``TypeError`` (:issue:`13200`)
-- Bug in ``pd.set_eng_float_format()`` that would prevent NaN's from formatting (:issue:`11981`)
+- Bug in ``pd.set_eng_float_format()`` that would prevent NaN and Inf from formatting (:issue:`11981`)
- Bug in ``.unstack`` with ``Categorical`` dtype resets ``.ordered`` to ``True`` (:issue:`13249`)
- Clean some compile time warnings in datetime parsing (:issue:`13607`)
- Bug in ``factorize`` raises ``AmbiguousTimeError`` if data contains datetime near DST boundary (:issue:`13750`)
- Bug in ``.set_index`` raises ``AmbiguousTimeError`` if new index contains DST boundary and multi levels (:issue:`12920`)
+- Bug in ``.shift`` raises ``AmbiguousTimeError`` if data contains datetime near DST boundary (:issue:`13926`)
- Bug in ``pd.read_hdf()`` returns incorrect result when a ``DataFrame`` with a ``categorical`` column and a query which doesn't match any values (:issue:`13792`)
-
-
+- Bug in ``.iloc`` when indexing with a non lex-sorted MultiIndex (:issue:`13797`)
+- Bug in ``.loc`` when indexing with date strings in a reverse sorted ``DatetimeIndex`` (:issue:`14316`)
- Bug in ``Series`` comparison operators when dealing with zero dim NumPy arrays (:issue:`13006`)
+- Bug in ``.combine_first`` may return incorrect ``dtype`` (:issue:`7630`, :issue:`10567`)
- Bug in ``groupby`` where ``apply`` returns different result depending on whether first result is ``None`` or not (:issue:`12824`)
- Bug in ``groupby(..).nth()`` where the group key is included inconsistently if called after ``.head()/.tail()`` (:issue:`12839`)
- Bug in ``.to_html``, ``.to_latex`` and ``.to_string`` silently ignore custom datetime formatter passed through the ``formatters`` key word (:issue:`10690`)
-
+- Bug in ``DataFrame.iterrows()`` not yielding a ``Series`` subclass if defined (:issue:`13977`)
- Bug in ``pd.to_numeric`` when ``errors='coerce'`` and input contains non-hashable objects (:issue:`13324`)
- Bug in invalid ``Timedelta`` arithmetic and comparison may raise ``ValueError`` rather than ``TypeError`` (:issue:`13624`)
- Bug in invalid datetime parsing in ``to_datetime`` and ``DatetimeIndex`` may raise ``TypeError`` rather than ``ValueError`` (:issue:`11169`, :issue:`11287`)
- Bug in ``Index`` created with tz-aware ``Timestamp`` and mismatched ``tz`` option incorrectly coerces timezone (:issue:`13692`)
- Bug in ``DatetimeIndex`` with nanosecond frequency does not include timestamp specified with ``end`` (:issue:`13672`)
-
+- Bug in ``Series`` when setting a slice with a ``np.timedelta64`` (:issue:`14155`)
- Bug in ``Index`` raises ``OutOfBoundsDatetime`` if ``datetime`` exceeds ``datetime64[ns]`` bounds, rather than coercing to ``object`` dtype (:issue:`13663`)
+- Bug in ``Index`` may ignore specified ``datetime64`` or ``timedelta64`` passed as ``dtype`` (:issue:`13981`)
- Bug where ``RangeIndex`` can be created with no arguments rather than raising ``TypeError`` (:issue:`13793`)
-- Bug in ``.value_counts`` raises ``OutOfBoundsDatetime`` if data exceeds ``datetime64[ns]`` bounds (:issue:`13663`)
+- Bug in ``.value_counts()`` raises ``OutOfBoundsDatetime`` if data exceeds ``datetime64[ns]`` bounds (:issue:`13663`)
- Bug in ``DatetimeIndex`` may raise ``OutOfBoundsDatetime`` if input ``np.datetime64`` has other unit than ``ns`` (:issue:`9114`)
- Bug in ``Series`` creation with ``np.datetime64`` which has other unit than ``ns`` as ``object`` dtype results in incorrect values (:issue:`13876`)
-
-- Bug in ``isnull`` ``notnull`` raise ``TypeError`` if input datetime-like has other unit than ``ns`` (:issue:`13389`)
-- Bug in ``.merge`` may raise ``TypeError`` if input datetime-like has other unit than ``ns`` (:issue:`13389`)
-
-
-
+- Bug in ``resample`` with timedelta data where data was cast to float (:issue:`13119`).
+- Bug in ``pd.isnull()`` and ``pd.notnull()`` raise ``TypeError`` if input datetime-like has other unit than ``ns`` (:issue:`13389`)
+- Bug in ``pd.merge()`` may raise ``TypeError`` if input datetime-like has other unit than ``ns`` (:issue:`13389`)
+- Bug in ``HDFStore``/``read_hdf()`` discarded ``DatetimeIndex.name`` if ``tz`` was set (:issue:`13884`)
- Bug in ``Categorical.remove_unused_categories()`` changes ``.codes`` dtype to platform int (:issue:`13261`)
- Bug in ``groupby`` with ``as_index=False`` returns all NaN's when grouping on multiple columns including a categorical one (:issue:`13204`)
- Bug in ``df.groupby(...)[...]`` where getitem with ``Int64Index`` raised an error (:issue:`13731`)
-
+- Bug in the CSS classes assigned to ``DataFrame.style`` for index names. Previously they were assigned ``"col_heading level<n> col<c>"`` where ``n`` was the number of levels + 1. Now they are assigned ``"index_name level<n>"``, where ``n`` is the correct level for that MultiIndex.
- Bug where ``pd.read_gbq()`` could throw ``ImportError: No module named discovery`` as a result of a naming conflict with another python package called apiclient (:issue:`13454`)
- Bug in ``Index.union`` returns an incorrect result with a named empty index (:issue:`13432`)
- Bugs in ``Index.difference`` and ``DataFrame.join`` raise in Python3 when using mixed-integer indexes (:issue:`13432`, :issue:`12814`)
-
+- Bug in subtracting a tz-aware ``datetime.datetime`` from a tz-aware ``datetime64`` series (:issue:`14088`)
- Bug in ``.to_excel()`` when DataFrame contains a MultiIndex which contains a label with a NaN value (:issue:`13511`)
-- Bug in ``pd.read_csv`` in Python 2.x with non-UTF8 encoded, multi-character separated data (:issue:`3404`)
+- Bug where an invalid frequency offset string like "D1" or "-2-3H" may not raise ``ValueError`` (:issue:`13930`)
+- Bug in ``concat`` and ``groupby`` for hierarchical frames with ``RangeIndex`` levels (:issue:`13542`).
+- Bug in ``Series.str.contains()`` for Series containing only ``NaN`` values of ``object`` dtype (:issue:`14171`)
+- Bug in ``agg()`` function on groupby dataframe changes dtype of ``datetime64[ns]`` column to ``float64`` (:issue:`12821`)
+- Bug where using a NumPy ufunc with ``PeriodIndex`` to add or subtract an integer raises ``IncompatibleFrequency``. Note that using a standard operator like ``+`` or ``-`` is recommended, because standard operators use a more efficient path (:issue:`13980`)
+- Bug in operations on ``NaT`` returning ``float`` instead of ``datetime64[ns]`` (:issue:`12941`)
+- Bug in ``Series`` flexible arithmetic methods (like ``.add()``) raises ``ValueError`` when ``axis=None`` (:issue:`13894`)
+- Bug in ``DataFrame.to_csv()`` with ``MultiIndex`` columns in which a stray empty line was added (:issue:`6618`)
+- Bug in ``DatetimeIndex``, ``TimedeltaIndex`` and ``PeriodIndex.equals()`` may return ``True`` when input isn't ``Index`` but contains the same values (:issue:`13107`)
+- Bug in assignment against datetime with timezone may not work if it contains datetime near DST boundary (:issue:`14146`)
+- Bug in ``pd.eval()`` and ``HDFStore`` query truncating long float literals with python 2 (:issue:`14241`)
+- Bug in ``Index`` raises ``KeyError`` displaying an incorrect column when the column is not in the DataFrame and the columns contain duplicate values (:issue:`13822`)
+- Bug in ``Period`` and ``PeriodIndex`` creating wrong dates when frequency has combined offset aliases (:issue:`13874`)
+- Bug in ``.to_string()`` when called with an integer ``line_width`` and ``index=False`` raises an ``UnboundLocalError`` because ``idx`` is referenced before assignment.
+- Bug in ``eval()`` where the ``resolvers`` argument would not accept a list (:issue:`14095`)
+- Bugs in ``stack``, ``get_dummies``, ``make_axis_dummies`` which don't preserve categorical dtypes in (multi)indexes (:issue:`13854`)
+- ``PeriodIndex`` can now accept ``list`` and ``array`` which contain ``pd.NaT`` (:issue:`13430`)
+- Bug in ``df.groupby`` where ``.median()`` returns arbitrary values if the grouped DataFrame contains empty bins (:issue:`13629`)
+- Bug in ``Index.copy()`` where ``name`` parameter was ignored (:issue:`14302`)
diff --git a/doc/source/whatsnew/v0.20.0.txt b/doc/source/whatsnew/v0.20.0.txt
index 695e917c76ba0..e66017fa8e71f 100644
--- a/doc/source/whatsnew/v0.20.0.txt
+++ b/doc/source/whatsnew/v0.20.0.txt
@@ -21,9 +21,17 @@ Check the :ref:`API Changes <whatsnew_0200.api_breaking>` and :ref:`deprecations
New features
~~~~~~~~~~~~
+.. _whatsnew_0200.dev_api:
+
+to_datetime can be used with Offset
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``pd.to_datetime`` has a new parameter, ``origin``, to define a reference date for the returned ``DatetimeIndex``.
+
+.. ipython:: python
+
+    pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
+
+The above code returns the given values as days offset from the origin timestamp.
+
.. _whatsnew_0200.enhancements.other:
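As an illustrative aside (not part of the patch itself): once the ``origin`` keyword described above is available, the calls below show the intended behaviour. The expected values follow the docstring example further down in this diff, and the Julian-date call follows the documented rule that ``origin='julian'`` requires ``unit='D'``.

```python
import pandas as pd

# Numeric values are read as `unit` offsets from the supplied origin
# (assumes the new `origin` keyword added by this PR).
pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
# Expected, per the docstring example: 1960-01-02, 1960-01-03, 1960-01-04

# With origin='julian', unit must be 'D'. Julian day 2451544.5 is
# midnight on 2000-01-01, so this should give Timestamp('2000-01-01').
pd.to_datetime([2451544.5], unit='D', origin='julian')
```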
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index 09fb4beb74f28..b3378d2546f0b 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -772,6 +772,48 @@ def test_to_datetime_unit(self):
result = to_datetime([1, 2, 111111111], unit='D', errors='coerce')
tm.assert_index_equal(result, expected)
+ def test_to_datetime_origin(self):
+ units = ['D', 's', 'ms', 'us', 'ns']
+ # Addresses Issue Number 11276, 11745
+ # for origin as julian
+ julian_dates = pd.date_range(
+ '2014-1-1', periods=10).to_julian_date().values
+ result = Series(pd.to_datetime(
+ julian_dates, unit='D', origin='julian'))
+ expected = Series(pd.to_datetime(
+ julian_dates - pd.Timestamp(0).to_julian_date(), unit='D'))
+ assert_series_equal(result, expected)
+
+ # checking for invalid combination of origin='julian' and unit != D
+ for unit in units:
+ if unit == 'D':
+ continue
+ with self.assertRaises(ValueError):
+ pd.to_datetime(julian_dates, unit=unit, origin='julian')
+
+ # for origin as 1960-01-01
+ epoch_1960 = pd.Timestamp('1960-01-01')
+ epoch_timestamp_convertible = [epoch_1960, epoch_1960.to_datetime(),
+ epoch_1960.to_datetime64(),
+ str(epoch_1960)]
+ invalid_origins = ['random_string', '13-24-1990', '0001-01-01']
+ units_from_epoch = [0, 1, 2, 3, 4]
+
+ for unit in units:
+ for epoch in epoch_timestamp_convertible:
+ expected = Series(
+ [pd.Timedelta(x, unit=unit) +
+ epoch_1960 for x in units_from_epoch])
+ result = Series(pd.to_datetime(
+ units_from_epoch, unit=unit, origin=epoch))
+ assert_series_equal(result, expected)
+
+ # check for invalid origins
+ for origin in invalid_origins:
+ with self.assertRaises(ValueError):
+ pd.to_datetime(units_from_epoch, unit=unit,
+ origin=origin)
+
def test_series_ctor_datetime64(self):
rng = date_range('1/1/2000 00:00:00', '1/1/2000 1:59:50', freq='10s')
dates = np.asarray(rng)
diff --git a/pandas/tseries/tools.py b/pandas/tseries/tools.py
index 93d35ff964e69..526546dafba18 100644
--- a/pandas/tseries/tools.py
+++ b/pandas/tseries/tools.py
@@ -179,7 +179,7 @@ def _guess_datetime_format_for_array(arr, **kwargs):
mapping={True: 'coerce', False: 'raise'})
def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
utc=None, box=True, format=None, exact=True, coerce=None,
- unit=None, infer_datetime_format=False):
+ unit=None, infer_datetime_format=False, origin='epoch'):
"""
Convert argument to datetime.
@@ -238,6 +238,19 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
+ origin : scalar convertible to Timestamp / string ('julian', 'epoch'),
+ default 'epoch'.
+ Define reference date. The numeric values would be parsed as number
+ of units (defined by `unit`) since this reference date.
+
+ - If 'epoch', origin is set to 1970-01-01.
+ - If 'julian', unit must be 'D', and origin is set to beginning of
+ Julian Calendar. Julian day number 0 is assigned to the day starting
+ at noon on January 1, 4713 BC.
+ - If Timestamp convertible, origin is set to Timestamp identified by
+ origin.
+
+    .. versionadded:: 0.19.0
Returns
-------
@@ -294,8 +307,14 @@ def to_datetime(arg, errors='raise', dayfirst=False, yearfirst=False,
>>> %timeit pd.to_datetime(s,infer_datetime_format=False)
1 loop, best of 3: 471 ms per loop
- """
+ Using non-epoch origins to parse date
+
+ >>> pd.to_datetime([1,2,3], unit='D', origin=pd.Timestamp('1960-01-01'))
+ 0 1960-01-02
+ 1 1960-01-03
+ 2 1960-01-04
+ """
from pandas.tseries.index import DatetimeIndex
tz = 'utc' if utc else None
@@ -406,22 +425,43 @@ def _convert_listlike(arg, box, format, name=None, tz=tz):
except (ValueError, TypeError):
raise e
- if arg is None:
- return arg
- elif isinstance(arg, tslib.Timestamp):
- return arg
- elif isinstance(arg, ABCSeries):
- from pandas import Series
- values = _convert_listlike(arg._values, False, format)
- return Series(values, index=arg.index, name=arg.name)
- elif isinstance(arg, (ABCDataFrame, MutableMapping)):
- return _assemble_from_unit_mappings(arg, errors=errors)
- elif isinstance(arg, ABCIndexClass):
- return _convert_listlike(arg, box, format, name=arg.name)
- elif is_list_like(arg):
- return _convert_listlike(arg, box, format)
+ def intermediate_result(arg):
+ if origin == 'julian':
+ if unit != 'D':
+ raise ValueError("unit must be 'D' for origin='julian'")
+ try:
+ arg = arg - tslib.Timestamp(0).to_julian_date()
+ except:
+ raise ValueError("incompatible 'arg' type for given "
+ "'origin'='julian'")
+ if arg is None:
+ return arg
+ elif isinstance(arg, tslib.Timestamp):
+ return arg
+ elif isinstance(arg, ABCSeries):
+ from pandas import Series
+ values = _convert_listlike(arg._values, False, format)
+ return Series(values, index=arg.index, name=arg.name)
+ elif isinstance(arg, (ABCDataFrame, MutableMapping)):
+ return _assemble_from_unit_mappings(arg, errors=errors)
+ elif isinstance(arg, ABCIndexClass):
+ return _convert_listlike(arg, box, format, name=arg.name)
+ elif is_list_like(arg):
+ return _convert_listlike(arg, box, format)
+ return _convert_listlike(np.array([arg]), box, format)[0]
+
+ result = intermediate_result(arg)
+
+ offset = None
+ if origin not in ['epoch', 'julian']:
+ try:
+ offset = tslib.Timestamp(origin) - tslib.Timestamp(0)
+ except ValueError:
+ raise ValueError("Invalid 'origin' or 'origin' Out of Bound")
- return _convert_listlike(np.array([arg]), box, format)[0]
+ if offset is not None:
+ result = result + offset
+ return result
# mappings for assembling units
_unit_map = {'year': 'year',
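Editorial note (not part of the patch): for a Timestamp-convertible ``origin``, the handling added above reduces to plain ``Timedelta`` arithmetic. A self-contained sketch of that computation, using only existing pandas APIs:

```python
import pandas as pd

# The offset for a Timestamp-convertible origin is its distance from the
# Unix epoch (Timestamp(0)), exactly as computed in the patch above.
origin = pd.Timestamp('1960-01-01')
offset = origin - pd.Timestamp(0)               # Timedelta('-3653 days')

# Values are first parsed relative to the epoch, then shifted by the offset.
parsed = pd.to_datetime([1, 2, 3], unit='D')    # 1970-01-02 .. 1970-01-04
print(parsed + offset)                          # 1960-01-02 .. 1960-01-04
```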
| Fixes #11276
Fixes #11745
| https://api.github.com/repos/pandas-dev/pandas/pulls/11470 | 2015-10-29T11:20:00Z | 2017-03-28T20:45:42Z | null | 2017-03-28T20:45:49Z |
DOC: Linguistic edit to CONTRIBUTING.md | diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 284ac2fc5b169..352acee23df2d 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,43 +4,22 @@ Contributing to pandas
Where to start?
---------------
-All contributions, bug reports, bug fixes, documentation improvements,
-enhancements and ideas are welcome.
-
-If you are simply looking to start working with the *pandas* codebase,
-navigate to the [GitHub "issues"
-tab](https://github.com/pydata/pandas/issues) and start looking through
-interesting issues. There are a number of issues listed under
-[Docs](https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open)
-and [Difficulty
-Novice](https://github.com/pydata/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22)
-where you could start out.
-
-Or maybe through using *pandas* you have an idea of you own or are
-looking for something in the documentation and thinking 'this can be
-improved'...you can do something about it!
-
-Feel free to ask questions on [mailing
-list](https://groups.google.com/forum/?fromgroups#!forum/pydata)
-
-Bug Reports/Enhancement Requests
---------------------------------
-
-Bug reports are an important part of making *pandas* more stable. Having
-a complete bug report will allow others to reproduce the bug and provide
-insight into fixing. Since many versions of *pandas* are supported,
-knowing version information will also identify improvements made since
-previous versions. Often trying the bug-producing code out on the
-*master* branch is a worthwhile exercise to confirm the bug still
-exists. It is also worth searching existing bug reports and pull
-requests to see if the issue has already been reported and/or fixed.
+All contributions, bug reports, bug fixes, documentation improvements, enhancements and ideas are welcome.
+
+If you are simply looking to start working with the *pandas* codebase, navigate to the [GitHub "issues" tab](https://github.com/pydata/pandas/issues) and start looking through interesting issues. There are a number of issues listed under [Docs](https://github.com/pydata/pandas/issues?labels=Docs&sort=updated&state=open) and [Difficulty Novice](https://github.com/pydata/pandas/issues?q=is%3Aopen+is%3Aissue+label%3A%22Difficulty+Novice%22) where you could start out.
+
+Or maybe through using *pandas* you have an idea of your own or are looking for something in the documentation and thinking 'this can be improved'...you can do something about it!
+
+Feel free to ask questions on the [mailing list](https://groups.google.com/forum/?fromgroups#!forum/pydata) or on [Gitter](https://gitter.im/pydata/pandas).
+
+Bug reports and enhancement requests
+------------------------------------
+
+Bug reports are an important part of making *pandas* more stable. Having a complete bug report will allow others to reproduce the bug and provide insight into fixing. Because many versions of *pandas* are supported, knowing version information will also identify improvements made since previous versions. Trying the bug-producing code out on the *master* branch is often a worthwhile exercise to confirm the bug still exists. It is also worth searching existing bug reports and pull requests to see if the issue has already been reported and/or fixed.
Bug reports must:
-1. Include a short, self-contained Python snippet reproducing the
- problem. You can have the code formatted nicely by using [GitHub
- Flavored
- Markdown](http://github.github.com/github-flavored-markdown/): :
+1. Include a short, self-contained Python snippet reproducing the problem. You can format the code nicely by using [GitHub Flavored Markdown](http://github.github.com/github-flavored-markdown/):
```python
>>> from pandas import DataFrame
@@ -48,81 +27,55 @@ Bug reports must:
...
```
-2. Include the full version string of *pandas* and its dependencies. In
- recent (\>0.12) versions of *pandas* you can use a built in
- function: :
+2. Include the full version string of *pandas* and its dependencies. In versions of *pandas* after 0.12 you can use a built in function:
>>> from pandas.util.print_versions import show_versions
>>> show_versions()
- and in 0.13.1 onwards: :
+ and in *pandas* 0.13.1 onwards:
>>> pd.show_versions()
-3. Explain why the current behavior is wrong/not desired and what you
- expect instead.
+3. Explain why the current behavior is wrong/not desired and what you expect instead.
-The issue will then show up to the *pandas* community and be open to
-comments/ideas from others.
+The issue will then show up to the *pandas* community and be open to comments/ideas from others.
Working with the code
---------------------
-Now that you have an issue you want to fix, enhancement to add, or
-documentation to improve, you need to learn how to work with GitHub and
-the *pandas* code base.
+Now that you have an issue you want to fix, enhancement to add, or documentation to improve, you need to learn how to work with GitHub and the *pandas* code base.
-### Version Control, Git, and GitHub
+### Version control, Git, and GitHub
-To the new user, working with Git is one of the more daunting aspects of
-contributing to *pandas*. It can very quickly become overwhelming, but
-sticking to the guidelines below will make the process straightforward
-and will work without much trouble. As always, if you are having
-difficulties please feel free to ask for help.
+To the new user, working with Git is one of the more daunting aspects of contributing to *pandas*. It can very quickly become overwhelming, but sticking to the guidelines below will help keep the process straightforward and mostly trouble free. As always, if you are having difficulties please feel free to ask for help.
-The code is hosted on [GitHub](https://www.github.com/pydata/pandas). To
-contribute you will need to sign up for a [free GitHub
-account](https://github.com/signup/free). We use
-[Git](http://git-scm.com/) for version control to allow many people to
-work together on the project.
+The code is hosted on [GitHub](https://www.github.com/pydata/pandas). To contribute you will need to sign up for a [free GitHub account](https://github.com/signup/free). We use [Git](http://git-scm.com/) for version control to allow many people to work together on the project.
-Some great resources for learning git:
+Some great resources for learning Git:
- the [GitHub help pages](http://help.github.com/).
-- the [NumPy's
- documentation](http://docs.scipy.org/doc/numpy/dev/index.html).
-- Matthew Brett's
- [Pydagogue](http://matthew-brett.github.com/pydagogue/).
+- the [NumPy documentation](http://docs.scipy.org/doc/numpy/dev/index.html).
+- Matthew Brett's [Pydagogue](http://matthew-brett.github.com/pydagogue/).
-### Getting Started with Git
+### Getting started with Git
-[GitHub has instructions](http://help.github.com/set-up-git-redirect)
-for installing git, setting up your SSH key, and configuring git. All
-these steps need to be completed before working seamlessly with your
-local repository and GitHub.
+[GitHub has instructions](http://help.github.com/set-up-git-redirect) for installing git, setting up your SSH key, and configuring git. All these steps need to be completed before you can work seamlessly between your local repository and GitHub.
### Forking
-You will need your own fork to work on the code. Go to the [pandas
-project page](https://github.com/pydata/pandas) and hit the *fork*
-button. You will want to clone your fork to your machine: :
+You will need your own fork to work on the code. Go to the [pandas project page](https://github.com/pydata/pandas) and hit the `Fork` button. You will want to clone your fork to your machine:
git clone [email protected]:your-user-name/pandas.git pandas-yourname
cd pandas-yourname
git remote add upstream git://github.com/pydata/pandas.git
-This creates the directory pandas-yourname and connects your repository
-to the upstream (main project) *pandas* repository.
+This creates the directory pandas-yourname and connects your repository to the upstream (main project) *pandas* repository.
-You will also need to hook up Travis-CI to your GitHub repository so the
-suite is automatically run when a Pull Request is submitted.
-Instructions are
-[here](http://about.travis-ci.org/docs/user/getting-started/).
+The testing suite will run automatically on Travis-CI once your pull request is submitted. However, if you wish to run the test suite on a branch prior to submitting the pull request, then Travis-CI needs to be hooked up to your GitHub repository. Instructions for doing so are [here](http://about.travis-ci.org/docs/user/getting-started/).
-### Creating a Branch
+### Creating a branch
-You want your master branch to reflect only production-ready code, so
-create a feature branch for making your changes. For example:
+You want your master branch to reflect only production-ready code, so create a feature branch for making your changes. For example:
git branch shiny-new-feature
git checkout shiny-new-feature
@@ -131,41 +84,36 @@ The above can be simplified to:
git checkout -b shiny-new-feature
-This changes your working directory to the shiny-new-feature branch.
-Keep any changes in this branch specific to one bug or feature so it is
-clear what the branch brings to *pandas*. You can have many
-shiny-new-features and switch in between them using the git checkout
-command.
+This changes your working directory to the shiny-new-feature branch. Keep any changes in this branch specific to one bug or feature so it is clear what the branch brings to *pandas*. You can have many shiny-new-features and switch in between them using the git checkout command.
+
+To update this branch, you need to retrieve the changes from the master branch:
+
+ git fetch upstream
+ git rebase upstream/master
+
+This will replay your commits on top of the latest pandas git master. If this leads to merge conflicts, you must resolve these before submitting your pull request. If you have uncommitted changes, you will need to `stash` them prior to updating. This will effectively store your changes and they can be reapplied after updating.
-### Creating a Development Environment
+### Creating a development environment
An easy way to create a *pandas* development environment is as follows.
-- Install either Install Anaconda \<install-anaconda\> or
- Install miniconda \<install-miniconda\>
-- Make sure that you have
- cloned the repository \<contributing-forking\>
-- `cd` to the pandas source directory
+- Install either Anaconda <install.anaconda> or miniconda <install.miniconda>
+- Make sure that you have cloned the repository <contributing.forking>
+- `cd` to the *pandas* source directory
-Tell `conda` to create a new environment, named `pandas_dev`, or any
-name you would like for this environment by running:
+Tell conda to create a new environment, named `pandas_dev`, or any other name you would like for this environment, by running:
conda create -n pandas_dev --file ci/requirements_dev.txt
-For a python 3 environment
+For a python 3 environment:
conda create -n pandas_dev python=3 --file ci/requirements_dev.txt
-If you are on `windows`, then you will need to install the compiler
-linkages:
+If you are on Windows, then you will also need to install the compiler linkages:
conda install -n pandas_dev libpython
-This will create the new environment, and not touch any of your existing
-environments, nor any existing python installation. It will install all
-of the basic dependencies of *pandas*, as well as the development and
-testing tools. If you would like to install other dependencies, you can
-install them as follows:
+This will create the new environment, and not touch any of your existing environments, nor any existing python installation. It will install all of the basic dependencies of *pandas*, as well as the development and testing tools. If you would like to install other dependencies, you can install them as follows:
conda install -n pandas_dev -c pandas pytables scipy
@@ -173,19 +121,15 @@ To install *all* pandas dependencies you can do the following:
conda install -n pandas_dev -c pandas --file ci/requirements_all.txt
-To work in this environment, `activate` it as follows:
+To work in this environment, Windows users should `activate` it as follows:
activate pandas_dev
-At which point, the prompt will change to indicate you are in the new
-development environment.
+Mac OSX and Linux users should use:
-> **note**
->
-> The above syntax is for `windows` environments. To work on
-> `macosx/linux`, use:
->
-> source activate pandas_dev
+ source activate pandas_dev
+
+You will then see a confirmation message to indicate you are in the new development environment.
To view your environments:
@@ -195,148 +139,100 @@ To return to you home root environment:
deactivate
-See the full `conda` docs [here](http://conda.pydata.org/docs).
+See the full conda docs [here](http://conda.pydata.org/docs).
-At this point you can easily do an *in-place* install, as detailed in
-the next section.
+At this point you can easily do an *in-place* install, as detailed in the next section.
### Making changes
-Before making your code changes, it is often necessary to build the code
-that was just checked out. There are two primary methods of doing this.
+Before making your code changes, it is often necessary to build the code that was just checked out. There are two primary methods of doing this.
-1. The best way to develop *pandas* is to build the C extensions
- in-place by running:
+1. The best way to develop *pandas* is to build the C extensions in-place by running:
python setup.py build_ext --inplace
- If you startup the Python interpreter in the *pandas* source
- directory you will call the built C extensions
+    If you start up the Python interpreter in the *pandas* source directory you will call the built C extensions.
2. Another very common option is to do a `develop` install of *pandas*:
python setup.py develop
- This makes a symbolic link that tells the Python interpreter to
- import *pandas* from your development directory. Thus, you can
- always be using the development version on your system without being
- inside the clone directory.
+ This makes a symbolic link that tells the Python interpreter to import *pandas* from your development directory. Thus, you can always be using the development version on your system without being inside the clone directory.
Contributing to the documentation
---------------------------------
-If you're not the developer type, contributing to the documentation is
-still of huge value. You don't even have to be an expert on *pandas* to
-do so! Something as simple as rewriting small passages for clarity as
-you reference the docs is a simple but effective way to contribute. The
-next person to read that passage will be in your debt!
+If you're not the developer type, contributing to the documentation is still of huge value. You don't even have to be an expert on *pandas* to do so! Something as simple as rewriting small passages for clarity as you reference the docs is a simple but effective way to contribute. The next person to read that passage will be in your debt!
-Actually, there are sections of the docs that are worse off by being
-written by experts. If something in the docs doesn't make sense to you,
-updating the relevant section after you figure it out is a simple way to
-ensure it will help the next person.
+In fact, there are sections of the docs that are worse off after being written by experts. If something in the docs doesn't make sense to you, updating the relevant section after you figure it out is a simple way to ensure it will help the next person.
-### About the pandas documentation
+### About the *pandas* documentation
-The documentation is written in **reStructuredText**, which is almost
-like writing in plain English, and built using
-[Sphinx](http://sphinx.pocoo.org/). The Sphinx Documentation has an
-excellent [introduction to reST](http://sphinx.pocoo.org/rest.html).
-Review the Sphinx docs to perform more complex changes to the
-documentation as well.
+The documentation is written in **reStructuredText**, which is almost like writing in plain English, and built using [Sphinx](http://sphinx.pocoo.org/). The Sphinx Documentation has an excellent [introduction to reST](http://sphinx.pocoo.org/rest.html). Review the Sphinx docs to perform more complex changes to the documentation as well.
Some other important things to know about the docs:
-- The *pandas* documentation consists of two parts: the docstrings in
- the code itself and the docs in this folder `pandas/doc/`.
-
- The docstrings provide a clear explanation of the usage of the
- individual functions, while the documentation in this folder
- consists of tutorial-like overviews per topic together with some
- other information (what's new, installation, etc).
-
-- The docstrings follow the **Numpy Docstring Standard** which is used
- widely in the Scientific Python community. This standard specifies
- the format of the different sections of the docstring. See [this
- document](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt)
- for a detailed explanation, or look at some of the existing
- functions to extend it in a similar manner.
-- The tutorials make heavy use of the [ipython
- directive](http://matplotlib.org/sampledoc/ipython_directive.html)
- sphinx extension. This directive lets you put code in the
- documentation which will be run during the doc build. For example:
+- The *pandas* documentation consists of two parts: the docstrings in the code itself and the docs in this folder `pandas/doc/`.
+
+ The docstrings provide a clear explanation of the usage of the individual functions, while the documentation in this folder consists of tutorial-like overviews per topic together with some other information (what's new, installation, etc).
+
+- The docstrings follow the **Numpy Docstring Standard**, which is used widely in the Scientific Python community. This standard specifies the format of the different sections of the docstring. See [this document](https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt) for a detailed explanation, or look at some of the existing functions to extend it in a similar manner.
+- The tutorials make heavy use of the [ipython directive](http://matplotlib.org/sampledoc/ipython_directive.html) sphinx extension. This directive lets you put code in the documentation which will be run during the doc build. For example:
.. ipython:: python
x = 2
x**3
- will be rendered as
+ will be rendered as:
In [1]: x = 2
In [2]: x**3
Out[2]: 8
- This means that almost all code examples in the docs are always run
- (and the output saved) during the doc build. This way, they will
- always be up to date, but it makes the doc building a bit more
- complex.
+ Almost all code examples in the docs are run (and the output saved) during the doc build. This approach means that code examples will always be up to date, but it does make the doc building a bit more complex.
-### How to build the pandas documentation
+> **note**
+>
+> The `.rst` files are used to automatically generate Markdown and HTML versions of the docs. For this reason, please do not edit `CONTRIBUTING.md` directly, but instead make any changes to `doc/source/contributing.rst`. Then, to generate `CONTRIBUTING.md`, use [pandoc](http://johnmacfarlane.net/pandoc/) with the following command:
+>
+> pandoc doc/source/contributing.rst -t markdown_github > CONTRIBUTING.md
+
+The utility script `scripts/api_rst_coverage.py` can be used to compare the list of methods documented in `doc/source/api.rst` (which is used to generate the [API Reference](http://pandas.pydata.org/pandas-docs/stable/api.html) page) and the actual public methods. This will identify methods documented in `doc/source/api.rst` that are not actually class methods, and existing methods that are not documented in `doc/source/api.rst`.
+
+### How to build the *pandas* documentation
#### Requirements
-To build the *pandas* docs there are some extra requirements: you will
-need to have `sphinx` and `ipython` installed.
-[numpydoc](https://github.com/numpy/numpydoc) is used to parse the
-docstrings that follow the Numpy Docstring Standard (see above), but you
-don't need to install this because a local copy of `numpydoc` is
-included in the *pandas* source code.
+To build the *pandas* docs there are some extra requirements: you will need to have `sphinx` and `ipython` installed. [numpydoc](https://github.com/numpy/numpydoc) is used to parse the docstrings that follow the Numpy Docstring Standard (see above), but you don't need to install this because a local copy of numpydoc is included in the *pandas* source code.
-It is easiest to
-create a development environment \<contributing-dev\_env\>, then
-install:
+It is easiest to create a development environment <contributing.dev\_env>, then install:
conda install -n pandas_dev sphinx ipython
-Furthermore, it is recommended to have all [optional
-dependencies](http://pandas.pydata.org/pandas-docs/dev/install.html#optional-dependencies)
-installed. This is not strictly necessary, but be aware that you will
-see some error messages. Because all the code in the documentation is
-executed during the doc build, the examples using this optional
-dependencies will generate errors. Run `pd.show_versions()` to get an
-overview of the installed version of all dependencies.
+Furthermore, it is recommended to have all [optional dependencies](http://pandas.pydata.org/pandas-docs/dev/install.html#optional-dependencies) installed. This is not strictly necessary, but be aware that you will see some error messages when building the docs. This happens because all the code in the documentation is executed during the doc build, and so code examples using optional dependencies will generate errors. Run `pd.show_versions()` to get an overview of the installed version of all dependencies.
> **warning**
>
-> Sphinx version \>= 1.2.2 or the older 1.1.3 is required.
+> You need to have `sphinx` version 1.2.2 or newer, but older than version 1.3. The older version 1.1.3 should also work.
#### Building the documentation
-So how do you build the docs? Navigate to your local the folder
-`pandas/doc/` directory in the console and run:
+So how do you build the docs? Navigate to your local `pandas/doc/` directory in the console and run:
python make.py html
-And then you can find the html output in the folder
-`pandas/doc/build/html/`.
+Then you can find the HTML output in the folder `pandas/doc/build/html/`.
-The first time it will take quite a while, because it has to run all the
-code examples in the documentation and build all generated docstring
-pages. In subsequent evocations, sphinx will try to only build the pages
-that have been modified.
+The first time you build the docs, it will take quite a while because it has to run all the code examples and build all the generated docstring pages. In subsequent evocations, sphinx will try to only build the pages that have been modified.
If you want to do a full clean build, do:
python make.py clean
python make.py build
-Starting with 0.13.1 you can tell `make.py` to compile only a single
-section of the docs, greatly reducing the turn-around time for checking
-your changes. You will be prompted to delete .rst files that aren't
-required, since the last committed version can always be restored from
-git.
+Starting with *pandas* 0.13.1 you can tell `make.py` to compile only a single section of the docs, greatly reducing the turn-around time for checking your changes. You will be prompted to delete `.rst` files that aren't required. This is okay because the prior versions of these files can be checked out from git. However, you must make sure not to commit the file deletions to your Git repository!
#omit autosummary and API section
python make.py clean
@@ -347,76 +243,48 @@ git.
python make.py clean
python make.py --single indexing
-For comparison, a full documentation build may take 10 minutes. a
-`-no-api` build may take 3 minutes and a single section may take 15
-seconds. However, subsequent builds only process portions you changed.
-Now, open the following file in a web browser to see the full
-documentation you just built:
+For comparison, a full documentation build may take 10 minutes, a `-no-api` build may take 3 minutes and a single section may take 15 seconds. Subsequent builds, which only process portions you have changed, will be faster. Open the following file in a web browser to see the full documentation you just built:
pandas/docs/build/html/index.html
-And you'll have the satisfaction of seeing your new and improved
-documentation!
+And you'll have the satisfaction of seeing your new and improved documentation!
+
+#### Building master branch documentation
+
+When pull requests are merged into the *pandas* `master` branch, the main parts of the documentation are also built by Travis-CI. These docs are then hosted [here](http://pandas-docs.github.io/pandas-docs-travis).
Contributing to the code base
-----------------------------
-### Code Standards
+### Code standards
+
+*pandas* uses the [PEP8](http://www.python.org/dev/peps/pep-0008/) standard. There are several tools to ensure you abide by this standard.
+
+We've written a tool to check that your commits are PEP8 great, [pip install pep8radius](https://github.com/hayd/pep8radius). Look at PEP8 fixes in your branch vs master with:
-*pandas* uses the [PEP8](http://www.python.org/dev/peps/pep-0008/)
-standard. There are several tools to ensure you abide by this standard.
+ pep8radius master --diff
-We've written a tool to check that your commits are PEP8 great, [pip
-install pep8radius](https://github.com/hayd/pep8radius). Look at PEP8
-fixes in your branch vs master with:
+and make these changes with:
- pep8radius master --diff` and make these changes with `pep8radius master --diff --in-place`
+ pep8radius master --diff --in-place
-Alternatively, use [flake8](http://pypi.python.org/pypi/flake8) tool for
-checking the style of your code. Additional standards are outlined on
-the [code style wiki
-page](https://github.com/pydata/pandas/wiki/Code-Style-and-Conventions).
+Alternatively, use the [flake8](http://pypi.python.org/pypi/flake8) tool for checking the style of your code. Additional standards are outlined on the [code style wiki page](https://github.com/pydata/pandas/wiki/Code-Style-and-Conventions).
-Please try to maintain backward-compatibility. *Pandas* has lots of
-users with lots of existing code, so don't break it if at all possible.
-If you think breakage is required clearly state why as part of the Pull
-Request. Also, be careful when changing method signatures and add
-deprecation warnings where needed.
+Please try to maintain backward compatibility. *pandas* has lots of users with lots of existing code, so don't break it if at all possible. If you think breakage is required, clearly state why as part of the pull request. Also, be careful when changing method signatures and add deprecation warnings where needed.
-### Test-driven Development/Writing Code
+### Test-driven development/code writing
-*Pandas* is serious about [Test-driven Development
-(TDD)](http://en.wikipedia.org/wiki/Test-driven_development). This
-development process "relies on the repetition of a very short
-development cycle: first the developer writes an (initially failing)
-automated test case that defines a desired improvement or new function,
-then produces the minimum amount of code to pass that test." So, before
-actually writing any code, you should write your tests. Often the test
-can be taken from the original GitHub issue. However, it is always worth
-considering additional use cases and writing corresponding tests.
+*pandas* is serious about testing and strongly encourages contributors to embrace [test-driven development (TDD)](http://en.wikipedia.org/wiki/Test-driven_development). This development process "relies on the repetition of a very short development cycle: first the developer writes an (initially failing) automated test case that defines a desired improvement or new function, then produces the minimum amount of code to pass that test." So, before actually writing any code, you should write your tests. Often the test can be taken from the original GitHub issue. However, it is always worth considering additional use cases and writing corresponding tests.
-Adding tests is one of the most common requests after code is pushed to
-*pandas*. It is worth getting in the habit of writing tests ahead of
-time so this is never an issue.
+Adding tests is one of the most common requests after code is pushed to *pandas*. Therefore, it is worth getting in the habit of writing tests ahead of time so this is never an issue.
-Like many packages, *pandas* uses the [Nose testing
-system](http://somethingaboutorange.com/mrl/projects/nose/) and the
-convenient extensions in
-[numpy.testing](http://docs.scipy.org/doc/numpy/reference/routines.testing.html).
+Like many packages, *pandas* uses the [Nose testing system](http://nose.readthedocs.org/en/latest/index.html) and the convenient extensions in [numpy.testing](http://docs.scipy.org/doc/numpy/reference/routines.testing.html).
#### Writing tests
-All tests should go into the *tests* subdirectory of the specific
-package. There are probably many examples already there and looking to
-these for inspiration is suggested. If you test requires working with
-files or network connectivity there is more information on the [testing
-page](https://github.com/pydata/pandas/wiki/Testing) of the wiki.
+All tests should go into the `tests` subdirectory of the specific package. This folder contains many current examples of tests, and we suggest looking to these for inspiration. If your test requires working with files or network connectivity, there is more information on the [testing page](https://github.com/pydata/pandas/wiki/Testing) of the wiki.
-The `pandas.util.testing` module has many special `assert` functions
-that make it easier to make statements about whether Series or DataFrame
-objects are equivalent. The easiest way to verify that your code is
-correct is to explicitly construct the result you expect, then compare
-the actual result to the expected correct result:
+The `pandas.util.testing` module has many special `assert` functions that make it easier to make statements about whether Series or DataFrame objects are equivalent. The easiest way to verify that your code is correct is to explicitly construct the result you expect, then compare the actual result to the expected correct result:
def test_pivot(self):
data = {
@@ -437,15 +305,11 @@ the actual result to the expected correct result:
#### Running the test suite
-The tests can then be run directly inside your git clone (without having
-to install *pandas*) by typing::
+The tests can then be run directly inside your Git clone (without having to install *pandas*) by typing:
nosetests pandas
-The tests suite is exhaustive and takes around 20 minutes to run. Often
-it is worth running only a subset of tests first around your changes
-before running the entire suite. This is done using one of the following
-constructs:
+The test suite is exhaustive and takes around 20 minutes to run. Often it is worth running only a subset of tests first around your changes before running the entire suite. This is done using one of the following constructs:
nosetests pandas/tests/[test-module].py
nosetests pandas/tests/[test-module].py:[TestClass]
@@ -453,75 +317,104 @@ constructs:
#### Running the performance test suite
-Performance matters and it is worth considering that your code has not
-introduced performance regressions. Currently *pandas* uses the [vbench
-library](https://github.com/pydata/vbench) to enable easy monitoring of
-the performance of critical *pandas* operations. These benchmarks are
-all found in the `pandas/vb_suite` directory. vbench currently only
-works on python2.
+Performance matters and it is worth considering whether your code has introduced performance regressions. *pandas* is in the process of migrating to the [asv library](https://github.com/spacetelescope/asv) to enable easy monitoring of the performance of critical *pandas* operations. These benchmarks are all found in the `pandas/asv_bench` directory. asv supports both python2 and python3.
+
+> **note**
+>
+> The asv benchmark suite was translated from the previous framework, vbench, so many stylistic issues are likely a result of automated transformation of the code.
+
+To use asv you will need either `conda` or `virtualenv`. For more details please check the [asv installation webpage](http://asv.readthedocs.org/en/latest/installing.html).
+
+To install asv:
+
+ pip install git+https://github.com/spacetelescope/asv
+
+If you need to run a benchmark, change your directory to `/asv_bench/` and run the following if you have been developing on `master`:
+
+ asv continuous master
+
+If you are working on another branch, either of the following can be used:
+
+ asv continuous master HEAD
+ asv continuous master your_branch
+
+This will check out the master revision and run the suite on both master and your commit. Running the full test suite can take up to one hour and use up to 3GB of RAM. Usually it is sufficient to paste only a subset of the results into the pull request to show that the committed changes do not cause unexpected performance regressions.
+
+You can run specific benchmarks using the `-b` flag, which takes a regular expression. For example, this will only run tests from a `pandas/asv_bench/benchmarks/groupby.py` file:
+
+ asv continuous master -b groupby
+
+If you want to only run a specific group of tests from a file, you can do it using `.` as a separator. For example:
+
+ asv continuous master -b groupby.groupby_agg_builtins1
+
+will only run the `groupby_agg_builtins1` test defined in the `groupby` file.
+
+It can also be useful to run tests in your current environment. You can simply do so by running:
+
+ asv dev
+
+This command is equivalent to:
+
+ asv run --quick --show-stderr --python=same
+
+This will launch every test only once, display stderr from the benchmarks, and use your local `python` that comes from your `$PATH`.
+
+Information on how to write a benchmark can be found in the [asv documentation](http://asv.readthedocs.org/en/latest/writing_benchmarks.html).
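As a supplementary sketch (not part of this PR; the file and benchmark names are hypothetical), an asv benchmark is simply a class whose `time_*` methods are timed after `setup` has run:

```python
# asv_bench/benchmarks/example.py (hypothetical) -- asv times every method
# whose name starts with time_; setup() runs first and is excluded from timing.
import numpy as np
import pandas as pd


class GroupByExample(object):

    def setup(self):
        n = 100000
        self.df = pd.DataFrame({'key': np.random.randint(0, 100, size=n),
                                'value': np.random.randn(n)})

    def time_groupby_sum(self):
        self.df.groupby('key')['value'].sum()
```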
+
+#### Running the vbench performance test suite (phasing out)
+
+Historically, *pandas* used [vbench library](https://github.com/pydata/vbench) to enable easy monitoring of the performance of critical *pandas* operations. These benchmarks are all found in the `pandas/vb_suite` directory. vbench currently only works on python2.
To install vbench:
pip install git+https://github.com/pydata/vbench
-Vbench also requires sqlalchemy, gitpython, and psutil which can all be
-installed using pip. If you need to run a benchmark, change your
-directory to the *pandas* root and run:
+Vbench also requires `sqlalchemy`, `gitpython`, and `psutil`, which can all be installed using pip. If you need to run a benchmark, change your directory to the *pandas* root and run:
./test_perf.sh -b master -t HEAD
-This will checkout the master revision and run the suite on both master
-and your commit. Running the full test suite can take up to one hour and
-use up to 3GB of RAM. Usually it is sufficient to past a subset of the
-results in to the Pull Request to show that the committed changes do not
-cause unexpected performance regressions.
+This will check out the master revision and run the suite on both master and your commit. Running the full test suite can take up to one hour and use up to 3GB of RAM. Usually it is sufficient to paste a subset of the results into the Pull Request to show that the committed changes do not cause unexpected performance regressions.
-You can run specific benchmarks using the *-r* flag which takes a
-regular expression.
+You can run specific benchmarks using the `-r` flag, which takes a regular expression.
-See the [performance testing
-wiki](https://github.com/pydata/pandas/wiki/Performance-Testing) for
-information on how to write a benchmark.
+See the [performance testing wiki](https://github.com/pydata/pandas/wiki/Performance-Testing) for information on how to write a benchmark.
### Documenting your code
-Changes should be reflected in the release notes located in
-doc/source/whatsnew/vx.y.z.txt. This file contains an ongoing change log
-for each release. Add an entry to this file to document your fix,
-enhancement or (unavoidable) breaking change. Make sure to include the
-GitHub issue number when adding your entry.
+Changes should be reflected in the release notes located in `doc/source/whatsnew/vx.y.z.txt`. This file contains an ongoing change log for each release. Add an entry to this file to document your fix, enhancement or (unavoidable) breaking change. Make sure to include the GitHub issue number when adding your entry (using `` :issue:`1234` `` where 1234 is the issue/pull request number).
+
+If your code is an enhancement, it is most likely necessary to add usage examples to the existing documentation. This can be done following the section regarding documentation above <contributing.documentation>. Further, to let users know when this feature was added, the `versionadded` directive is used. The sphinx syntax for that is:
+
+``` sourceCode
+.. versionadded:: 0.17.0
+```
-If your code is an enhancement, it is most likely necessary to add usage
-examples to the existing documentation. This can be done following the
-section regarding documentation.
+This will put the text *New in version 0.17.0* wherever you put the sphinx directive. This should also be put in the docstring when adding a new function or method ([example](https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/generic.py#L1959)) or a new keyword argument ([example](https://github.com/pydata/pandas/blob/v0.16.2/pandas/core/frame.py#L1171)).
Contributing your changes to *pandas*
-------------------------------------
### Committing your code
-Keep style fixes to a separate commit to make your PR more readable.
+Keep style fixes to a separate commit to make your pull request more readable.
Once you've made changes, you can see them by typing:
git status
-If you've created a new file, it is not being tracked by git. Add it by
-typing :
+If you have created a new file, it is not being tracked by git. Add it by typing:
git add path/to/file-to-be-added.py
-Doing 'git status' again should give something like :
+Doing 'git status' again should give something like:
# On branch shiny-new-feature
#
# modified: /relative/path/to/file-you-added.py
#
-Finally, commit your changes to your local repository with an
-explanatory message. An informal commit message format is in effect for
-the project. Please try to adhere to it. Here are some common prefixes
-along with general guidelines for when to use them:
+Finally, commit your changes to your local repository with an explanatory message. *Pandas* uses a convention for commit message prefixes and layout. Here are some common prefixes along with general guidelines for when to use them:
> - ENH: Enhancement, new functionality
> - BUG: Bug fix
@@ -531,11 +424,9 @@ along with general guidelines for when to use them:
> - PERF: Performance improvement
> - CLN: Code cleanup
-The following defines how a commit message should be structured. Please
-reference the relevant GitHub issues in your commit message using GH1234
-or \#1234. Either style is fine, but the former is generally preferred:
+The following defines how a commit message should be structured. Please reference the relevant GitHub issues in your commit message using GH1234 or \#1234. Either style is fine, but the former is generally preferred:
-> - a subject line with \< 80 chars.
+> - a subject line with < 80 chars.
> - One blank line.
> - Optionally, a commit message body.
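For example, a commit message following these guidelines might look like this (illustrative only; GH1234 is a placeholder issue number):

    ENH: add origin keyword to to_datetime (GH1234)

    Allow numeric input to be interpreted relative to a user-supplied
    reference date instead of the Unix epoch.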
@@ -543,86 +434,71 @@ Now you can commit your changes in your local repository:
git commit -m
-If you have multiple commits, it is common to want to combine them into
-one commit, often referred to as "squashing" or "rebasing". This is a
-common request by package maintainers when submitting a Pull Request as
-it maintains a more compact commit history. To rebase your commits:
+### Combining commits
+
+If you have multiple commits, you may want to combine them into one commit, often referred to as "squashing" or "rebasing". This is a common request by package maintainers when submitting a pull request as it maintains a more compact commit history. To rebase your commits:
git rebase -i HEAD~#
-Where \# is the number of commits you want to combine. Then you can pick
-the relevant commit message and discard others.
+Where \# is the number of commits you want to combine. Then you can pick the relevant commit message and discard others.
+
+To squash to the master branch do:
+
+ git rebase -i master
+
+Use the `s` option on a commit to `squash`, meaning to keep the commit messages, or `f` to `fixup`, meaning to merge the commit messages.
+
+Then you will need to push the branch (see below) forcefully to replace the current commits with the new ones:
+
+ git push origin shiny-new-feature -f
### Pushing your changes
-When you want your changes to appear publicly on your GitHub page, push
-your forked feature branch's commits :
+When you want your changes to appear publicly on your GitHub page, push your forked feature branch's commits:
git push origin shiny-new-feature
-Here origin is the default name given to your remote repository on
-GitHub. You can see the remote repositories :
+Here `origin` is the default name given to your remote repository on GitHub. You can see the remote repositories:
git remote -v
-If you added the upstream repository as described above you will see
-something like :
+If you added the upstream repository as described above you will see something like:
origin [email protected]:yourname/pandas.git (fetch)
origin [email protected]:yourname/pandas.git (push)
upstream git://github.com/pydata/pandas.git (fetch)
upstream git://github.com/pydata/pandas.git (push)
-Now your code is on GitHub, but it is not yet a part of the *pandas*
-project. For that to happen, a Pull Request needs to be submitted on
-GitHub.
+Now your code is on GitHub, but it is not yet a part of the *pandas* project. For that to happen, a pull request needs to be submitted on GitHub.
### Review your code
-When you're ready to ask for a code review, you will file a Pull
-Request. Before you do, again make sure you've followed all the
-guidelines outlined in this document regarding code style, tests,
-performance tests, and documentation. You should also double check your
-branch changes against the branch it was based off of:
-
-1. Navigate to your repository on
- GitHub--<https://github.com/your-user-name/pandas>.
-2. Click on Branches.
-3. Click on the Compare button for your feature branch.
-4. Select the base and compare branches, if necessary. This will be
- master and shiny-new-feature, respectively.
-
-### Finally, make the Pull Request
-
-If everything looks good you are ready to make a Pull Request. A Pull
-Request is how code from a local repository becomes available to the
-GitHub community and can be looked at and eventually merged into the
-master version. This Pull Request and its associated changes will
-eventually be committed to the master branch and available in the next
-release. To submit a Pull Request:
-
-1. Navigate to your repository on GitHub.
-2. Click on the Pull Request button.
-3. You can then click on Commits and Files Changed to make sure
- everything looks okay one last time.
-4. Write a description of your changes in the Preview Discussion tab.
-5. Click Send Pull Request.
-
-This request then appears to the repository maintainers, and they will
-review the code. If you need to make more changes, you can make them in
-your branch, push them to GitHub, and the pull request will be
-automatically updated. Pushing them to GitHub again is done by:
+When you're ready to ask for a code review, file a pull request. Before you do, once again make sure that you have followed all the guidelines outlined in this document regarding code style, tests, performance tests, and documentation. You should also double check your branch changes against the branch it was based on:
+
+1. Navigate to your repository on GitHub -- <https://github.com/your-user-name/pandas>
+2. Click on `Branches`
+3. Click on the `Compare` button for your feature branch
+4. Select the `base` and `compare` branches, if necessary. This will be `master` and `shiny-new-feature`, respectively.
+
+### Finally, make the pull request
+
+If everything looks good, you are ready to make a pull request. A pull request is how code from a local repository becomes available to the GitHub community and can be looked at and eventually merged into the master version. This pull request and its associated changes will eventually be committed to the master branch and available in the next release. To submit a pull request:
+
+1. Navigate to your repository on GitHub
+2. Click on the `Pull Request` button
+3. You can then click on `Commits` and `Files Changed` to make sure everything looks okay one last time
+4. Write a description of your changes in the `Preview Discussion` tab
+5. Click `Send Pull Request`.
+
+This request then goes to the repository maintainers, and they will review the code. If you need to make more changes, you can make them in your branch, push them to GitHub, and the pull request will be automatically updated. Pushing them to GitHub again is done by:
git push -f origin shiny-new-feature
-This will automatically update your Pull Request with the latest code
-and restart the Travis-CI tests.
+This will automatically update your pull request with the latest code and restart the Travis-CI tests.
### Delete your merged branch (optional)
-Once your feature branch is accepted into upstream, you'll probably want
-to get rid of the branch. First, merge upstream master into your branch
-so git knows it is safe to delete your branch :
+Once your feature branch is accepted into upstream, you'll probably want to get rid of the branch. First, merge upstream master into your branch so git knows it is safe to delete your branch:
git fetch upstream
git checkout master
@@ -632,9 +508,8 @@ Then you can just do:
git branch -d shiny-new-feature
-Make sure you use a lower-case -d, or else git won't warn you if your
-feature branch has not actually been merged.
+Make sure you use a lower-case `-d`, or else git won't warn you if your feature branch has not actually been merged.
-The branch will still exist on GitHub, so to delete it there do :
+The branch will still exist on GitHub, so to delete it there do:
git push origin --delete shiny-new-feature
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index a9c6316821ba1..8cab77a0688a7 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -25,24 +25,25 @@ Or maybe through using *pandas* you have an idea of you own or are looking for s
in the documentation and thinking 'this can be improved'...you can do something
about it!
-Feel free to ask questions on `mailing list
-<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_
+Feel free to ask questions on the `mailing list
+<https://groups.google.com/forum/?fromgroups#!forum/pydata>`_ or on `Gitter
+<https://gitter.im/pydata/pandas>`_.
-Bug Reports/Enhancement Requests
-================================
+Bug reports and enhancement requests
+====================================
Bug reports are an important part of making *pandas* more stable. Having a complete bug report
-will allow others to reproduce the bug and provide insight into fixing. Since many versions of
+will allow others to reproduce the bug and provide insight into fixing. Because many versions of
*pandas* are supported, knowing version information will also identify improvements made since
-previous versions. Often trying the bug-producing code out on the *master* branch is a worthwhile exercise
+previous versions. Trying the bug-producing code out on the *master* branch is often a worthwhile exercise
to confirm the bug still exists. It is also worth searching existing bug reports and pull requests
to see if the issue has already been reported and/or fixed.
Bug reports must:
#. Include a short, self-contained Python snippet reproducing the problem.
- You can have the code formatted nicely by using `GitHub Flavored Markdown
- <http://github.github.com/github-flavored-markdown/>`_: ::
+ You can format the code nicely by using `GitHub Flavored Markdown
+ <http://github.github.com/github-flavored-markdown/>`_::
```python
>>> from pandas import DataFrame
@@ -50,13 +51,13 @@ Bug reports must:
...
```
-#. Include the full version string of *pandas* and its dependencies. In recent (>0.12) versions
- of *pandas* you can use a built in function: ::
+#. Include the full version string of *pandas* and its dependencies. In versions
+ of *pandas* after 0.12 you can use a built in function::
>>> from pandas.util.print_versions import show_versions
>>> show_versions()
- and in 0.13.1 onwards: ::
+ and in *pandas* 0.13.1 onwards::
>>> pd.show_versions()
@@ -70,12 +71,12 @@ Working with the code
Now that you have an issue you want to fix, enhancement to add, or documentation to improve,
you need to learn how to work with GitHub and the *pandas* code base.
-Version Control, Git, and GitHub
+Version control, Git, and GitHub
--------------------------------
To the new user, working with Git is one of the more daunting aspects of contributing to *pandas*.
-It can very quickly become overwhelming, but sticking to the guidelines below will make the process
-straightforward and will work without much trouble. As always, if you are having difficulties please
+It can very quickly become overwhelming, but sticking to the guidelines below will help keep the process
+straightforward and mostly trouble free. As always, if you are having difficulties please
feel free to ask for help.
The code is hosted on `GitHub <https://www.github.com/pydata/pandas>`_. To
@@ -83,18 +84,18 @@ contribute you will need to sign up for a `free GitHub account
<https://github.com/signup/free>`_. We use `Git <http://git-scm.com/>`_ for
version control to allow many people to work together on the project.
-Some great resources for learning git:
+Some great resources for learning Git:
* the `GitHub help pages <http://help.github.com/>`_.
* the `NumPy's documentation <http://docs.scipy.org/doc/numpy/dev/index.html>`_.
* Matthew Brett's `Pydagogue <http://matthew-brett.github.com/pydagogue/>`_.
-Getting Started with Git
+Getting started with Git
------------------------
`GitHub has instructions <http://help.github.com/set-up-git-redirect>`__ for installing git,
setting up your SSH key, and configuring git. All these steps need to be completed before
-working seamlessly with your local repository and GitHub.
+you can work seamlessly between your local repository and GitHub.
.. _contributing.forking:
@@ -102,8 +103,8 @@ Forking
-------
You will need your own fork to work on the code. Go to the `pandas project
-page <https://github.com/pydata/pandas>`_ and hit the *fork* button. You will
-want to clone your fork to your machine: ::
+page <https://github.com/pydata/pandas>`_ and hit the ``Fork`` button. You will
+want to clone your fork to your machine::
git clone [email protected]:your-user-name/pandas.git pandas-yourname
cd pandas-yourname
@@ -112,13 +113,13 @@ want to clone your fork to your machine: ::
This creates the directory `pandas-yourname` and connects your repository to
the upstream (main project) *pandas* repository.
-The testing suite will run automatically on Travis-CI once your Pull Request is
+The testing suite will run automatically on Travis-CI once your pull request is
submitted. However, if you wish to run the test suite on a branch prior to
-submitting the Pull Request, then Travis-CI needs to be hooked up to your
-GitHub repository. Instructions are for doing so are `here
+submitting the pull request, then Travis-CI needs to be hooked up to your
+GitHub repository. Instructions for doing so are `here
<http://about.travis-ci.org/docs/user/getting-started/>`__.
-Creating a Branch
+Creating a branch
-----------------
You want your master branch to reflect only production-ready code, so create a
@@ -142,84 +143,67 @@ To update this branch, you need to retrieve the changes from the master branch::
git rebase upstream/master
This will replay your commits on top of the lastest pandas git master. If this
-leads to merge conflicts, you must resolve these before submitting your Pull
-Request. If you have uncommitted changes, you will need to `stash` them prior
+leads to merge conflicts, you must resolve these before submitting your pull
+request. If you have uncommitted changes, you will need to ``stash`` them prior
to updating. This will effectively store your changes and they can be reapplied
after updating.
.. _contributing.dev_env:
-Creating a Development Environment
+Creating a development environment
----------------------------------
An easy way to create a *pandas* development environment is as follows.
-- Install either :ref:`Install Anaconda <install.anaconda>` or :ref:`Install miniconda <install.miniconda>`
+- Install either :ref:`Anaconda <install.anaconda>` or :ref:`miniconda <install.miniconda>`
- Make sure that you have :ref:`cloned the repository <contributing.forking>`
-- ``cd`` to the pandas source directory
+- ``cd`` to the *pandas* source directory
-Tell ``conda`` to create a new environment, named ``pandas_dev``, or any name you would like for this environment by running:
-
-::
+Tell conda to create a new environment, named ``pandas_dev``, or any other name you would like
+for this environment, by running::
conda create -n pandas_dev --file ci/requirements_dev.txt
-For a python 3 environment
-
-::
+For a python 3 environment::
conda create -n pandas_dev python=3 --file ci/requirements_dev.txt
-If you are on ``windows``, then you will need to install the compiler linkages:
-
-::
+If you are on Windows, then you will also need to install the compiler linkages::
conda install -n pandas_dev libpython
-This will create the new environment, and not touch any of your existing environments, nor any existing python installation. It will install all of the basic dependencies of *pandas*, as well as the development and testing tools. If you would like to install other dependencies, you can install them as follows:
-
-::
+This will create the new environment, and not touch any of your existing environments,
+nor any existing python installation. It will install all of the basic dependencies of
+*pandas*, as well as the development and testing tools. If you would like to install
+other dependencies, you can install them as follows::
conda install -n pandas_dev -c pandas pytables scipy
-To install *all* pandas dependencies you can do the following:
-
-::
+To install *all* pandas dependencies you can do the following::
conda install -n pandas_dev -c pandas --file ci/requirements_all.txt
-To work in this environment, ``activate`` it as follows:
-
-::
+To work in this environment, Windows users should ``activate`` it as follows::
activate pandas_dev
-At which point, the prompt will change to indicate you are in the new development environment.
-
-.. note::
-
- The above syntax is for ``windows`` environments. To work on ``macosx/linux``, use:
+Mac OSX and Linux users should use::
- ::
+ source activate pandas_dev
- source activate pandas_dev
+You will then see a confirmation message to indicate you are in the new development environment.
-To view your environments:
-
-::
+To view your environments::
conda info -e
-To return to you home root environment:
-
-::
+To return to your root environment::
deactivate
-See the full ``conda`` docs `here
-<http://conda.pydata.org/docs>`__.
+See the full conda docs `here <http://conda.pydata.org/docs>`__.
At this point you can easily do an *in-place* install, as detailed in the next section.
@@ -258,7 +242,7 @@ of huge value. You don't even have to be an expert on
as you reference the docs is a simple but effective way to contribute. The
next person to read that passage will be in your debt!
-Actually, there are sections of the docs that are worse off by being written
+In fact, there are sections of the docs that are worse off after being written
by experts. If something in the docs doesn't make sense to you, updating the
relevant section after you figure it out is a simple way to ensure it will
help the next person.
@@ -267,8 +251,8 @@ help the next person.
:local:
-About the pandas documentation
-------------------------------
+About the *pandas* documentation
+--------------------------------
The documentation is written in **reStructuredText**, which is almost like writing
in plain English, and built using `Sphinx <http://sphinx.pocoo.org/>`__. The
@@ -286,7 +270,7 @@ Some other important things to know about the docs:
overviews per topic together with some other information (what's new,
installation, etc).
-- The docstrings follow the **Numpy Docstring Standard** which is used widely
+- The docstrings follow the **Numpy Docstring Standard**, which is used widely
in the Scientific Python community. This standard specifies the format of
the different sections of the docstring. See `this document
<https://github.com/numpy/numpy/blob/master/doc/HOWTO_DOCUMENT.rst.txt>`_
@@ -296,38 +280,44 @@ Some other important things to know about the docs:
- The tutorials make heavy use of the `ipython directive
<http://matplotlib.org/sampledoc/ipython_directive.html>`_ sphinx extension.
This directive lets you put code in the documentation which will be run
- during the doc build. For example:
-
- ::
+ during the doc build. For example::
.. ipython:: python
x = 2
x**3
- will be rendered as
-
- ::
+ will be rendered as::
In [1]: x = 2
In [2]: x**3
Out[2]: 8
- This means that almost all code examples in the docs are always run (and the
- output saved) during the doc build. This way, they will always be up to date,
- but it makes the doc building a bit more complex.
+ Almost all code examples in the docs are run (and the output saved) during the
+ doc build. This approach means that code examples will always be up to date,
+ but it does make the doc building a bit more complex.
+
+.. note::
+
+ The ``.rst`` files are used to automatically generate Markdown and HTML versions
+ of the docs. For this reason, please do not edit ``CONTRIBUTING.md`` directly,
+ but instead make any changes to ``doc/source/contributing.rst``. Then, to
+ generate ``CONTRIBUTING.md``, use `pandoc <http://johnmacfarlane.net/pandoc/>`_
+ with the following command::
+
+ pandoc doc/source/contributing.rst -t markdown_github > CONTRIBUTING.md
The utility script ``scripts/api_rst_coverage.py`` can be used to compare
the list of methods documented in ``doc/source/api.rst`` (which is used to generate
the `API Reference <http://pandas.pydata.org/pandas-docs/stable/api.html>`_ page)
and the actual public methods.
-It will identify methods documented in in ``doc/source/api.rst`` that are not actually
+This will identify methods documented in ``doc/source/api.rst`` that are not actually
class methods, and existing methods that are not documented in ``doc/source/api.rst``.
-How to build the pandas documentation
--------------------------------------
+How to build the *pandas* documentation
+---------------------------------------
Requirements
~~~~~~~~~~~~
@@ -336,53 +326,50 @@ To build the *pandas* docs there are some extra requirements: you will need to
have ``sphinx`` and ``ipython`` installed. `numpydoc
<https://github.com/numpy/numpydoc>`_ is used to parse the docstrings that
follow the Numpy Docstring Standard (see above), but you don't need to install
-this because a local copy of ``numpydoc`` is included in the *pandas* source
+this because a local copy of numpydoc is included in the *pandas* source
code.
-It is easiest to :ref:`create a development environment <contributing.dev_env>`, then install:
-
-::
+It is easiest to :ref:`create a development environment <contributing.dev_env>`, then install::
conda install -n pandas_dev sphinx ipython
Furthermore, it is recommended to have all `optional dependencies
<http://pandas.pydata.org/pandas-docs/dev/install.html#optional-dependencies>`_
installed. This is not strictly necessary, but be aware that you will see some error
-messages. Because all the code in the documentation is executed during the doc
-build, the examples using this optional dependencies will generate errors.
-Run ``pd.show_versions()`` to get an overview of the installed version of all
-dependencies.
+messages when building the docs. This happens because all the code in the documentation
+is executed during the doc build, and so code examples using optional dependencies
+will generate errors. Run ``pd.show_versions()`` to get an overview of the installed
+version of all dependencies.
.. warning::
- Sphinx version >= 1.2.2 or the older 1.1.3 is required.
+ You need to have ``sphinx`` version 1.2.2 or newer, but older than version 1.3.
+ Version 1.1.3 should also work.
Building the documentation
~~~~~~~~~~~~~~~~~~~~~~~~~~
-So how do you build the docs? Navigate to your local the folder
+So how do you build the docs? Navigate to your local
``pandas/doc/`` directory in the console and run::
python make.py html
-And then you can find the html output in the folder ``pandas/doc/build/html/``.
+Then you can find the HTML output in the folder ``pandas/doc/build/html/``.
-The first time it will take quite a while, because it has to run all the code
-examples in the documentation and build all generated docstring pages.
-In subsequent evocations, sphinx will try to only build the pages that have
-been modified.
+The first time you build the docs, it will take quite a while because it has to run
+all the code examples and build all the generated docstring pages. In subsequent
+invocations, sphinx will try to only build the pages that have been modified.
If you want to do a full clean build, do::
python make.py clean
python make.py build
-
-Starting with 0.13.1 you can tell ``make.py`` to compile only a single section
+Starting with *pandas* 0.13.1 you can tell ``make.py`` to compile only a single section
of the docs, greatly reducing the turn-around time for checking your changes.
-You will be prompted to delete `.rst` files that aren't required. This is okay
-since the prior version can be checked out from git, but make sure to
-not commit the file deletions.
+You will be prompted to delete ``.rst`` files that aren't required. This is okay because
+the prior versions of these files can be checked out from git. However, you must make sure
+not to commit the file deletions to your Git repository!
::
@@ -395,9 +382,9 @@ not commit the file deletions.
python make.py clean
python make.py --single indexing
-For comparison, a full documentation build may take 10 minutes. a ``-no-api`` build
-may take 3 minutes and a single section may take 15 seconds. However, subsequent
-builds only process portions you changed. Now, open the following file in a web
+For comparison, a full documentation build may take 10 minutes, a ``-no-api`` build
+may take 3 minutes and a single section may take 15 seconds. Subsequent builds, which
+only process portions you have changed, will be faster. Open the following file in a web
browser to see the full documentation you just built::
pandas/docs/build/html/index.html
@@ -406,11 +393,12 @@ And you'll have the satisfaction of seeing your new and improved documentation!
.. _contributing.dev_docs:
-Built Master Branch Documentation
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Building master branch documentation
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-When pull-requests are merged into the pandas *master* branch, the main parts of the documentation are
-also built by Travis-CI. These docs are then hosted `here <http://pandas-docs.github.io/pandas-docs-travis>`__.
+When pull requests are merged into the *pandas* ``master`` branch, the main parts of
+the documentation are also built by Travis-CI. These docs are then hosted `here
+<http://pandas-docs.github.io/pandas-docs-travis>`__.
Contributing to the code base
=============================
@@ -418,30 +406,35 @@ Contributing to the code base
.. contents:: Code Base:
:local:
-Code Standards
+Code standards
--------------
*pandas* uses the `PEP8 <http://www.python.org/dev/peps/pep-0008/>`_ standard.
There are several tools to ensure you abide by this standard.
-We've written a tool to check that your commits are PEP8 great, `pip install pep8radius <https://github.com/hayd/pep8radius>`_.
-Look at PEP8 fixes in your branch vs master with::
+We've written a tool to check that your commits are PEP8 great, `pip install pep8radius
+<https://github.com/hayd/pep8radius>`_. Look at PEP8 fixes in your branch vs master with::
+
+ pep8radius master --diff
+
+and make these changes with::
- pep8radius master --diff` and make these changes with `pep8radius master --diff --in-place`
+ pep8radius master --diff --in-place
-Alternatively, use `flake8 <http://pypi.python.org/pypi/flake8>`_ tool for checking the style of your code.
-Additional standards are outlined on the `code style wiki page <https://github.com/pydata/pandas/wiki/Code-Style-and-Conventions>`_.
+Alternatively, use the `flake8 <http://pypi.python.org/pypi/flake8>`_ tool for checking
+the style of your code. Additional standards are outlined on the `code style wiki
+page <https://github.com/pydata/pandas/wiki/Code-Style-and-Conventions>`_.
-Please try to maintain backward-compatibility. *Pandas* has lots of users with lots of existing code, so
-don't break it if at all possible. If you think breakage is required clearly state why
-as part of the Pull Request. Also, be careful when changing method signatures and add
-deprecation warnings where needed.
+Please try to maintain backward compatibility. *pandas* has lots of users with lots of
+existing code, so don't break it if at all possible. If you think breakage is required,
+clearly state why as part of the pull request. Also, be careful when changing method
+signatures and add deprecation warnings where needed.
-Test-driven Development/Writing Code
+Test-driven development/code writing
------------------------------------
-*Pandas* is serious about testing and strongly encourages individuals to embrace `Test-driven Development (TDD)
-<http://en.wikipedia.org/wiki/Test-driven_development>`_.
+*pandas* is serious about testing and strongly encourages contributors to embrace
+`test-driven development (TDD) <http://en.wikipedia.org/wiki/Test-driven_development>`_.
This development process "relies on the repetition of a very short development cycle:
first the developer writes an (initially failing) automated test case that defines a desired
improvement or new function, then produces the minimum amount of code to pass that test."
@@ -449,30 +442,28 @@ So, before actually writing any code, you should write your tests. Often the te
taken from the original GitHub issue. However, it is always worth considering additional
use cases and writing corresponding tests.
-Adding tests is one of the most common requests after code is pushed to *pandas*. It is worth getting
-in the habit of writing tests ahead of time so this is never an issue.
+Adding tests is one of the most common requests after code is pushed to *pandas*. Therefore,
+it is worth getting in the habit of writing tests ahead of time so this is never an issue.
Like many packages, *pandas* uses the `Nose testing system
-<http://somethingaboutorange.com/mrl/projects/nose/>`_ and the convenient
+<http://nose.readthedocs.org/en/latest/index.html>`_ and the convenient
extensions in `numpy.testing
<http://docs.scipy.org/doc/numpy/reference/routines.testing.html>`_.
Writing tests
~~~~~~~~~~~~~
-All tests should go into the *tests* subdirectory of the specific package.
-There are probably many examples already there and looking to these for
-inspiration is suggested. If you test requires working with files or
-network connectivity there is more information on the `testing page
+All tests should go into the ``tests`` subdirectory of the specific package.
+This folder contains many current examples of tests, and we suggest looking to these for
+inspiration. If your test requires working with files or
+network connectivity, there is more information on the `testing page
<https://github.com/pydata/pandas/wiki/Testing>`_ of the wiki.
The ``pandas.util.testing`` module has many special ``assert`` functions that
make it easier to make statements about whether Series or DataFrame objects are
equivalent. The easiest way to verify that your code is correct is to
explicitly construct the result you expect, then compare the actual result to
-the expected correct result:
-
-::
+the expected correct result::
def test_pivot(self):
data = {
@@ -494,16 +485,14 @@ the expected correct result:
Running the test suite
~~~~~~~~~~~~~~~~~~~~~~
-The tests can then be run directly inside your git clone (without having to
-install *pandas*) by typing:::
+The tests can then be run directly inside your Git clone (without having to
+install *pandas*) by typing::
nosetests pandas
The tests suite is exhaustive and takes around 20 minutes to run. Often it is
worth running only a subset of tests first around your changes before running the
-entire suite. This is done using one of the following constructs:
-
-::
+entire suite. This is done using one of the following constructs::
nosetests pandas/tests/[test-module].py
nosetests pandas/tests/[test-module].py:[TestClass]
@@ -512,71 +501,71 @@ entire suite. This is done using one of the following constructs:
Running the performance test suite
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Performance matters and it is worth considering that your code has not introduced
+Performance matters and it is worth considering whether your code has introduced
performance regressions. *pandas* is in the process of migrating to the
`asv library <https://github.com/spacetelescope/asv>`__
to enable easy monitoring of the performance of critical *pandas* operations.
-These benchmarks are all found in the ``pandas/asv_bench`` directory. *asv*
+These benchmarks are all found in the ``pandas/asv_bench`` directory. asv
supports both python2 and python3.
.. note::
- The *asv* benchmark suite was translated from the previous framework, vbench,
+ The asv benchmark suite was translated from the previous framework, vbench,
so many stylistic issues are likely a result of automated transformation of the
code.
-To use ''asv'' you will need either ''conda'' or ''virtualenv''. For more details
-please check installation webpage http://asv.readthedocs.org/en/latest/installing.html
+To use asv you will need either ``conda`` or ``virtualenv``. For more details
+please check the `asv installation webpage <http://asv.readthedocs.org/en/latest/installing.html>`_.
-To install ''asv''::
+To install asv::
pip install git+https://github.com/spacetelescope/asv
-If you need to run a benchmark, change your directory to asv_bench/ and run
-the following if you have been developing on master::
+If you need to run a benchmark, change your directory to ``asv_bench/`` and run
+the following if you have been developing on ``master``::
asv continuous master
-Otherwise, if you are working on another branch, either of the following can be used::
+If you are working on another branch, either of the following can be used::
asv continuous master HEAD
asv continuous master your_branch
-This will checkout the master revision and run the suite on both master and
+This will check out the master revision and run the suite on both master and
your commit. Running the full test suite can take up to one hour and use up
-to 3GB of RAM. Usually it is sufficient to paste a subset of the results in
-to the Pull Request to show that the committed changes do not cause unexpected
+to 3GB of RAM. Usually it is sufficient to paste only a subset of the results into
+the pull request to show that the committed changes do not cause unexpected
performance regressions.
-You can run specific benchmarks using the *-b* flag which takes a regular expression.
-For example this will only run tests from a ``pandas/asv_bench/benchmarks/groupby.py``
+You can run specific benchmarks using the ``-b`` flag, which takes a regular expression.
+For example, this will only run tests from a ``pandas/asv_bench/benchmarks/groupby.py``
file::
asv continuous master -b groupby
-If you want to run only some specific group of tests from a file you can do it
+If you want to only run a specific group of tests from a file, you can do it
using ``.`` as a separator. For example::
asv continuous master -b groupby.groupby_agg_builtins1
will only run a ``groupby_agg_builtins1`` test defined in a ``groupby`` file.
-It is also useful to run tests in your current environment. You can simply do it by::
+It can also be useful to run tests in your current environment. You can simply do it by::
asv dev
-which would be equivalent to ``asv run --quick --show-stderr --python=same``. This
-will launch every test only once, display stderr from the benchmarks and use your
-local ``python`` that comes from your $PATH.
+This command is equivalent to::
+
+ asv run --quick --show-stderr --python=same
+
+This will launch every test only once, display stderr from the benchmarks, and use your local ``python`` that comes from your ``$PATH``.
-Information on how to write a benchmark can be found in
-`*asv*'s documentation http://asv.readthedocs.org/en/latest/writing_benchmarks.html`.
+Information on how to write a benchmark can be found in the `asv documentation <http://asv.readthedocs.org/en/latest/writing_benchmarks.html>`_.
Running the vbench performance test suite (phasing out)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-Performance matters and it is worth considering that your code has not introduced
-performance regressions. Historically, *pandas* used `vbench library <https://github.com/pydata/vbench>`__
+Historically, *pandas* used the `vbench library <https://github.com/pydata/vbench>`_
to enable easy monitoring of the performance of critical *pandas* operations.
These benchmarks are all found in the ``pandas/vb_suite`` directory. vbench
currently only works on python2.
@@ -585,18 +574,17 @@ To install vbench::
pip install git+https://github.com/pydata/vbench
-Vbench also requires sqlalchemy, gitpython, and psutil which can all be installed
+Vbench also requires ``sqlalchemy``, ``gitpython``, and ``psutil``, which can all be installed
using pip. If you need to run a benchmark, change your directory to the *pandas* root and run::
./test_perf.sh -b master -t HEAD
-This will checkout the master revision and run the suite on both master and
+This will check out the master revision and run the suite on both master and
your commit. Running the full test suite can take up to one hour and use up
-to 3GB of RAM. Usually it is sufficient to paste a subset of the results in
-to the Pull Request to show that the committed changes do not cause unexpected
+to 3GB of RAM. Usually it is sufficient to paste a subset of the results into the pull request to show that the committed changes do not cause unexpected
performance regressions.
-You can run specific benchmarks using the *-r* flag which takes a regular expression.
+You can run specific benchmarks using the ``-r`` flag, which takes a regular expression.
See the `performance testing wiki <https://github.com/pydata/pandas/wiki/Performance-Testing>`_ for information
on how to write a benchmark.
@@ -604,7 +592,7 @@ on how to write a benchmark.
Documenting your code
---------------------
-Changes should be reflected in the release notes located in `doc/source/whatsnew/vx.y.z.txt`.
+Changes should be reflected in the release notes located in ``doc/source/whatsnew/vx.y.z.txt``.
This file contains an ongoing change log for each release. Add an entry to this file to
document your fix, enhancement or (unavoidable) breaking change. Make sure to include the
GitHub issue number when adding your entry (using `` :issue:`1234` `` where `1234` is the
@@ -631,17 +619,17 @@ Contributing your changes to *pandas*
Committing your code
--------------------
-Keep style fixes to a separate commit to make your PR more readable.
+Keep style fixes to a separate commit to make your pull request more readable.
Once you've made changes, you can see them by typing::
git status
-If you've created a new file, it is not being tracked by git. Add it by typing ::
+If you have created a new file, it is not being tracked by git. Add it by typing::
git add path/to/file-to-be-added.py
-Doing 'git status' again should give something like ::
+Doing 'git status' again should give something like::
# On branch shiny-new-feature
#
@@ -661,7 +649,7 @@ some common prefixes along with general guidelines for when to use them:
* CLN: Code cleanup
The following defines how a commit message should be structured. Please reference the
-relevant GitHub issues in your commit message using `GH1234` or `#1234`. Either style
+relevant GitHub issues in your commit message using GH1234 or #1234. Either style
is fine, but the former is generally preferred:
* a subject line with `< 80` chars.
@@ -675,9 +663,10 @@ Now you can commit your changes in your local repository::
Combining commits
-----------------
-If you have multiple commits, it is common to want to combine them into one commit, often
+If you have multiple commits, you may want to combine them into one commit, often
referred to as "squashing" or "rebasing". This is a common request by package maintainers
-when submitting a Pull Request as it maintains a more compact commit history. To rebase your commits::
+when submitting a pull request as it maintains a more compact commit history. To rebase
+your commits::
git rebase -i HEAD~#
@@ -689,9 +678,10 @@ To squash to the master branch do::
git rebase -i master
Use the ``s`` option on a commit to ``squash``, meaning to keep the commit messages,
- or ``f`` to ``fixup``, meaning to merge the commit messages.
+or ``f`` to ``fixup``, meaning to merge the commit messages.
-Then you will need to push the branch (see below) forcefully to replace the current commits with the new ones::
+Then you will need to push the branch (see below) forcefully to replace the current
+commits with the new ones::
git push origin shiny-new-feature -f
@@ -700,17 +690,17 @@ Pushing your changes
--------------------
When you want your changes to appear publicly on your GitHub page, push your
-forked feature branch's commits ::
+forked feature branch's commits::
git push origin shiny-new-feature
-Here `origin` is the default name given to your remote repository on GitHub.
-You can see the remote repositories ::
+Here ``origin`` is the default name given to your remote repository on GitHub.
+You can see the remote repositories::
git remote -v
If you added the upstream repository as described above you will see something
-like ::
+like::
origin [email protected]:yourname/pandas.git (fetch)
origin [email protected]:yourname/pandas.git (push)
@@ -718,51 +708,54 @@ like ::
upstream git://github.com/pydata/pandas.git (push)
Now your code is on GitHub, but it is not yet a part of the *pandas* project. For that to
-happen, a Pull Request needs to be submitted on GitHub.
+happen, a pull request needs to be submitted on GitHub.
Review your code
----------------
-When you're ready to ask for a code review, you will file a Pull Request. Before you do,
-again make sure you've followed all the guidelines outlined in this document regarding
-code style, tests, performance tests, and documentation. You should also double check
-your branch changes against the branch it was based off of:
+When you're ready to ask for a code review, file a pull request. Before you do, once
+again make sure that you have followed all the guidelines outlined in this document
+regarding code style, tests, performance tests, and documentation. You should also
+double check your branch changes against the branch it was based on:
-#. Navigate to your repository on GitHub--https://github.com/your-user-name/pandas.
-#. Click on `Branches`.
-#. Click on the `Compare` button for your feature branch.
-#. Select the `base` and `compare` branches, if necessary. This will be `master` and
- `shiny-new-feature`, respectively.
+#. Navigate to your repository on GitHub -- https://github.com/your-user-name/pandas
+#. Click on ``Branches``
+#. Click on the ``Compare`` button for your feature branch
+#. Select the ``base`` and ``compare`` branches, if necessary. This will be ``master`` and
+ ``shiny-new-feature``, respectively.
-Finally, make the Pull Request
+Finally, make the pull request
------------------------------
-If everything looks good you are ready to make a Pull Request. A Pull Request is how
+If everything looks good, you are ready to make a pull request. A pull request is how
code from a local repository becomes available to the GitHub community and can be looked
-at and eventually merged into the master version. This Pull Request and its associated
+at and eventually merged into the master version. This pull request and its associated
changes will eventually be committed to the master branch and available in the next
-release. To submit a Pull Request:
+release. To submit a pull request:
-#. Navigate to your repository on GitHub.
-#. Click on the `Pull Request` button.
-#. You can then click on `Commits` and `Files Changed` to make sure everything looks okay one last time.
-#. Write a description of your changes in the `Preview Discussion` tab.
-#. Click `Send Pull Request`.
+#. Navigate to your repository on GitHub
+#. Click on the ``Pull Request`` button
+#. You can then click on ``Commits`` and ``Files Changed`` to make sure everything looks
+ okay one last time
+#. Write a description of your changes in the ``Preview Discussion`` tab
+#. Click ``Send Pull Request``.
-This request then appears to the repository maintainers, and they will review
+This request then goes to the repository maintainers, and they will review
the code. If you need to make more changes, you can make them in
your branch, push them to GitHub, and the pull request will be automatically
updated. Pushing them to GitHub again is done by::
git push -f origin shiny-new-feature
-This will automatically update your Pull Request with the latest code and restart the Travis-CI tests.
+This will automatically update your pull request with the latest code and restart the
+Travis-CI tests.
Delete your merged branch (optional)
------------------------------------
Once your feature branch is accepted into upstream, you'll probably want to get rid of
-the branch. First, merge upstream master into your branch so git knows it is safe to delete your branch ::
+the branch. First, merge upstream master into your branch so git knows it is safe to
+delete your branch::
git fetch upstream
git checkout master
@@ -772,9 +765,9 @@ Then you can just do::
git branch -d shiny-new-feature
-Make sure you use a lower-case -d, or else git won't warn you if your feature
+Make sure you use a lower-case ``-d``, or else git won't warn you if your feature
branch has not actually been merged.
-The branch will still exist on GitHub, so to delete it there do ::
+The branch will still exist on GitHub, so to delete it there do::
git push origin --delete shiny-new-feature
| Also fixed a dead link to the Nose testing docs.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11469 | 2015-10-29T10:57:32Z | 2015-11-09T22:22:10Z | 2015-11-09T22:22:10Z | 2015-11-10T09:54:56Z |
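The contributing guide above tells test writers to construct the expected object explicitly and compare it against the actual result with the helpers in ``pandas.util.testing``. A minimal, self-contained sketch of that pattern (the test name and data here are invented for illustration, not taken from the guide):

```python
import pandas as pd
import pandas.util.testing as tm


def test_cumsum_simple():
    # build the input and the explicitly constructed expected result
    ser = pd.Series([1, 2, 3], index=['a', 'b', 'c'])
    expected = pd.Series([1, 3, 6], index=['a', 'b', 'c'])
    # the testing helper compares values, index, dtype and name
    tm.assert_series_equal(ser.cumsum(), expected)
```

Run it like any other test module, e.g. ``nosetests path/to/test_file.py``.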
CLN: move routines for filling missing data from core/common.py to core/missing.py | diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 9decd5e212cbf..e304684036766 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -10,6 +10,7 @@
from pandas.core.algorithms import factorize
from pandas.core.base import PandasObject, PandasDelegate
import pandas.core.common as com
+from pandas.core.missing import interpolate_2d
from pandas.util.decorators import cache_readonly, deprecate_kwarg
from pandas.core.common import (ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex,
@@ -1312,7 +1313,7 @@ def fillna(self, value=None, method=None, limit=None):
if method is not None:
values = self.to_dense().reshape(-1, len(self))
- values = com.interpolate_2d(
+ values = interpolate_2d(
values, method, 0, None, value).astype(self.categories.dtype)[0]
values = _get_codes_for_values(values, self.categories)
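For context, and not part of this patch: the ``Categorical.fillna`` branch above is the path taken by method-based filling, so a small sketch of the behaviour it implements, assuming the 0.17-era API shown here, might be:

```python
import pandas as pd

# missing entries are stored as code -1 until they are filled
cat = pd.Categorical(['a', None, 'b', None], categories=['a', 'b'])

# method-based filling pads the dense values via interpolate_2d and
# then maps the filled values back to category codes
print(cat.fillna(method='ffill'))  # [a, a, b, b]
```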
diff --git a/pandas/core/common.py b/pandas/core/common.py
index d6aa6e6bb90cc..4490aaf58a002 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1420,350 +1420,6 @@ def _fill_zeros(result, x, y, name, fill):
return result
-def _interp_wrapper(f, wrap_dtype, na_override=None):
- def wrapper(arr, mask, limit=None):
- view = arr.view(wrap_dtype)
- f(view, mask, limit=limit)
- return wrapper
-
-
-_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
-_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
-_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64,
- np.int64)
-_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
- np.int64)
-
-
-def pad_1d(values, limit=None, mask=None, dtype=None):
-
- if dtype is None:
- dtype = values.dtype
- _method = None
- if is_float_dtype(values):
- _method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
- elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
- _method = _pad_1d_datetime
- elif is_integer_dtype(values):
- values = _ensure_float64(values)
- _method = algos.pad_inplace_float64
- elif values.dtype == np.object_:
- _method = algos.pad_inplace_object
-
- if _method is None:
- raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)
-
- if mask is None:
- mask = isnull(values)
- mask = mask.view(np.uint8)
- _method(values, mask, limit=limit)
- return values
-
-
-def backfill_1d(values, limit=None, mask=None, dtype=None):
-
- if dtype is None:
- dtype = values.dtype
- _method = None
- if is_float_dtype(values):
- _method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
- elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
- _method = _backfill_1d_datetime
- elif is_integer_dtype(values):
- values = _ensure_float64(values)
- _method = algos.backfill_inplace_float64
- elif values.dtype == np.object_:
- _method = algos.backfill_inplace_object
-
- if _method is None:
- raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)
-
- if mask is None:
- mask = isnull(values)
- mask = mask.view(np.uint8)
-
- _method(values, mask, limit=limit)
- return values
-
-
-def pad_2d(values, limit=None, mask=None, dtype=None):
-
- if dtype is None:
- dtype = values.dtype
- _method = None
- if is_float_dtype(values):
- _method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
- elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
- _method = _pad_2d_datetime
- elif is_integer_dtype(values):
- values = _ensure_float64(values)
- _method = algos.pad_2d_inplace_float64
- elif values.dtype == np.object_:
- _method = algos.pad_2d_inplace_object
-
- if _method is None:
- raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)
-
- if mask is None:
- mask = isnull(values)
- mask = mask.view(np.uint8)
-
- if np.all(values.shape):
- _method(values, mask, limit=limit)
- else:
- # for test coverage
- pass
- return values
-
-
-def backfill_2d(values, limit=None, mask=None, dtype=None):
-
- if dtype is None:
- dtype = values.dtype
- _method = None
- if is_float_dtype(values):
- _method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
- elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
- _method = _backfill_2d_datetime
- elif is_integer_dtype(values):
- values = _ensure_float64(values)
- _method = algos.backfill_2d_inplace_float64
- elif values.dtype == np.object_:
- _method = algos.backfill_2d_inplace_object
-
- if _method is None:
- raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)
-
- if mask is None:
- mask = isnull(values)
- mask = mask.view(np.uint8)
-
- if np.all(values.shape):
- _method(values, mask, limit=limit)
- else:
- # for test coverage
- pass
- return values
-
-
-def _clean_interp_method(method, **kwargs):
- order = kwargs.get('order')
- valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
- 'quadratic', 'cubic', 'barycentric', 'polynomial',
- 'krogh', 'piecewise_polynomial',
- 'pchip', 'spline']
- if method in ('spline', 'polynomial') and order is None:
- raise ValueError("You must specify the order of the spline or "
- "polynomial.")
- if method not in valid:
- raise ValueError("method must be one of {0}."
- "Got '{1}' instead.".format(valid, method))
- return method
-
-
-def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
- limit_direction='forward',
- fill_value=None, bounds_error=False, order=None, **kwargs):
- """
- Logic for the 1-d interpolation. The result should be 1-d, inputs
- xvalues and yvalues will each be 1-d arrays of the same length.
-
- Bounds_error is currently hardcoded to False since non-scipy ones don't
- take it as an argumnet.
- """
- # Treat the original, non-scipy methods first.
-
- invalid = isnull(yvalues)
- valid = ~invalid
-
- if not valid.any():
- # have to call np.asarray(xvalues) since xvalues could be an Index
- # which cant be mutated
- result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
- result.fill(np.nan)
- return result
-
- if valid.all():
- return yvalues
-
- if method == 'time':
- if not getattr(xvalues, 'is_all_dates', None):
- # if not issubclass(xvalues.dtype.type, np.datetime64):
- raise ValueError('time-weighted interpolation only works '
- 'on Series or DataFrames with a '
- 'DatetimeIndex')
- method = 'values'
-
- def _interp_limit(invalid, fw_limit, bw_limit):
- "Get idx of values that won't be filled b/c they exceed the limits."
- for x in np.where(invalid)[0]:
- if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
- yield x
-
- valid_limit_directions = ['forward', 'backward', 'both']
- limit_direction = limit_direction.lower()
- if limit_direction not in valid_limit_directions:
- msg = 'Invalid limit_direction: expecting one of %r, got %r.' % (
- valid_limit_directions, limit_direction)
- raise ValueError(msg)
-
- from pandas import Series
- ys = Series(yvalues)
- start_nans = set(range(ys.first_valid_index()))
- end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
-
- # This is a list of the indexes in the series whose yvalue is currently NaN,
- # but whose interpolated yvalue will be overwritten with NaN after computing
- # the interpolation. For each index in this list, one of these conditions is
- # true of the corresponding NaN in the yvalues:
- #
- # a) It is one of a chain of NaNs at the beginning of the series, and either
- # limit is not specified or limit_direction is 'forward'.
- # b) It is one of a chain of NaNs at the end of the series, and limit is
- # specified and limit_direction is 'backward' or 'both'.
- # c) Limit is nonzero and it is further than limit from the nearest non-NaN
- # value (with respect to the limit_direction setting).
- #
- # The default behavior is to fill forward with no limit, ignoring NaNs at
- # the beginning (see issues #9218 and #10420)
- violate_limit = sorted(start_nans)
-
- if limit:
- if limit_direction == 'forward':
- violate_limit = sorted(start_nans | set(_interp_limit(invalid, limit, 0)))
- if limit_direction == 'backward':
- violate_limit = sorted(end_nans | set(_interp_limit(invalid, 0, limit)))
- if limit_direction == 'both':
- violate_limit = sorted(_interp_limit(invalid, limit, limit))
-
- xvalues = getattr(xvalues, 'values', xvalues)
- yvalues = getattr(yvalues, 'values', yvalues)
- result = yvalues.copy()
-
- if method in ['linear', 'time', 'index', 'values']:
- if method in ('values', 'index'):
- inds = np.asarray(xvalues)
- # hack for DatetimeIndex, #1646
- if issubclass(inds.dtype.type, np.datetime64):
- inds = inds.view(np.int64)
- if inds.dtype == np.object_:
- inds = lib.maybe_convert_objects(inds)
- else:
- inds = xvalues
- result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
- result[violate_limit] = np.nan
- return result
-
- sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
- 'barycentric', 'krogh', 'spline', 'polynomial',
- 'piecewise_polynomial', 'pchip']
- if method in sp_methods:
- inds = np.asarray(xvalues)
- # hack for DatetimeIndex, #1646
- if issubclass(inds.dtype.type, np.datetime64):
- inds = inds.view(np.int64)
- result[invalid] = _interpolate_scipy_wrapper(
- inds[valid], yvalues[valid], inds[invalid], method=method,
- fill_value=fill_value,
- bounds_error=bounds_error, order=order, **kwargs)
- result[violate_limit] = np.nan
- return result
-
-
-def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
- bounds_error=False, order=None, **kwargs):
- """
- passed off to scipy.interpolate.interp1d. method is scipy's kind.
- Returns an array interpolated at new_x. Add any new methods to
- the list in _clean_interp_method
- """
- try:
- from scipy import interpolate
- from pandas import DatetimeIndex
- except ImportError:
- raise ImportError('{0} interpolation requires Scipy'.format(method))
-
- new_x = np.asarray(new_x)
-
- # ignores some kwargs that could be passed along.
- alt_methods = {
- 'barycentric': interpolate.barycentric_interpolate,
- 'krogh': interpolate.krogh_interpolate,
- 'piecewise_polynomial': interpolate.piecewise_polynomial_interpolate,
- }
-
- if getattr(x, 'is_all_dates', False):
- # GH 5975, scipy.interp1d can't hande datetime64s
- x, new_x = x._values.astype('i8'), new_x.astype('i8')
-
- try:
- alt_methods['pchip'] = interpolate.pchip_interpolate
- except AttributeError:
- if method == 'pchip':
- raise ImportError("Your version of scipy does not support "
- "PCHIP interpolation.")
-
- interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
- 'polynomial']
- if method in interp1d_methods:
- if method == 'polynomial':
- method = order
- terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
- bounds_error=bounds_error)
- new_y = terp(new_x)
- elif method == 'spline':
- # GH #10633
- if not order:
- raise ValueError("order needs to be specified and greater than 0")
- terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
- new_y = terp(new_x)
- else:
- # GH 7295: need to be able to write for some reason
- # in some circumstances: check all three
- if not x.flags.writeable:
- x = x.copy()
- if not y.flags.writeable:
- y = y.copy()
- if not new_x.flags.writeable:
- new_x = new_x.copy()
- method = alt_methods[method]
- new_y = method(x, y, new_x, **kwargs)
- return new_y
-
-
-def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
- """ perform an actual interpolation of values, values will be make 2-d if
- needed fills inplace, returns the result
- """
-
- transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
-
- # reshape a 1 dim if needed
- ndim = values.ndim
- if values.ndim == 1:
- if axis != 0: # pragma: no cover
- raise AssertionError("cannot interpolate on a ndim == 1 with "
- "axis != 0")
- values = values.reshape(tuple((1,) + values.shape))
-
- if fill_value is None:
- mask = None
- else: # todo create faster fill func without masking
- mask = mask_missing(transf(values), fill_value)
-
- method = _clean_fill_method(method)
- if method == 'pad':
- values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
- else:
- values = transf(backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
-
- # reshape back
- if ndim == 1:
- values = values[0]
-
- return values
-
-
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
@@ -1772,14 +1428,6 @@ def _consensus_name_attr(objs):
return name
-_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
-
-
-def _get_fill_func(method):
- method = _clean_fill_method(method)
- return _fill_methods[method]
-
-
#----------------------------------------------------------------------
# Lots of little utilities
@@ -2815,31 +2463,6 @@ def _astype_nansafe(arr, dtype, copy=True):
return arr.view(dtype)
-def _clean_fill_method(method, allow_nearest=False):
- if method is None:
- return None
- method = method.lower()
- if method == 'ffill':
- method = 'pad'
- if method == 'bfill':
- method = 'backfill'
-
- valid_methods = ['pad', 'backfill']
- expecting = 'pad (ffill) or backfill (bfill)'
- if allow_nearest:
- valid_methods.append('nearest')
- expecting = 'pad (ffill), backfill (bfill) or nearest'
- if method not in valid_methods:
- msg = ('Invalid fill method. Expecting %s. Got %s'
- % (expecting, method))
- raise ValueError(msg)
- return method
-
-
-def _clean_reindex_fill_method(method):
- return _clean_fill_method(method, allow_nearest=True)
-
-
def _all_none(*args):
for arg in args:
if arg is not None:
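The ``interpolate_1d`` routine removed above (and re-added in ``pandas/core/missing.py`` by this patch) carries the ``limit``/``limit_direction`` rules spelled out in its comments. Purely as an illustration of those rules through the public ``Series.interpolate`` (assuming a pandas version that has ``limit_direction``, i.e. 0.17 or later):

```python
import numpy as np
import pandas as pd

s = pd.Series([np.nan, 1.0, np.nan, np.nan, 4.0, np.nan])

# default direction is 'forward': leading NaNs are never filled, and at
# most `limit` consecutive NaNs following a valid value are filled
print(s.interpolate(limit=1))

# with 'backward' the limit is applied toward the start instead,
# and the trailing NaN is the one left untouched
print(s.interpolate(limit=1, limit_direction='backward'))
```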
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c18f4ec0a1f47..f46296bb6f70c 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -16,6 +16,7 @@
from pandas.tseries.period import PeriodIndex
from pandas.core.internals import BlockManager
import pandas.core.common as com
+import pandas.core.missing as mis
import pandas.core.datetools as datetools
from pandas import compat
from pandas.compat import map, zip, lrange, string_types, isidentifier
@@ -50,7 +51,7 @@ def _single_replace(self, to_replace, method, inplace, limit):
orig_dtype = self.dtype
result = self if inplace else self.copy()
- fill_f = com._get_fill_func(method)
+ fill_f = mis._get_fill_func(method)
mask = com.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
@@ -1928,7 +1929,7 @@ def reindex(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
- method = com._clean_reindex_fill_method(kwargs.pop('method', None))
+ method = mis._clean_reindex_fill_method(kwargs.pop('method', None))
level = kwargs.pop('level', None)
copy = kwargs.pop('copy', True)
limit = kwargs.pop('limit', None)
@@ -2041,7 +2042,7 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis_name)
- method = com._clean_reindex_fill_method(method)
+ method = mis._clean_reindex_fill_method(method)
new_index, indexer = axis_values.reindex(labels, method, level,
limit=limit)
return self._reindex_with_indexers(
@@ -2775,7 +2776,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
if axis is None:
axis = 0
axis = self._get_axis_number(axis)
- method = com._clean_fill_method(method)
+ method = mis._clean_fill_method(method)
from pandas import DataFrame
if value is None:
@@ -2806,7 +2807,7 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
return self._constructor.from_dict(result).__finalize__(self)
# 2d or less
- method = com._clean_fill_method(method)
+ method = mis._clean_fill_method(method)
new_data = self._data.interpolate(method=method,
axis=axis,
limit=limit,
@@ -3749,7 +3750,7 @@ def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
from pandas import DataFrame, Series
- method = com._clean_fill_method(method)
+ method = mis._clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, Series):
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 003e444672d85..855e3f013bfd3 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -19,6 +19,7 @@
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
+from pandas.core.missing import _clean_reindex_fill_method
from pandas.core.common import (isnull, array_equivalent, is_dtype_equal, is_object_dtype,
is_datetimetz, ABCSeries, ABCCategorical, ABCPeriodIndex,
_values_from_object, is_float, is_integer, is_iterator, is_categorical_dtype,
@@ -1832,7 +1833,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
positions matches the corresponding target values. Missing values
in the target are marked by -1.
"""
- method = com._clean_reindex_fill_method(method)
+ method = _clean_reindex_fill_method(method)
target = _ensure_index(target)
if tolerance is not None:
tolerance = self._convert_tolerance(tolerance)
@@ -3347,7 +3348,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
-------
(indexer, mask) : (ndarray, ndarray)
"""
- method = com._clean_reindex_fill_method(method)
+ method = _clean_reindex_fill_method(method)
target = _ensure_index(target)
if isinstance(target, CategoricalIndex):
@@ -5131,7 +5132,7 @@ def get_indexer(self, target, method=None, limit=None, tolerance=None):
-------
(indexer, mask) : (ndarray, ndarray)
"""
- method = com._clean_reindex_fill_method(method)
+ method = _clean_reindex_fill_method(method)
target = _ensure_index(target)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index b3e7e82b5feb7..1b08140ebec09 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -25,6 +25,7 @@
from pandas.core.categorical import Categorical, maybe_to_categorical
from pandas.tseries.index import DatetimeIndex
import pandas.core.common as com
+import pandas.core.missing as mis
import pandas.core.convert as convert
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
@@ -852,7 +853,7 @@ def check_int_bool(self, inplace):
# a fill na type method
try:
- m = com._clean_fill_method(method)
+ m = mis._clean_fill_method(method)
except:
m = None
@@ -870,7 +871,7 @@ def check_int_bool(self, inplace):
mgr=mgr)
# try an interp method
try:
- m = com._clean_interp_method(method, **kwargs)
+ m = mis._clean_interp_method(method, **kwargs)
except:
m = None
@@ -909,7 +910,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
values = self.values if inplace else self.values.copy()
values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = self._try_operate(values)
- values = com.interpolate_2d(values,
+ values = mis.interpolate_2d(values,
method=method,
axis=axis,
limit=limit,
@@ -949,8 +950,8 @@ def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
- # i.e. not an arg to com.interpolate_1d
- return com.interpolate_1d(index, x, method=method, limit=limit,
+ # i.e. not an arg to mis.interpolate_1d
+ return mis.interpolate_1d(index, x, method=method, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
@@ -2357,7 +2358,7 @@ def make_block_same_class(self, values, placement,
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
- values = com.interpolate_2d(
+ values = mis.interpolate_2d(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
@@ -3773,7 +3774,7 @@ def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
# fill if needed
if method is not None or limit is not None:
- new_values = com.interpolate_2d(new_values, method=method,
+ new_values = mis.interpolate_2d(new_values, method=method,
limit=limit, fill_value=fill_value)
if self._block.is_sparse:
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
new file mode 100644
index 0000000000000..f1143ad808b91
--- /dev/null
+++ b/pandas/core/missing.py
@@ -0,0 +1,390 @@
+"""
+Routines for filling missing data
+"""
+
+from functools import partial
+
+import numpy as np
+
+import pandas as pd
+import pandas.core.common as com
+import pandas.algos as algos
+import pandas.lib as lib
+from pandas.compat import range
+
+
+def _clean_fill_method(method, allow_nearest=False):
+ if method is None:
+ return None
+ method = method.lower()
+ if method == 'ffill':
+ method = 'pad'
+ if method == 'bfill':
+ method = 'backfill'
+
+ valid_methods = ['pad', 'backfill']
+ expecting = 'pad (ffill) or backfill (bfill)'
+ if allow_nearest:
+ valid_methods.append('nearest')
+ expecting = 'pad (ffill), backfill (bfill) or nearest'
+ if method not in valid_methods:
+ msg = ('Invalid fill method. Expecting %s. Got %s'
+ % (expecting, method))
+ raise ValueError(msg)
+ return method
+
+
+def _clean_interp_method(method, **kwargs):
+ order = kwargs.get('order')
+ valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
+ 'quadratic', 'cubic', 'barycentric', 'polynomial',
+ 'krogh', 'piecewise_polynomial',
+ 'pchip', 'spline']
+ if method in ('spline', 'polynomial') and order is None:
+ raise ValueError("You must specify the order of the spline or "
+ "polynomial.")
+ if method not in valid:
+ raise ValueError("method must be one of {0}."
+ "Got '{1}' instead.".format(valid, method))
+ return method
+
+
+def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
+ limit_direction='forward',
+ fill_value=None, bounds_error=False, order=None, **kwargs):
+ """
+ Logic for the 1-d interpolation. The result should be 1-d, inputs
+ xvalues and yvalues will each be 1-d arrays of the same length.
+
+ Bounds_error is currently hardcoded to False since non-scipy ones don't
+    take it as an argument.
+ """
+ # Treat the original, non-scipy methods first.
+
+ invalid = com.isnull(yvalues)
+ valid = ~invalid
+
+ if not valid.any():
+ # have to call np.asarray(xvalues) since xvalues could be an Index
+ # which cant be mutated
+ result = np.empty_like(np.asarray(xvalues), dtype=np.float64)
+ result.fill(np.nan)
+ return result
+
+ if valid.all():
+ return yvalues
+
+ if method == 'time':
+ if not getattr(xvalues, 'is_all_dates', None):
+ # if not issubclass(xvalues.dtype.type, np.datetime64):
+ raise ValueError('time-weighted interpolation only works '
+ 'on Series or DataFrames with a '
+ 'DatetimeIndex')
+ method = 'values'
+
+ def _interp_limit(invalid, fw_limit, bw_limit):
+ "Get idx of values that won't be filled b/c they exceed the limits."
+ for x in np.where(invalid)[0]:
+ if invalid[max(0, x - fw_limit):x + bw_limit + 1].all():
+ yield x
+
+ valid_limit_directions = ['forward', 'backward', 'both']
+ limit_direction = limit_direction.lower()
+ if limit_direction not in valid_limit_directions:
+ msg = 'Invalid limit_direction: expecting one of %r, got %r.' % (
+ valid_limit_directions, limit_direction)
+ raise ValueError(msg)
+
+ from pandas import Series
+ ys = Series(yvalues)
+ start_nans = set(range(ys.first_valid_index()))
+ end_nans = set(range(1 + ys.last_valid_index(), len(valid)))
+
+ # This is a list of the indexes in the series whose yvalue is currently NaN,
+ # but whose interpolated yvalue will be overwritten with NaN after computing
+ # the interpolation. For each index in this list, one of these conditions is
+ # true of the corresponding NaN in the yvalues:
+ #
+ # a) It is one of a chain of NaNs at the beginning of the series, and either
+ # limit is not specified or limit_direction is 'forward'.
+ # b) It is one of a chain of NaNs at the end of the series, and limit is
+ # specified and limit_direction is 'backward' or 'both'.
+ # c) Limit is nonzero and it is further than limit from the nearest non-NaN
+ # value (with respect to the limit_direction setting).
+ #
+ # The default behavior is to fill forward with no limit, ignoring NaNs at
+ # the beginning (see issues #9218 and #10420)
+ violate_limit = sorted(start_nans)
+
+ if limit:
+ if limit_direction == 'forward':
+ violate_limit = sorted(start_nans | set(_interp_limit(invalid, limit, 0)))
+ if limit_direction == 'backward':
+ violate_limit = sorted(end_nans | set(_interp_limit(invalid, 0, limit)))
+ if limit_direction == 'both':
+ violate_limit = sorted(_interp_limit(invalid, limit, limit))
+
+ xvalues = getattr(xvalues, 'values', xvalues)
+ yvalues = getattr(yvalues, 'values', yvalues)
+ result = yvalues.copy()
+
+ if method in ['linear', 'time', 'index', 'values']:
+ if method in ('values', 'index'):
+ inds = np.asarray(xvalues)
+ # hack for DatetimeIndex, #1646
+ if issubclass(inds.dtype.type, np.datetime64):
+ inds = inds.view(np.int64)
+ if inds.dtype == np.object_:
+ inds = lib.maybe_convert_objects(inds)
+ else:
+ inds = xvalues
+ result[invalid] = np.interp(inds[invalid], inds[valid], yvalues[valid])
+ result[violate_limit] = np.nan
+ return result
+
+ sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
+ 'barycentric', 'krogh', 'spline', 'polynomial',
+ 'piecewise_polynomial', 'pchip']
+ if method in sp_methods:
+ inds = np.asarray(xvalues)
+ # hack for DatetimeIndex, #1646
+ if issubclass(inds.dtype.type, np.datetime64):
+ inds = inds.view(np.int64)
+ result[invalid] = _interpolate_scipy_wrapper(
+ inds[valid], yvalues[valid], inds[invalid], method=method,
+ fill_value=fill_value,
+ bounds_error=bounds_error, order=order, **kwargs)
+ result[violate_limit] = np.nan
+ return result
+
+
+def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
+ bounds_error=False, order=None, **kwargs):
+ """
+ passed off to scipy.interpolate.interp1d. method is scipy's kind.
+ Returns an array interpolated at new_x. Add any new methods to
+ the list in _clean_interp_method
+ """
+ try:
+ from scipy import interpolate
+ from pandas import DatetimeIndex
+ except ImportError:
+ raise ImportError('{0} interpolation requires Scipy'.format(method))
+
+ new_x = np.asarray(new_x)
+
+ # ignores some kwargs that could be passed along.
+ alt_methods = {
+ 'barycentric': interpolate.barycentric_interpolate,
+ 'krogh': interpolate.krogh_interpolate,
+ 'piecewise_polynomial': interpolate.piecewise_polynomial_interpolate,
+ }
+
+ if getattr(x, 'is_all_dates', False):
+        # GH 5975, scipy.interp1d can't handle datetime64s
+ x, new_x = x._values.astype('i8'), new_x.astype('i8')
+
+ try:
+ alt_methods['pchip'] = interpolate.pchip_interpolate
+ except AttributeError:
+ if method == 'pchip':
+ raise ImportError("Your version of scipy does not support "
+ "PCHIP interpolation.")
+
+ interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
+ 'polynomial']
+ if method in interp1d_methods:
+ if method == 'polynomial':
+ method = order
+ terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
+ bounds_error=bounds_error)
+ new_y = terp(new_x)
+ elif method == 'spline':
+ # GH #10633
+ if not order:
+ raise ValueError("order needs to be specified and greater than 0")
+ terp = interpolate.UnivariateSpline(x, y, k=order, **kwargs)
+ new_y = terp(new_x)
+ else:
+ # GH 7295: need to be able to write for some reason
+ # in some circumstances: check all three
+ if not x.flags.writeable:
+ x = x.copy()
+ if not y.flags.writeable:
+ y = y.copy()
+ if not new_x.flags.writeable:
+ new_x = new_x.copy()
+ method = alt_methods[method]
+ new_y = method(x, y, new_x, **kwargs)
+ return new_y
+
+
+def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
+ """ perform an actual interpolation of values, values will be make 2-d if
+ needed fills inplace, returns the result
+ """
+
+ transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
+
+ # reshape a 1 dim if needed
+ ndim = values.ndim
+ if values.ndim == 1:
+ if axis != 0: # pragma: no cover
+ raise AssertionError("cannot interpolate on a ndim == 1 with "
+ "axis != 0")
+ values = values.reshape(tuple((1,) + values.shape))
+
+ if fill_value is None:
+ mask = None
+ else: # todo create faster fill func without masking
+ mask = com.mask_missing(transf(values), fill_value)
+
+ method = _clean_fill_method(method)
+ if method == 'pad':
+ values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
+ else:
+ values = transf(backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
+
+ # reshape back
+ if ndim == 1:
+ values = values[0]
+
+ return values
+
+
+def _interp_wrapper(f, wrap_dtype, na_override=None):
+ def wrapper(arr, mask, limit=None):
+ view = arr.view(wrap_dtype)
+ f(view, mask, limit=limit)
+ return wrapper
+
+
+_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
+_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
+_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64,
+ np.int64)
+_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
+ np.int64)
+
+
+def pad_1d(values, limit=None, mask=None, dtype=None):
+
+ if dtype is None:
+ dtype = values.dtype
+ _method = None
+ if com.is_float_dtype(values):
+ _method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
+ elif dtype in com._DATELIKE_DTYPES or com.is_datetime64_dtype(values):
+ _method = _pad_1d_datetime
+ elif com.is_integer_dtype(values):
+ values = com._ensure_float64(values)
+ _method = algos.pad_inplace_float64
+ elif values.dtype == np.object_:
+ _method = algos.pad_inplace_object
+
+ if _method is None:
+ raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)
+
+ if mask is None:
+ mask = com.isnull(values)
+ mask = mask.view(np.uint8)
+ _method(values, mask, limit=limit)
+ return values
+
+
+def backfill_1d(values, limit=None, mask=None, dtype=None):
+
+ if dtype is None:
+ dtype = values.dtype
+ _method = None
+ if com.is_float_dtype(values):
+ _method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
+ elif dtype in com._DATELIKE_DTYPES or com.is_datetime64_dtype(values):
+ _method = _backfill_1d_datetime
+ elif com.is_integer_dtype(values):
+ values = com._ensure_float64(values)
+ _method = algos.backfill_inplace_float64
+ elif values.dtype == np.object_:
+ _method = algos.backfill_inplace_object
+
+ if _method is None:
+ raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)
+
+ if mask is None:
+ mask = com.isnull(values)
+ mask = mask.view(np.uint8)
+
+ _method(values, mask, limit=limit)
+ return values
+
+
+def pad_2d(values, limit=None, mask=None, dtype=None):
+
+ if dtype is None:
+ dtype = values.dtype
+ _method = None
+ if com.is_float_dtype(values):
+ _method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
+ elif dtype in com._DATELIKE_DTYPES or com.is_datetime64_dtype(values):
+ _method = _pad_2d_datetime
+ elif com.is_integer_dtype(values):
+ values = com._ensure_float64(values)
+ _method = algos.pad_2d_inplace_float64
+ elif values.dtype == np.object_:
+ _method = algos.pad_2d_inplace_object
+
+ if _method is None:
+ raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)
+
+ if mask is None:
+ mask = com.isnull(values)
+ mask = mask.view(np.uint8)
+
+ if np.all(values.shape):
+ _method(values, mask, limit=limit)
+ else:
+ # for test coverage
+ pass
+ return values
+
+
+def backfill_2d(values, limit=None, mask=None, dtype=None):
+
+ if dtype is None:
+ dtype = values.dtype
+ _method = None
+ if com.is_float_dtype(values):
+ _method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
+ elif dtype in com._DATELIKE_DTYPES or com.is_datetime64_dtype(values):
+ _method = _backfill_2d_datetime
+ elif com.is_integer_dtype(values):
+ values = com._ensure_float64(values)
+ _method = algos.backfill_2d_inplace_float64
+ elif values.dtype == np.object_:
+ _method = algos.backfill_2d_inplace_object
+
+ if _method is None:
+ raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)
+
+ if mask is None:
+ mask = com.isnull(values)
+ mask = mask.view(np.uint8)
+
+ if np.all(values.shape):
+ _method(values, mask, limit=limit)
+ else:
+ # for test coverage
+ pass
+ return values
+
+
+_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
+
+
+def _get_fill_func(method):
+ method = _clean_fill_method(method)
+ return _fill_methods[method]
+
+
+def _clean_reindex_fill_method(method):
+ return _clean_fill_method(method, allow_nearest=True)
| This PR moves the routines used for filling missing data into their own module as suggested in PR #11445.
Thoughts on re-naming `interpolate_1d()` and `interpolate_2d()`? The way they are currently named makes them sound like they are the same function for arrays of differing dimensions. However, `interpolate_1d()` is used for interpolating with scipy interp methods, whereas `interpolate_2d()` is used for forward- or back-filling.
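For reference, a minimal sketch (not part of this diff) of how the two code paths differ when reached through the public API: roughly, `Series.interpolate` routes through `interpolate_1d()`, while pad/backfill filling routes through `interpolate_2d()`.

```python
import numpy as np
import pandas as pd

s = pd.Series([1.0, np.nan, np.nan, 4.0])

# Routed (roughly) through interpolate_1d: missing values are interpolated
# between the surrounding valid points; scipy is only needed for the
# non-default methods such as 'spline' or 'pchip'.
s.interpolate(method='linear')   # -> 1.0, 2.0, 3.0, 4.0

# Routed (roughly) through interpolate_2d and pad_2d: the last valid
# observation is propagated forward rather than interpolated.
s.fillna(method='ffill')         # -> 1.0, 1.0, 1.0, 4.0
```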
| https://api.github.com/repos/pandas-dev/pandas/pulls/11462 | 2015-10-28T18:54:11Z | 2015-10-28T21:49:00Z | 2015-10-28T21:49:00Z | 2015-10-28T21:56:56Z |
allow rename to work with any Mapping, not just dict | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index c18f4ec0a1f47..52605ac8e41b1 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -4,6 +4,8 @@
import weakref
import gc
+from collections.abc import Mapping
+
import numpy as np
import pandas.lib as lib
@@ -580,7 +582,7 @@ def rename(self, *args, **kwargs):
# renamer function if passed a dict
def _get_rename_function(mapper):
- if isinstance(mapper, (dict, ABCSeries)):
+ if isinstance(mapper, (Mapping, ABCSeries)):
def f(x):
if x in mapper:
return mapper[x]
 | http://pandas.pydata.org/pandas-docs/stable/generated/pandas.DataFrame.rename.html claims that `rename` works with a dict-like object, but according to https://github.com/jab/bidict/issues/18#issuecomment-147643550 it does not. This patch changes `isinstance(..., dict)` to `isinstance(..., collections.abc.Mapping)` as a first-pass attempt to bring the code more closely in line with the documentation.
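A minimal sketch of what this enables (the `FrozenMap` class below is hypothetical, not part of the patch): before the change, `_get_rename_function` falls through to treating a non-dict mapping as a callable, so `rename` raises; after it, the mapping is used as a lookup table.

```python
from collections.abc import Mapping

import pandas as pd


class FrozenMap(Mapping):
    """A toy read-only mapping that is *not* a dict subclass (hypothetical)."""

    def __init__(self, data):
        self._data = dict(data)

    def __getitem__(self, key):
        return self._data[key]

    def __iter__(self):
        return iter(self._data)

    def __len__(self):
        return len(self._data)


df = pd.DataFrame({'a': [1], 'b': [2]})

# With isinstance(mapper, dict) this fails because FrozenMap is not callable;
# with isinstance(mapper, Mapping) it is used as a lookup table, so the
# column 'a' becomes 'alpha' and 'b' is left untouched.
df.rename(columns=FrozenMap({'a': 'alpha'}))
```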
| https://api.github.com/repos/pandas-dev/pandas/pulls/11461 | 2015-10-28T18:07:17Z | 2015-11-25T18:40:32Z | null | 2019-08-24T18:14:07Z |
implemented fix for groupby date bug, #11324 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 84db16e338d87..3b3c250862b2e 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -117,3 +117,5 @@ Bug Fixes
- Bug in ``to_excel`` with openpyxl 2.2+ and merging (:issue:`11408`)
- Bug in ``DataFrame.to_dict()`` produces a ``np.datetime64`` object instead of ``Timestamp`` when only datetime is present in data (:issue:`11327`)
+
+- Bug in ``pandas.core.groupby`` raises exception when ``func`` in ``df.groupby(...).apply(func)`` doesn't return existing time columns (:issue:`11324`)
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index add5080a69ee4..9c5a40f6e34d6 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -3124,6 +3124,7 @@ def _wrap_applied_output(self, keys, values, not_indexed_same=False):
result = result._convert(numeric=True)
date_cols = self._selected_obj.select_dtypes(
include=list(_DATELIKE_DTYPES)).columns
+ date_cols = date_cols.intersection(result.columns)
result[date_cols] = (result[date_cols]
._convert(datetime=True,
coerce=True))
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 46026a4c887a6..9649288ab5b6d 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -824,6 +824,30 @@ def test_apply_issues(self):
result = df.groupby('date').apply(lambda x: x['time'][x['value'].idxmax()])
assert_series_equal(result, expected)
+ def test_time_field_bug(self):
+ # Test a fix for the following error related to GH issue 11324
+ # When non-key fields in a group-by dataframe contained time-based fields that
+ # were not returned by the apply function, an exception would be raised.
+
+ df = pd.DataFrame({'a': 1,'b': [datetime.now() for nn in range(10)]})
+
+ def func_with_no_date(batch):
+ return pd.Series({'c': 2})
+
+ def func_with_date(batch):
+ return pd.Series({'c': 2, 'b': datetime(2015, 1, 1)})
+
+ dfg_no_conversion = df.groupby(by=['a']).apply(func_with_no_date)
+ dfg_no_conversion_expected = pd.DataFrame({'c': 2}, index=[1])
+ dfg_no_conversion_expected.index.name = 'a'
+
+ dfg_conversion = df.groupby(by=['a']).apply(func_with_date)
+ dfg_conversion_expected = pd.DataFrame({'b': datetime(2015, 1, 1), 'c': 2}, index=[1])
+ dfg_conversion_expected.index.name = 'a'
+
+ self.assert_frame_equal(dfg_no_conversion, dfg_no_conversion_expected)
+ self.assert_frame_equal(dfg_conversion, dfg_conversion_expected)
+
def test_len(self):
df = tm.makeTimeDataFrame()
grouped = df.groupby([lambda x: x.year,
 | This is a fix for issue #11324, in which a groupby-apply on a DataFrame containing datetime-like columns raises an exception when the applied function does not return those columns.
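A condensed version of the regression test added above, showing the shape of the failure being fixed (the datetime column `b` is present in the grouped frame but absent from the applied function's result):

```python
from datetime import datetime

import pandas as pd

df = pd.DataFrame({'a': 1, 'b': [datetime.now() for _ in range(10)]})

# Before this fix, _wrap_applied_output tried to re-coerce the datetime
# column 'b' on the result even though the result does not contain it,
# which raised an exception; intersecting date_cols with result.columns
# avoids indexing columns that are not there.
df.groupby('a').apply(lambda batch: pd.Series({'c': 2}))
```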
| https://api.github.com/repos/pandas-dev/pandas/pulls/11460 | 2015-10-28T17:04:13Z | 2015-11-13T21:48:21Z | null | 2015-11-13T21:48:21Z |
ENH: tilde expansion for write functions, #11438 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 84db16e338d87..3a4781f1d022d 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -1,23 +1,56 @@
.. _whatsnew_0171:
-v0.17.1 (November ??, 2015)
+v0.17.1 (November 21, 2015)
---------------------------
-This is a minor bug-fix release from 0.17.0 and includes a a large number of
+This is a minor bug-fix release from 0.17.0 and includes a large number of
bug fixes along several new features, enhancements, and performance improvements.
We recommend that all users upgrade to this version.
Highlights include:
+- Support for Conditional HTML Formatting, see :ref:`here <whatsnew_0171.style>`
+- Releasing the GIL on the csv reader & other ops, see :ref:`here <whatsnew_0171.performance>`
+- Regression in ``DataFrame.drop_duplicates`` from 0.16.2, causing incorrect results on integer values (:issue:`11376`)
+
.. contents:: What's new in v0.17.1
:local:
:backlinks: none
+New features
+~~~~~~~~~~~~
+
+.. _whatsnew_0171.style:
+
+Conditional HTML Formatting
+^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+We've added *experimental* support for conditional HTML formatting:
+the visual styling of a DataFrame based on the data.
+The styling is accomplished with HTML and CSS.
+Acesses the styler class with :attr:`pandas.DataFrame.style`, attribute,
+an instance of :class:`~pandas.core.style.Styler` with your data attached.
+
+Here's a quick example:
+
+ .. ipython:: python
+
+ np.random.seed(123)
+ df = DataFrame(np.random.randn(10, 5), columns=list('abcde'))
+ html = df.style.background_gradient(cmap='viridis', low=.5)
+
+We can render the HTML to get the following table.
+
+.. raw:: html
+ :file: whatsnew_0171_html_table.html
+
+See the :ref:`example notebook <style>` for more.
+
.. _whatsnew_0171.enhancements:
Enhancements
~~~~~~~~~~~~
-- ``DatetimeIndex`` now supports conversion to strings with astype(str) (:issue:`10442`)
+- ``DatetimeIndex`` now supports conversion to strings with ``astype(str)`` (:issue:`10442`)
- Support for ``compression`` (gzip/bz2) in :meth:`pandas.DataFrame.to_csv` (:issue:`7615`)
- Improve the error message in :func:`pandas.io.gbq.to_gbq` when a streaming insert fails (:issue:`11285`)
@@ -25,12 +58,48 @@ Enhancements
objects for the ``filepath_or_buffer`` argument. (:issue:`11033`)
- ``DataFrame`` now uses the fields of a ``namedtuple`` as columns, if columns are not supplied (:issue:`11181`)
- Improve the error message displayed in :func:`pandas.io.gbq.to_gbq` when the DataFrame does not match the schema of the destination table (:issue:`11359`)
+- Added ``axvlines_kwds`` to parallel coordinates plot (:issue:`10709`)
+
+- Option to ``.info()`` and ``.memory_usage()`` to provide for deep introspection of memory consumption. Note that this can be expensive to compute and therefore is an optional parameter. (:issue:`11595`)
+
+ .. ipython:: python
+
+ df = DataFrame({'A' : ['foo']*1000})
+ df['B'] = df['A'].astype('category')
+
+ # shows the '+' as we have object dtypes
+ df.info()
+
+ # we have an accurate memory assessment (but can be expensive to compute this)
+ df.info(memory_usage='deep')
+
+- ``Index`` now has a ``fillna`` method (:issue:`10089`)
+
+ .. ipython:: python
+
+ pd.Index([1, np.nan, 3]).fillna(2)
+
+- Series of type ``"category"`` now make ``.str.<...>`` and ``.dt.<...>`` accessor methods / properties available, if the categories are of that type. (:issue:`10661`)
+
+ .. ipython:: python
+
+ s = pd.Series(list('aabb')).astype('category')
+ s
+ s.str.contains("a")
+
+ date = pd.Series(pd.date_range('1/1/2015', periods=5)).astype('category')
+ date
+ date.dt.day
+
+- ``pivot_table`` now has a ``margins_name`` argument so you can use something other than the default of 'All' (:issue:`3335`)
+- Implement export of ``datetime64[ns, tz]`` dtypes with a fixed HDF5 store (:issue:`11411`)
+- The ``DataFrame`` and ``Series`` functions ``.to_csv()``, ``.to_html()`` and ``.to_latex()`` can now handle paths beginning with tildes (e.g. ``~/Documents/``)
.. _whatsnew_0171.api:
API changes
~~~~~~~~~~~
-
+- raise ``NotImplementedError`` in ``Index.shift`` for non-supported index types (:issue:`8083`)
- min and max reductions on ``datetime64`` and ``timedelta64`` dtyped series now
result in ``NaT`` and not ``nan`` (:issue:`11245`).
- Regression from 0.16.2 for output formatting of long floats/nan, restored in (:issue:`11302`)
@@ -38,7 +107,10 @@ API changes
Legacy Python syntax (``set([x, y])``) (:issue:`11215`)
- Indexing with a null key will raise a ``TypeError``, instead of a ``ValueError`` (:issue:`11356`)
- ``Series.sort_index()`` now correctly handles the ``inplace`` option (:issue:`11402`)
-- ``DataFrame.itertuples()`` now returns ``namedtuple`` objects, when possible. (:issue:`11269`)
+- ``SparseArray.__iter__()`` now does not cause ``PendingDeprecationWarning`` in Python 3.5 (:issue:`11622`)
+
+- ``DataFrame.itertuples()`` now returns ``namedtuple`` objects, when possible. (:issue:`11269`, :issue:`11625`)
+- ``Series.ptp`` will now ignore missing values by default (:issue:`11163`)
.. _whatsnew_0171.deprecations:
@@ -46,7 +118,7 @@ Deprecations
^^^^^^^^^^^^
- The ``pandas.io.ga`` module which implements ``google-analytics`` support is deprecated and will be removed in a future version (:issue:`11308`)
-- Deprecate the ``engine`` keyword from ``.to_csv()``, which will be removed in a future version (:issue:`11274`)
+- Deprecate the ``engine`` keyword in ``.to_csv()``, which will be removed in a future version (:issue:`11274`)
.. _whatsnew_0171.performance:
@@ -56,64 +128,67 @@ Performance Improvements
- Checking monotonic-ness before sorting on an index (:issue:`11080`)
- ``Series.dropna`` performance improvement when its dtype can't contain ``NaN`` (:issue:`11159`)
-
-
- Release the GIL on most datetime field operations (e.g. ``DatetimeIndex.year``, ``Series.dt.year``), normalization, and conversion to and from ``Period``, ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestamp`` (:issue:`11263`)
-
-
+- Release the GIL on some rolling algos (``rolling_median``, ``rolling_mean``, ``rolling_max``, ``rolling_min``, ``rolling_var``, ``rolling_kurt``, ``rolling_skew`` (:issue:`11450`)
+- Release the GIL when reading and parsing text files in ``read_csv``, ``read_table`` (:issue:`11272`)
+- Improved performance of ``rolling_median`` (:issue:`11450`)
- Improved performance to ``to_excel`` (:issue:`11352`)
+- Performance bug in repr of ``Categorical`` categories, which was rendering the strings before chopping them for display (:issue:`11305`)
+- Performance improvement in ``Categorical.remove_unused_categories``, (:issue:`11643`).
+- Improved performance of ``Series`` constructor with no data and ``DatetimeIndex`` (:issue:`11433`)
+
+- Improved performance of ``shift``, ``cumprod``, and ``cumsum`` with groupby (:issue:`4095`)
.. _whatsnew_0171.bug_fixes:
+
Bug Fixes
~~~~~~~~~
-- Bug in ``.to_latex()`` output broken when the index has a name (:issue: `10660`)
+- Incorrectly distributed .c file in the build on ``PyPi`` when reading a csv of floats and passing ``na_values=<a scalar>`` would show an exception (:issue:`11374`)
+- Bug in ``.to_latex()`` output broken when the index has a name (:issue:`10660`)
- Bug in ``HDFStore.append`` with strings whose encoded length exceded the max unencoded length (:issue:`11234`)
- Bug in merging ``datetime64[ns, tz]`` dtypes (:issue:`11405`)
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
-- Bug in using ``DataFrame.ix`` with a multi-index indexer(:issue:`11372`)
-
-
+- Bug in using ``DataFrame.ix`` with a multi-index indexer (:issue:`11372`)
+- Bug in ``date_range`` with ambigous endpoints (:issue:`11626`)
+- Prevent adding new attributes to the accessors ``.str``, ``.dt`` and ``.cat``. Retrieving such
+ a value was not possible, so error out on setting it. (:issue:`10673`)
- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issue:`11295`)
+- Bug in output formatting when using an index of ambiguous times (:issue:`11619`)
- Bug in comparisons of Series vs list-likes (:issue:`11339`)
-
-
- Bug in ``DataFrame.replace`` with a ``datetime64[ns, tz]`` and a non-compat to_replace (:issue:`11326`, :issue:`11153`)
-
- Bug in list-like indexing with a mixed-integer Index (:issue:`11320`)
-
- Bug in ``pivot_table`` with ``margins=True`` when indexes are of ``Categorical`` dtype (:issue:`10993`)
- Bug in ``DataFrame.plot`` cannot use hex strings colors (:issue:`10299`)
-
-- Bug in ``DataFrame.drop_duplicates`` (regression from 0.16.2) causing some non-duplicate rows containing integer values to be dropped (:issue:`11376`)
-
-
+- Regression in ``DataFrame.drop_duplicates`` from 0.16.2, causing incorrect results on integer values (:issue:`11376`)
- Bug in ``pd.eval`` where unary ops in a list error (:issue:`11235`)
- Bug in ``squeeze()`` with zero length arrays (:issue:`11230`, :issue:`8999`)
+- Bug in ``describe()`` dropping column names for hierarchical indexes (:issue:`11517`)
+- Bug in ``DataFrame.pct_change()`` not propagating ``axis`` keyword on ``.fillna`` method (:issue:`11150`)
-
-
-
-
-
-
+- Bug in ``to_sql`` using unicode column names giving UnicodeEncodeError with (:issue:`11431`).
+- Fix regression in setting of ``xticks`` in ``plot`` (:issue:`11529`).
+- Bug in ``holiday.dates`` where observance rules could not be applied to holiday and doc enhancement (:issue:`11477`, :issue:`11533`)
+- Fix plotting issues when having plain ``Axes`` instances instead of ``SubplotAxes`` (:issue:`11520`, :issue:`11556`).
- Bug in ``DataFrame.to_latex()`` produces an extra rule when ``header=False`` (:issue:`7124`)
-
-
+- Bug in ``df.groupby(...).apply(func)`` when a func returns a ``Series`` containing a new datetimelike column (:issue:`11324`)
- Bug in ``pandas.json`` when file to load is big (:issue:`11344`)
- Bugs in ``to_excel`` with duplicate columns (:issue:`11007`, :issue:`10982`, :issue:`10970`)
-
-- Fixed a bug that prevented the construction of an empty series of dtype
- ``datetime64[ns, tz]`` (:issue:`11245`).
-
+- Fixed a bug that prevented the construction of an empty series of dtype ``datetime64[ns, tz]`` (:issue:`11245`).
- Bug in ``read_excel`` with multi-index containing integers (:issue:`11317`)
-
- Bug in ``to_excel`` with openpyxl 2.2+ and merging (:issue:`11408`)
-
- Bug in ``DataFrame.to_dict()`` produces a ``np.datetime64`` object instead of ``Timestamp`` when only datetime is present in data (:issue:`11327`)
+- Bug in ``DataFrame.corr()`` raises exception when computes Kendall correlation for DataFrames with boolean and not boolean columns (:issue:`11560`)
+- Bug in the link-time error caused by C ``inline`` functions on FreeBSD 10+ (with ``clang``) (:issue:`10510`)
+- Bug in ``DataFrame.to_csv`` in passing through arguments for formatting ``MultiIndexes``, including ``date_format`` (:issue:`7791`)
+- Bug in ``DataFrame.join()`` with ``how='right'`` producing a ``TypeError`` (:issue:`11519`)
+- Bug in ``Series.quantile`` with empty list results has ``Index`` with ``object`` dtype (:issue:`11588`)
+- Bug in ``pd.merge`` results in empty ``Int64Index`` rather than ``Index(dtype=object)`` when the merge result is empty (:issue:`11588`)
+- Bug in ``Categorical.remove_unused_categories`` when having ``NaN`` values (:issue:`11599`)
+- Bug in ``DataFrame.to_sparse()`` loses column names for MultiIndexes (:issue:`11600`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b06f1b947bbe7..8617be1a244d7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -53,7 +53,7 @@
from pandas.tseries.period import PeriodIndex
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.tdi import TimedeltaIndex
-
+from pandas.io.common import _expand_user
import pandas.core.algorithms as algos
import pandas.core.base as base
@@ -1302,7 +1302,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
.. versionadded:: 0.16.0
"""
-
+ path_or_buf = _expand_user(path_or_buf)
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator,
sep=sep, encoding=encoding,
@@ -1505,6 +1505,7 @@ def to_html(self, buf=None, columns=None, col_space=None, colSpace=None,
FutureWarning, stacklevel=2)
col_space = colSpace
+ buf = _expand_user(buf)
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
formatters=formatters,
@@ -1554,6 +1555,7 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
FutureWarning, stacklevel=2)
col_space = colSpace
+ buf = _expand_user(buf)
formatter = fmt.DataFrameFormatter(self, buf=buf, columns=columns,
col_space=col_space, na_rep=na_rep,
header=header, index=index,
| closes #11438
Added calls to `_expand_user()` in multiple Formatter classes so that writer functions, such as `to_csv()` and `to_html()`, can perform tilde expansion on file paths beginning with `~/`. This makes the writer functions consistent with the reader functions, such as `read_csv()`. I also changed the `__init__()` for `CSVFormatter` to conform to the PEP 8 line-width limit.
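For illustration, a small sketch of the behaviour this enables (the output path is hypothetical); `_expand_user()` essentially applies `os.path.expanduser()` when given a string path and passes buffers through unchanged:

```python
import os

import pandas as pd

df = pd.DataFrame({'a': [1, 2, 3]})

path = '~/pandas_example.csv'   # hypothetical output location

# With this change the two calls below write to the same file; previously
# the first one looked for a literal '~' directory under the current
# working directory and typically failed.
df.to_csv(path)
df.to_csv(os.path.expanduser(path))
```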
| https://api.github.com/repos/pandas-dev/pandas/pulls/11458 | 2015-10-28T15:55:59Z | 2015-11-19T20:26:35Z | null | 2015-11-23T17:26:10Z |
CLN/PERF: remove unused functions; use C skip list for rolling median | diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index eeca2d54381b2..fdeace108f76e 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -366,3 +366,73 @@ def time_period_to_datetime(self):
def run(period):
period.to_timestamp()
run(self.period)
+
+
+class nogil_rolling_algos_slow(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.win = 100
+ np.random.seed(1234)
+ self.arr = np.random.rand(100000)
+ if (not have_real_test_parallel):
+ raise NotImplementedError
+
+ def time_nogil_rolling_median(self):
+ @test_parallel(num_threads=2)
+ def run(arr, win):
+ rolling_median(arr, win)
+ run(self.arr, self.win)
+
+
+class nogil_rolling_algos_fast(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.win = 100
+ np.random.seed(1234)
+ self.arr = np.random.rand(1000000)
+ if (not have_real_test_parallel):
+ raise NotImplementedError
+
+ def time_nogil_rolling_mean(self):
+ @test_parallel(num_threads=2)
+ def run(arr, win):
+ rolling_mean(arr, win)
+ run(self.arr, self.win)
+
+ def time_nogil_rolling_min(self):
+ @test_parallel(num_threads=2)
+ def run(arr, win):
+ rolling_min(arr, win)
+ run(self.arr, self.win)
+
+ def time_nogil_rolling_max(self):
+ @test_parallel(num_threads=2)
+ def run(arr, win):
+ rolling_max(arr, win)
+ run(self.arr, self.win)
+
+ def time_nogil_rolling_var(self):
+ @test_parallel(num_threads=2)
+ def run(arr, win):
+ rolling_var(arr, win)
+ run(self.arr, self.win)
+
+ def time_nogil_rolling_skew(self):
+ @test_parallel(num_threads=2)
+ def run(arr, win):
+ rolling_skew(arr, win)
+ run(self.arr, self.win)
+
+ def time_nogil_rolling_kurt(self):
+ @test_parallel(num_threads=2)
+ def run(arr, win):
+ rolling_kurt(arr, win)
+ run(self.arr, self.win)
+
+ def time_nogil_rolling_std(self):
+ @test_parallel(num_threads=2)
+ def run(arr, win):
+ rolling_std(arr, win)
+ run(self.arr, self.win)
\ No newline at end of file
diff --git a/asv_bench/benchmarks/stat_ops.py b/asv_bench/benchmarks/stat_ops.py
index 4125357455d2e..daf5135e64c40 100644
--- a/asv_bench/benchmarks/stat_ops.py
+++ b/asv_bench/benchmarks/stat_ops.py
@@ -231,6 +231,31 @@ class stats_rolling_mean(object):
def setup(self):
self.arr = np.random.randn(100000)
+ self.win = 100
- def time_stats_rolling_mean(self):
- rolling_mean(self.arr, 100)
\ No newline at end of file
+ def time_rolling_mean(self):
+ rolling_mean(self.arr, self.win)
+
+ def time_rolling_median(self):
+ rolling_median(self.arr, self.win)
+
+ def time_rolling_min(self):
+ rolling_min(self.arr, self.win)
+
+ def time_rolling_max(self):
+ rolling_max(self.arr, self.win)
+
+ def time_rolling_sum(self):
+ rolling_sum(self.arr, self.win)
+
+ def time_rolling_std(self):
+ rolling_std(self.arr, self.win)
+
+ def time_rolling_var(self):
+ rolling_var(self.arr, self.win)
+
+ def time_rolling_skew(self):
+ rolling_skew(self.arr, self.win)
+
+ def time_rolling_kurt(self):
+ rolling_kurt(self.arr, self.win)
\ No newline at end of file
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 84db16e338d87..55df24bb8ab58 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -59,7 +59,8 @@ Performance Improvements
- Release the GIL on most datetime field operations (e.g. ``DatetimeIndex.year``, ``Series.dt.year``), normalization, and conversion to and from ``Period``, ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestamp`` (:issue:`11263`)
-
+- Release the GIL on some srolling algos (``rolling_median``, ``rolling_mean``, ``rolling_max``, ``rolling_min``, ``rolling_var``, ``rolling_kurt``, `rolling_skew`` (:issue:`11450`)
+- Improved performance of ``rolling_median`` (:issue:`11450`)
- Improved performance to ``to_excel`` (:issue:`11352`)
diff --git a/pandas/algos.pyx b/pandas/algos.pyx
index 44b1996272356..8569209f2e946 100644
--- a/pandas/algos.pyx
+++ b/pandas/algos.pyx
@@ -56,9 +56,9 @@ cdef inline int int_min(int a, int b): return a if a <= b else b
cdef extern from "src/headers/math.h":
- double sqrt(double x)
- double fabs(double)
- int signbit(double)
+ double sqrt(double x) nogil
+ double fabs(double) nogil
+ int signbit(double) nogil
from pandas import lib
@@ -864,7 +864,8 @@ def min_subseq(ndarray[double_t] arr):
#-------------------------------------------------------------------------------
# Rolling sum
-
[email protected](False)
[email protected](False)
def roll_sum(ndarray[double_t] input, int win, int minp):
cdef double val, prev, sum_x = 0
cdef int nobs = 0, i
@@ -873,40 +874,41 @@ def roll_sum(ndarray[double_t] input, int win, int minp):
cdef ndarray[double_t] output = np.empty(N, dtype=float)
minp = _check_minp(win, minp, N)
+ with nogil:
+ for i from 0 <= i < minp - 1:
+ val = input[i]
- for i from 0 <= i < minp - 1:
- val = input[i]
+ # Not NaN
+ if val == val:
+ nobs += 1
+ sum_x += val
- # Not NaN
- if val == val:
- nobs += 1
- sum_x += val
+ output[i] = NaN
- output[i] = NaN
+ for i from minp - 1 <= i < N:
+ val = input[i]
- for i from minp - 1 <= i < N:
- val = input[i]
-
- if val == val:
- nobs += 1
- sum_x += val
+ if val == val:
+ nobs += 1
+ sum_x += val
- if i > win - 1:
- prev = input[i - win]
- if prev == prev:
- sum_x -= prev
- nobs -= 1
+ if i > win - 1:
+ prev = input[i - win]
+ if prev == prev:
+ sum_x -= prev
+ nobs -= 1
- if nobs >= minp:
- output[i] = sum_x
- else:
- output[i] = NaN
+ if nobs >= minp:
+ output[i] = sum_x
+ else:
+ output[i] = NaN
return output
#-------------------------------------------------------------------------------
# Rolling mean
-
[email protected](False)
[email protected](False)
def roll_mean(ndarray[double_t] input,
int win, int minp):
cdef:
@@ -916,48 +918,48 @@ def roll_mean(ndarray[double_t] input,
cdef ndarray[double_t] output = np.empty(N, dtype=float)
minp = _check_minp(win, minp, N)
+ with nogil:
+ for i from 0 <= i < minp - 1:
+ val = input[i]
- for i from 0 <= i < minp - 1:
- val = input[i]
-
- # Not NaN
- if val == val:
- nobs += 1
- sum_x += val
- if signbit(val):
- neg_ct += 1
-
- output[i] = NaN
-
- for i from minp - 1 <= i < N:
- val = input[i]
+ # Not NaN
+ if val == val:
+ nobs += 1
+ sum_x += val
+ if signbit(val):
+ neg_ct += 1
- if val == val:
- nobs += 1
- sum_x += val
- if signbit(val):
- neg_ct += 1
+ output[i] = NaN
- if i > win - 1:
- prev = input[i - win]
- if prev == prev:
- sum_x -= prev
- nobs -= 1
- if signbit(prev):
- neg_ct -= 1
+ for i from minp - 1 <= i < N:
+ val = input[i]
- if nobs >= minp:
- result = sum_x / nobs
- if neg_ct == 0 and result < 0:
- # all positive
- output[i] = 0
- elif neg_ct == nobs and result > 0:
- # all negative
- output[i] = 0
+ if val == val:
+ nobs += 1
+ sum_x += val
+ if signbit(val):
+ neg_ct += 1
+
+ if i > win - 1:
+ prev = input[i - win]
+ if prev == prev:
+ sum_x -= prev
+ nobs -= 1
+ if signbit(prev):
+ neg_ct -= 1
+
+ if nobs >= minp:
+ result = sum_x / nobs
+ if neg_ct == 0 and result < 0:
+ # all positive
+ output[i] = 0
+ elif neg_ct == nobs and result > 0:
+ # all negative
+ output[i] = 0
+ else:
+ output[i] = result
else:
- output[i] = result
- else:
- output[i] = NaN
+ output[i] = NaN
return output
@@ -1242,6 +1244,8 @@ def nancorr_spearman(ndarray[float64_t, ndim=2] mat, Py_ssize_t minp=1):
#----------------------------------------------------------------------
# Rolling variance
[email protected](False)
[email protected](False)
def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1):
"""
Numerically stable implementation using Welford's method.
@@ -1257,80 +1261,82 @@ def roll_var(ndarray[double_t] input, int win, int minp, int ddof=1):
# Check for windows larger than array, addresses #7297
win = min(win, N)
- # Over the first window, observations can only be added, never removed
- for i from 0 <= i < win:
- val = input[i]
-
- # Not NaN
- if val == val:
- nobs += 1
- delta = (val - mean_x)
- mean_x += delta / nobs
- ssqdm_x += delta * (val - mean_x)
-
- if (nobs >= minp) and (nobs > ddof):
- #pathological case
- if nobs == 1:
- val = 0
- else:
- val = ssqdm_x / (nobs - ddof)
- if val < 0:
- val = 0
- else:
- val = NaN
-
- output[i] = val
-
- # After the first window, observations can both be added and removed
- for i from win <= i < N:
- val = input[i]
- prev = input[i - win]
+ with nogil:
+ # Over the first window, observations can only be added, never removed
+ for i from 0 <= i < win:
+ val = input[i]
- if val == val:
- if prev == prev:
- # Adding one observation and removing another one
- delta = val - prev
- prev -= mean_x
- mean_x += delta / nobs
- val -= mean_x
- ssqdm_x += (val + prev) * delta
- else:
- # Adding one observation and not removing any
+ # Not NaN
+ if val == val:
nobs += 1
delta = (val - mean_x)
mean_x += delta / nobs
ssqdm_x += delta * (val - mean_x)
- elif prev == prev:
- # Adding no new observation, but removing one
- nobs -= 1
- if nobs:
- delta = (prev - mean_x)
- mean_x -= delta / nobs
- ssqdm_x -= delta * (prev - mean_x)
- else:
- mean_x = 0
- ssqdm_x = 0
- # Variance is unchanged if no observation is added or removed
-
- if (nobs >= minp) and (nobs > ddof):
- #pathological case
- if nobs == 1:
- val = 0
+
+ if (nobs >= minp) and (nobs > ddof):
+ #pathological case
+ if nobs == 1:
+ val = 0
+ else:
+ val = ssqdm_x / (nobs - ddof)
+ if val < 0:
+ val = 0
else:
- val = ssqdm_x / (nobs - ddof)
- if val < 0:
+ val = NaN
+
+ output[i] = val
+
+ # After the first window, observations can both be added and removed
+ for i from win <= i < N:
+ val = input[i]
+ prev = input[i - win]
+
+ if val == val:
+ if prev == prev:
+ # Adding one observation and removing another one
+ delta = val - prev
+ prev -= mean_x
+ mean_x += delta / nobs
+ val -= mean_x
+ ssqdm_x += (val + prev) * delta
+ else:
+ # Adding one observation and not removing any
+ nobs += 1
+ delta = (val - mean_x)
+ mean_x += delta / nobs
+ ssqdm_x += delta * (val - mean_x)
+ elif prev == prev:
+ # Adding no new observation, but removing one
+ nobs -= 1
+ if nobs:
+ delta = (prev - mean_x)
+ mean_x -= delta / nobs
+ ssqdm_x -= delta * (prev - mean_x)
+ else:
+ mean_x = 0
+ ssqdm_x = 0
+ # Variance is unchanged if no observation is added or removed
+
+ if (nobs >= minp) and (nobs > ddof):
+ #pathological case
+ if nobs == 1:
val = 0
- else:
- val = NaN
+ else:
+ val = ssqdm_x / (nobs - ddof)
+ if val < 0:
+ val = 0
+ else:
+ val = NaN
- output[i] = val
+ output[i] = val
return output
#-------------------------------------------------------------------------------
# Rolling skewness
-
[email protected](False)
[email protected](False)
def roll_skew(ndarray[double_t] input, int win, int minp):
cdef double val, prev
cdef double x = 0, xx = 0, xxx = 0
@@ -1343,55 +1349,55 @@ def roll_skew(ndarray[double_t] input, int win, int minp):
cdef double A, B, C, R
minp = _check_minp(win, minp, N)
+ with nogil:
+ for i from 0 <= i < minp - 1:
+ val = input[i]
- for i from 0 <= i < minp - 1:
- val = input[i]
-
- # Not NaN
- if val == val:
- nobs += 1
- x += val
- xx += val * val
- xxx += val * val * val
-
- output[i] = NaN
-
- for i from minp - 1 <= i < N:
- val = input[i]
+ # Not NaN
+ if val == val:
+ nobs += 1
+ x += val
+ xx += val * val
+ xxx += val * val * val
- if val == val:
- nobs += 1
- x += val
- xx += val * val
- xxx += val * val * val
+ output[i] = NaN
- if i > win - 1:
- prev = input[i - win]
- if prev == prev:
- x -= prev
- xx -= prev * prev
- xxx -= prev * prev * prev
+ for i from minp - 1 <= i < N:
+ val = input[i]
- nobs -= 1
- if nobs >= minp:
- A = x / nobs
- B = xx / nobs - A * A
- C = xxx / nobs - A * A * A - 3 * A * B
- if B <= 0 or nobs < 3:
- output[i] = NaN
+ if val == val:
+ nobs += 1
+ x += val
+ xx += val * val
+ xxx += val * val * val
+
+ if i > win - 1:
+ prev = input[i - win]
+ if prev == prev:
+ x -= prev
+ xx -= prev * prev
+ xxx -= prev * prev * prev
+
+ nobs -= 1
+ if nobs >= minp:
+ A = x / nobs
+ B = xx / nobs - A * A
+ C = xxx / nobs - A * A * A - 3 * A * B
+ if B <= 0 or nobs < 3:
+ output[i] = NaN
+ else:
+ R = sqrt(B)
+ output[i] = ((sqrt(nobs * (nobs - 1.)) * C) /
+ ((nobs-2) * R * R * R))
else:
- R = sqrt(B)
- output[i] = ((sqrt(nobs * (nobs - 1.)) * C) /
- ((nobs-2) * R * R * R))
- else:
- output[i] = NaN
+ output[i] = NaN
return output
#-------------------------------------------------------------------------------
# Rolling kurtosis
-
-
[email protected](False)
[email protected](False)
def roll_kurt(ndarray[double_t] input,
int win, int minp):
cdef double val, prev
@@ -1405,62 +1411,62 @@ def roll_kurt(ndarray[double_t] input,
cdef double A, B, C, D, R, K
minp = _check_minp(win, minp, N)
+ with nogil:
+ for i from 0 <= i < minp - 1:
+ val = input[i]
- for i from 0 <= i < minp - 1:
- val = input[i]
-
- # Not NaN
- if val == val:
- nobs += 1
-
- # seriously don't ask me why this is faster
- x += val
- xx += val * val
- xxx += val * val * val
- xxxx += val * val * val * val
+ # Not NaN
+ if val == val:
+ nobs += 1
- output[i] = NaN
+ # seriously don't ask me why this is faster
+ x += val
+ xx += val * val
+ xxx += val * val * val
+ xxxx += val * val * val * val
- for i from minp - 1 <= i < N:
- val = input[i]
+ output[i] = NaN
- if val == val:
- nobs += 1
- x += val
- xx += val * val
- xxx += val * val * val
- xxxx += val * val * val * val
+ for i from minp - 1 <= i < N:
+ val = input[i]
- if i > win - 1:
- prev = input[i - win]
- if prev == prev:
- x -= prev
- xx -= prev * prev
- xxx -= prev * prev * prev
- xxxx -= prev * prev * prev * prev
+ if val == val:
+ nobs += 1
+ x += val
+ xx += val * val
+ xxx += val * val * val
+ xxxx += val * val * val * val
+
+ if i > win - 1:
+ prev = input[i - win]
+ if prev == prev:
+ x -= prev
+ xx -= prev * prev
+ xxx -= prev * prev * prev
+ xxxx -= prev * prev * prev * prev
+
+ nobs -= 1
+
+ if nobs >= minp:
+ A = x / nobs
+ R = A * A
+ B = xx / nobs - R
+ R = R * A
+ C = xxx / nobs - R - 3 * A * B
+ R = R * A
+ D = xxxx / nobs - R - 6*B*A*A - 4*C*A
+
+ if B == 0 or nobs < 4:
+ output[i] = NaN
- nobs -= 1
+ else:
+ K = (nobs * nobs - 1.)*D/(B*B) - 3*((nobs-1.)**2)
+ K = K / ((nobs - 2.)*(nobs-3.))
- if nobs >= minp:
- A = x / nobs
- R = A * A
- B = xx / nobs - R
- R = R * A
- C = xxx / nobs - R - 3 * A * B
- R = R * A
- D = xxxx / nobs - R - 6*B*A*A - 4*C*A
-
- if B == 0 or nobs < 4:
- output[i] = NaN
+ output[i] = K
else:
- K = (nobs * nobs - 1.)*D/(B*B) - 3*((nobs-1.)**2)
- K = K / ((nobs - 2.)*(nobs-3.))
-
- output[i] = K
-
- else:
- output[i] = NaN
+ output[i] = NaN
return output
@@ -1512,9 +1518,13 @@ cdef _roll_skiplist_op(ndarray arg, int win, int minp, skiplist_f op):
from skiplist cimport *
+
[email protected](False)
[email protected](False)
def roll_median_c(ndarray[float64_t] arg, int win, int minp):
- cdef double val, res, prev
cdef:
+ double val, res, prev
+ bint err=0
int ret=0
skiplist_t *sl
Py_ssize_t midpoint, nobs = 0, i
@@ -1524,71 +1534,59 @@ def roll_median_c(ndarray[float64_t] arg, int win, int minp):
cdef ndarray[double_t] output = np.empty(N, dtype=float)
sl = skiplist_init(win)
+ if sl == NULL:
+ raise MemoryError("skiplist_init failed")
minp = _check_minp(win, minp, N)
- for i from 0 <= i < minp - 1:
- val = arg[i]
-
- # Not NaN
- if val == val:
- nobs += 1
- skiplist_insert(sl, val)
-
- output[i] = NaN
+ with nogil:
+ for i from 0 <= i < minp - 1:
+ val = arg[i]
- for i from minp - 1 <= i < N:
- val = arg[i]
+ # Not NaN
+ if val == val:
+ nobs += 1
+ err = skiplist_insert(sl, val) != 1
+ if err:
+ break
+ output[i] = NaN
- if i > win - 1:
- prev = arg[i - win]
+ with nogil:
+ if not err:
+ for i from minp - 1 <= i < N:
- if prev == prev:
- skiplist_remove(sl, prev)
- nobs -= 1
+ val = arg[i]
- if val == val:
- nobs += 1
- skiplist_insert(sl, val)
+ if i > win - 1:
+ prev = arg[i - win]
- if nobs >= minp:
- midpoint = nobs / 2
- if nobs % 2:
- res = skiplist_get(sl, midpoint, &ret)
- else:
- res = (skiplist_get(sl, midpoint, &ret) +
- skiplist_get(sl, (midpoint - 1), &ret)) / 2
- else:
- res = NaN
+ if prev == prev:
+ skiplist_remove(sl, prev)
+ nobs -= 1
- output[i] = res
+ if val == val:
+ nobs += 1
+ err = skiplist_insert(sl, val) != 1
+ if err:
+ break
+
+ if nobs >= minp:
+ midpoint = nobs / 2
+ if nobs % 2:
+ res = skiplist_get(sl, midpoint, &ret)
+ else:
+ res = (skiplist_get(sl, midpoint, &ret) +
+ skiplist_get(sl, (midpoint - 1), &ret)) / 2
+ else:
+ res = NaN
- skiplist_destroy(sl)
+ output[i] = res
+ skiplist_destroy(sl)
+ if err:
+ raise MemoryError("skiplist_insert failed")
return output
-def roll_median_cython(ndarray input, int win, int minp):
- '''
- O(N log(window)) implementation using skip list
- '''
- return _roll_skiplist_op(input, win, minp, _get_median)
-
-# Unfortunately had to resort to some hackery here, would like for
-# Cython to be able to get this right.
-
-cdef double_t _get_median(object sl, int nobs, int minp):
- cdef Py_ssize_t midpoint
- cdef IndexableSkiplist skiplist = <IndexableSkiplist> sl
- if nobs >= minp:
- midpoint = nobs / 2
- if nobs % 2:
- return skiplist.get(midpoint)
- else:
- return (skiplist.get(midpoint) +
- skiplist.get(midpoint - 1)) / 2
- else:
- return NaN
-
#----------------------------------------------------------------------
# Moving maximum / minimum code taken from Bottleneck under the terms
@@ -1603,7 +1601,7 @@ from libc cimport stdlib
@cython.boundscheck(False)
@cython.wraparound(False)
-def roll_max2(ndarray[float64_t] a, int window, int minp):
+def roll_max(ndarray[float64_t] a, int window, int minp):
"Moving max of 1d array of dtype=float64 along axis=0 ignoring NaNs."
cdef np.float64_t ai, aold
cdef Py_ssize_t count
@@ -1617,7 +1615,7 @@ def roll_max2(ndarray[float64_t] a, int window, int minp):
cdef Py_ssize_t n0 = dim[0]
cdef np.npy_intp *dims = [n0]
cdef np.ndarray[np.float64_t, ndim=1] y = PyArray_EMPTY(1, dims,
- NPY_float64, 0)
+ NPY_float64, 0)
if window < 1:
raise ValueError('Invalid window size %d'
@@ -1628,65 +1626,59 @@ def roll_max2(ndarray[float64_t] a, int window, int minp):
% (minp, window))
minp = _check_minp(window, minp, n0)
+ with nogil:
+ ring = <pairs*>stdlib.malloc(window * sizeof(pairs))
+ end = ring + window
+ last = ring
- ring = <pairs*>stdlib.malloc(window * sizeof(pairs))
- end = ring + window
- last = ring
-
- minpair = ring
- ai = a[0]
- if ai == ai:
- minpair.value = ai
- else:
- minpair.value = MINfloat64
- minpair.death = window
-
- count = 0
- for i0 in range(n0):
- ai = a[i0]
+ minpair = ring
+ ai = a[0]
if ai == ai:
- count += 1
- else:
- ai = MINfloat64
- if i0 >= window:
- aold = a[i0 - window]
- if aold == aold:
- count -= 1
- if minpair.death == i0:
- minpair += 1
- if minpair >= end:
- minpair = ring
- if ai >= minpair.value:
minpair.value = ai
- minpair.death = i0 + window
- last = minpair
- else:
- while last.value <= ai:
- if last == ring:
- last = end
- last -= 1
- last += 1
- if last == end:
- last = ring
- last.value = ai
- last.death = i0 + window
- if count >= minp:
- y[i0] = minpair.value
else:
- y[i0] = NaN
+ minpair.value = MINfloat64
+ minpair.death = window
+
+ count = 0
+ for i0 in range(n0):
+ ai = a[i0]
+ if ai == ai:
+ count += 1
+ else:
+ ai = MINfloat64
+ if i0 >= window:
+ aold = a[i0 - window]
+ if aold == aold:
+ count -= 1
+ if minpair.death == i0:
+ minpair += 1
+ if minpair >= end:
+ minpair = ring
+ if ai >= minpair.value:
+ minpair.value = ai
+ minpair.death = i0 + window
+ last = minpair
+ else:
+ while last.value <= ai:
+ if last == ring:
+ last = end
+ last -= 1
+ last += 1
+ if last == end:
+ last = ring
+ last.value = ai
+ last.death = i0 + window
+ if count >= minp:
+ y[i0] = minpair.value
+ else:
+ y[i0] = NaN
- for i0 in range(minp - 1):
- y[i0] = NaN
+ for i0 in range(minp - 1):
+ y[i0] = NaN
- stdlib.free(ring)
+ stdlib.free(ring)
return y
-def roll_max(ndarray input, int win, int minp):
- '''
- O(N log(window)) implementation using skip list
- '''
- return _roll_skiplist_op(input, win, minp, _get_max)
-
cdef double_t _get_max(object skiplist, int nobs, int minp):
if nobs >= minp:
@@ -1694,15 +1686,10 @@ cdef double_t _get_max(object skiplist, int nobs, int minp):
else:
return NaN
-def roll_min(ndarray input, int win, int minp):
- '''
- O(N log(window)) implementation using skip list
- '''
- return _roll_skiplist_op(input, win, minp, _get_min)
@cython.boundscheck(False)
@cython.wraparound(False)
-def roll_min2(np.ndarray[np.float64_t, ndim=1] a, int window, int minp):
+def roll_min(np.ndarray[np.float64_t, ndim=1] a, int window, int minp):
"Moving min of 1d array of dtype=float64 along axis=0 ignoring NaNs."
cdef np.float64_t ai, aold
cdef Py_ssize_t count
@@ -1716,7 +1703,7 @@ def roll_min2(np.ndarray[np.float64_t, ndim=1] a, int window, int minp):
cdef Py_ssize_t n0 = dim[0]
cdef np.npy_intp *dims = [n0]
cdef np.ndarray[np.float64_t, ndim=1] y = PyArray_EMPTY(1, dims,
- NPY_float64, 0)
+ NPY_float64, 0)
if window < 1:
raise ValueError('Invalid window size %d'
@@ -1727,57 +1714,57 @@ def roll_min2(np.ndarray[np.float64_t, ndim=1] a, int window, int minp):
% (minp, window))
minp = _check_minp(window, minp, n0)
+ with nogil:
+ ring = <pairs*>stdlib.malloc(window * sizeof(pairs))
+ end = ring + window
+ last = ring
- ring = <pairs*>stdlib.malloc(window * sizeof(pairs))
- end = ring + window
- last = ring
-
- minpair = ring
- ai = a[0]
- if ai == ai:
- minpair.value = ai
- else:
- minpair.value = MAXfloat64
- minpair.death = window
-
- count = 0
- for i0 in range(n0):
- ai = a[i0]
+ minpair = ring
+ ai = a[0]
if ai == ai:
- count += 1
- else:
- ai = MAXfloat64
- if i0 >= window:
- aold = a[i0 - window]
- if aold == aold:
- count -= 1
- if minpair.death == i0:
- minpair += 1
- if minpair >= end:
- minpair = ring
- if ai <= minpair.value:
minpair.value = ai
- minpair.death = i0 + window
- last = minpair
- else:
- while last.value >= ai:
- if last == ring:
- last = end
- last -= 1
- last += 1
- if last == end:
- last = ring
- last.value = ai
- last.death = i0 + window
- if count >= minp:
- y[i0] = minpair.value
else:
- y[i0] = NaN
+ minpair.value = MAXfloat64
+ minpair.death = window
+
+ count = 0
+ for i0 in range(n0):
+ ai = a[i0]
+ if ai == ai:
+ count += 1
+ else:
+ ai = MAXfloat64
+ if i0 >= window:
+ aold = a[i0 - window]
+ if aold == aold:
+ count -= 1
+ if minpair.death == i0:
+ minpair += 1
+ if minpair >= end:
+ minpair = ring
+ if ai <= minpair.value:
+ minpair.value = ai
+ minpair.death = i0 + window
+ last = minpair
+ else:
+ while last.value >= ai:
+ if last == ring:
+ last = end
+ last -= 1
+ last += 1
+ if last == end:
+ last = ring
+ last.value = ai
+ last.death = i0 + window
+ if count >= minp:
+ y[i0] = minpair.value
+ else:
+ y[i0] = NaN
- for i0 in range(minp - 1):
- y[i0] = NaN
+ for i0 in range(minp - 1):
+ y[i0] = NaN
- stdlib.free(ring)
+ stdlib.free(ring)
return y
cdef double_t _get_min(object skiplist, int nobs, int minp):
diff --git a/pandas/src/skiplist.h b/pandas/src/skiplist.h
index 57b32005021b9..c7117f16c9496 100644
--- a/pandas/src/skiplist.h
+++ b/pandas/src/skiplist.h
@@ -43,19 +43,20 @@ static PANDAS_INLINE double Log2(double val) {
typedef struct node_t node_t;
struct node_t {
+ node_t **next;
+ int *width;
double value;
int is_nil;
int levels;
- node_t **next;
- int *width;
int ref_count;
};
typedef struct {
node_t *head;
- int size, maxlevels;
node_t **tmp_chain;
int *tmp_steps;
+ int size;
+ int maxlevels;
} skiplist_t;
static PANDAS_INLINE double urand(void) {
@@ -68,33 +69,37 @@ static PANDAS_INLINE int int_min(int a, int b) {
static PANDAS_INLINE node_t *node_init(double value, int levels) {
node_t *result;
- result = (node_t*) calloc(1, sizeof(node_t));
-
- result->value = value;
- result->levels = levels;
- result->is_nil = 0;
- result->ref_count = 0;
-
- result->next = (node_t**) malloc(levels * sizeof(node_t*));
- result->width = (int*) malloc(levels * sizeof(int));
-
+ result = (node_t*) malloc(sizeof(node_t));
+ if (result) {
+ result->value = value;
+ result->levels = levels;
+ result->is_nil = 0;
+ result->ref_count = 0;
+ result->next = (node_t**) malloc(levels * sizeof(node_t*));
+ result->width = (int*) malloc(levels * sizeof(int));
+ if (!(result->next && result->width) && (levels != 0)) {
+ free(result->next);
+ free(result->width);
+ free(result);
+ return NULL;
+ }
+ }
return result;
}
// do this ourselves
-
static PANDAS_INLINE void node_incref(node_t *node) {
- node->ref_count += 1;
+ ++(node->ref_count);
}
static PANDAS_INLINE void node_decref(node_t *node) {
- node->ref_count -= 1;
+ --(node->ref_count);
}
static void node_destroy(node_t *node) {
int i;
if (node) {
- if (node->ref_count == 1) {
+ if (node->ref_count <= 1) {
for (i = 0; i < node->levels; ++i) {
node_destroy(node->next[i]);
}
@@ -110,21 +115,41 @@ static void node_destroy(node_t *node) {
}
}
+static PANDAS_INLINE void skiplist_destroy(skiplist_t *skp) {
+ if (skp) {
+ node_destroy(skp->head);
+ free(skp->tmp_steps);
+ free(skp->tmp_chain);
+ free(skp);
+ }
+}
+
static PANDAS_INLINE skiplist_t *skiplist_init(int expected_size) {
skiplist_t *result;
node_t *NIL, *head;
int maxlevels, i;
- maxlevels = Log2((double) expected_size);
- result = (skiplist_t*) calloc(1, sizeof(skiplist_t));
+ maxlevels = 1 + Log2((double) expected_size);
+ result = (skiplist_t*) malloc(sizeof(skiplist_t));
+ if (!result) {
+ return NULL;
+ }
result->tmp_chain = (node_t**) malloc(maxlevels * sizeof(node_t*));
result->tmp_steps = (int*) malloc(maxlevels * sizeof(int));
result->maxlevels = maxlevels;
+ result->size = 0;
head = result->head = node_init(PANDAS_NAN, maxlevels);
+ NIL = node_init(0.0, 0);
+
+ if (!(result->tmp_chain && result->tmp_steps && result->head && NIL)) {
+ skiplist_destroy(result);
+ node_destroy(NIL);
+ return NULL;
+ }
+
node_incref(head);
- NIL = node_init(0, 0);
NIL->is_nil = 1;
for (i = 0; i < maxlevels; ++i)
@@ -137,18 +162,7 @@ static PANDAS_INLINE skiplist_t *skiplist_init(int expected_size) {
return result;
}
-static PANDAS_INLINE void skiplist_destroy(skiplist_t *skp) {
- if (skp) {
- node_destroy(skp->head);
- free(skp->tmp_steps);
- free(skp->tmp_chain);
- free(skp);
- }
-}
-
-
// 1 if left < right, 0 if left == right, -1 if left > right
-
static PANDAS_INLINE int _node_cmp(node_t* node, double value){
if (node->is_nil || node->value > value) {
return -1;
@@ -171,12 +185,12 @@ static PANDAS_INLINE double skiplist_get(skiplist_t *skp, int i, int *ret) {
}
node = skp->head;
- i++;
+ ++i;
for (level = skp->maxlevels - 1; level >= 0; --level)
{
while (node->width[level] <= i)
{
- i = i - node->width[level];
+ i -= node->width[level];
node = node->next[level];
}
}
@@ -212,6 +226,9 @@ static PANDAS_INLINE int skiplist_insert(skiplist_t *skp, double value) {
size = int_min(skp->maxlevels, 1 - ((int) Log2(urand())));
newnode = node_init(value, size);
+ if (!newnode) {
+ return -1;
+ }
steps = 0;
for (level = 0; level < size; ++level) {
@@ -231,7 +248,7 @@ static PANDAS_INLINE int skiplist_insert(skiplist_t *skp, double value) {
chain[level]->width[level] += 1;
}
- skp->size++;
+ ++(skp->size);
return 1;
}
@@ -273,9 +290,9 @@ static PANDAS_INLINE int skiplist_remove(skiplist_t *skp, double value) {
}
for (level = size; level < skp->maxlevels; ++level) {
- chain[level]->width[level] -= 1;
+ --(chain[level]->width[level]);
}
- skp->size--;
+ --(skp->size);
return 1;
}
diff --git a/pandas/src/skiplist.pxd b/pandas/src/skiplist.pxd
index c1221c4741c7b..69e9df5b542aa 100644
--- a/pandas/src/skiplist.pxd
+++ b/pandas/src/skiplist.pxd
@@ -1,21 +1,22 @@
cdef extern from "skiplist.h":
ctypedef struct node_t:
+ node_t **next
+ int *width
double value
int is_nil
int levels
- node_t **next
- int *width
int ref_count
ctypedef struct skiplist_t:
node_t *head
- int size, maxlevels
node_t **tmp_chain
int *tmp_steps
+ int size
+ int maxlevels
- inline skiplist_t* skiplist_init(int)
- inline void skiplist_destroy(skiplist_t*)
- inline double skiplist_get(skiplist_t*, int, int*)
- inline int skiplist_insert(skiplist_t*, double)
- inline int skiplist_remove(skiplist_t*, double)
+ inline skiplist_t* skiplist_init(int) nogil
+ inline void skiplist_destroy(skiplist_t*) nogil
+ inline double skiplist_get(skiplist_t*, int, int*) nogil
+ inline int skiplist_insert(skiplist_t*, double) nogil
+ inline int skiplist_remove(skiplist_t*, double) nogil
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index c4791c43278b9..3894dc3b02499 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -646,11 +646,11 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
return f
-rolling_max = _rolling_func(algos.roll_max2, 'Moving maximum.', how='max')
-rolling_min = _rolling_func(algos.roll_min2, 'Moving minimum.', how='min')
+rolling_max = _rolling_func(algos.roll_max, 'Moving maximum.', how='max')
+rolling_min = _rolling_func(algos.roll_min, 'Moving minimum.', how='min')
rolling_sum = _rolling_func(algos.roll_sum, 'Moving sum.')
rolling_mean = _rolling_func(algos.roll_mean, 'Moving mean.')
-rolling_median = _rolling_func(algos.roll_median_cython, 'Moving median.',
+rolling_median = _rolling_func(algos.roll_median_c, 'Moving median.',
how='median')
_ts_std = lambda *a, **kw: _zsqrt(algos.roll_var(*a, **kw))
@@ -888,11 +888,11 @@ def call_cython(arg, window, minp, args=(), kwargs={}, **kwds):
return f
-expanding_max = _expanding_func(algos.roll_max2, 'Expanding maximum.')
-expanding_min = _expanding_func(algos.roll_min2, 'Expanding minimum.')
+expanding_max = _expanding_func(algos.roll_max, 'Expanding maximum.')
+expanding_min = _expanding_func(algos.roll_min, 'Expanding minimum.')
expanding_sum = _expanding_func(algos.roll_sum, 'Expanding sum.')
expanding_mean = _expanding_func(algos.roll_mean, 'Expanding mean.')
-expanding_median = _expanding_func(algos.roll_median_cython, 'Expanding median.')
+expanding_median = _expanding_func(algos.roll_median_c, 'Expanding median.')
expanding_std = _expanding_func(_ts_std, 'Expanding standard deviation.',
check_minp=_require_min_periods(1),
| Removes some unused code.
Reverts this commit: https://github.com/pydata/pandas/commit/a40226e5c4c3d29e5e2383433c5f0b94c3c0ecc1
Performance comparison:
```
import pandas
import numpy
arr = numpy.random.rand(1000000)
%timeit pandas.rolling_median(arr, 1000)
```
master
```
1 loops, best of 3: 4.94 s per loop
```
branch
```
1 loops, best of 3: 821 ms per loop
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11450 | 2015-10-28T00:13:33Z | 2015-11-02T11:44:57Z | 2015-11-02T11:44:57Z | 2015-11-02T11:45:02Z |
DOC: Improvements in panel apply docstring | diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index da0ab7bc59440..f05e5a8357877 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -3,33 +3,33 @@
"""
# pylint: disable=E1103,W0231,W0212,W0621
from __future__ import division
-from pandas.compat import (map, zip, range, lrange, lmap, u, OrderedDict,
- OrderedDefaultdict)
-from pandas import compat
+
import warnings
+
import numpy as np
-from pandas.core.common import (PandasError, _try_sort, _default_index,
- _infer_dtype_from_scalar, notnull, is_list_like)
+
+import pandas.computation.expressions as expressions
+import pandas.core.common as com
+import pandas.core.ops as ops
+from pandas import compat
+from pandas import lib
+from pandas.compat import (map, zip, range, u, OrderedDict,
+ OrderedDefaultdict)
from pandas.core.categorical import Categorical
+from pandas.core.common import (PandasError, _try_sort, _default_index,
+ _infer_dtype_from_scalar, is_list_like)
+from pandas.core.frame import DataFrame
+from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.index import (Index, MultiIndex, _ensure_index,
_get_combined_index)
from pandas.core.indexing import maybe_droplevels
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays,
create_block_manager_from_blocks)
+from pandas.core.ops import _op_descriptions
from pandas.core.series import Series
-from pandas.core.frame import DataFrame
-from pandas.core.generic import NDFrame, _shared_docs
from pandas.tools.util import cartesian_product
-from pandas import compat
-from pandas.util.decorators import (deprecate, Appender, Substitution,
- deprecate_kwarg)
-import pandas.core.common as com
-import pandas.core.ops as ops
-import pandas.computation.expressions as expressions
-from pandas import lib
-from pandas.core.ops import _op_descriptions
-
+from pandas.util.decorators import (deprecate, Appender, deprecate_kwarg)
_shared_doc_kwargs = dict(
axes='items, major_axis, minor_axis',
@@ -105,7 +105,6 @@ def panel_index(time, panels, names=['time', 'panel']):
class Panel(NDFrame):
-
"""
Represents wide format panel data, stored as 3-dimensional array
@@ -149,7 +148,7 @@ def _init_data(self, data, copy, dtype, **kwargs):
if kwargs:
raise TypeError('_init_data() got an unexpected keyword '
- 'argument "{0}"'.format(list(kwargs.keys())[0]))
+ 'argument "{0}"'.format(list(kwargs.keys())[0]))
axes = None
if isinstance(data, BlockManager):
@@ -307,7 +306,7 @@ def _init_matrix(self, data, axes, dtype=None, copy=False):
return create_block_manager_from_blocks([values], fixed_axes)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Comparison methods
def _compare_constructor(self, other, func):
@@ -322,7 +321,7 @@ def _compare_constructor(self, other, func):
d = self._construct_axes_dict(copy=False)
return self._constructor(data=new_data, **d)
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Magic methods
def __unicode__(self):
@@ -378,7 +377,7 @@ def _get_plane_axes(self, axis):
(as compared with higher level planes),
as we are returning a DataFrame axes
"""
- return [ self._get_axis(axi) for axi in self._get_plane_axes_index(axis) ]
+ return [self._get_axis(axi) for axi in self._get_plane_axes_index(axis)]
fromDict = from_dict
@@ -458,7 +457,7 @@ def as_matrix(self):
self._consolidate_inplace()
return self._data.as_matrix()
- #----------------------------------------------------------------------
+ # ----------------------------------------------------------------------
# Getting and setting elements
def get_value(self, *args, **kwargs):
@@ -488,7 +487,7 @@ def get_value(self, *args, **kwargs):
if kwargs:
raise TypeError('get_value() got an unexpected keyword '
- 'argument "{0}"'.format(list(kwargs.keys())[0]))
+ 'argument "{0}"'.format(list(kwargs.keys())[0]))
if takeable is True:
lower = self._iget_item_cache(args[0])
@@ -527,7 +526,7 @@ def set_value(self, *args, **kwargs):
if kwargs:
raise TypeError('set_value() got an unexpected keyword '
- 'argument "{0}"'.format(list(kwargs.keys())[0]))
+ 'argument "{0}"'.format(list(kwargs.keys())[0]))
try:
if takeable is True:
@@ -681,8 +680,8 @@ def _combine(self, other, func, axis=0):
return self._combine_const(other, func)
else:
raise NotImplementedError(str(type(other)) +
- ' is not supported in combine operation with ' +
- str(type(self)))
+ ' is not supported in combine operation with ' +
+ str(type(self)))
def _combine_const(self, other, func):
new_values = func(self.values, other)
@@ -944,27 +943,41 @@ def construct_index_parts(idx, major=True):
def apply(self, func, axis='major', **kwargs):
"""
- Applies function along input axis of the Panel
+ Applies function along axis (or axes) of the Panel
Parameters
----------
func : function
Function to apply to each combination of 'other' axes
- e.g. if axis = 'items', then the combination of major_axis/minor_axis
- will be passed a Series
- axis : {'major', 'minor', 'items'}
+ e.g. if axis = 'items', the combination of major_axis/minor_axis
+ will each be passed as a Series; if axis = ('items', 'major'), DataFrames
+ of items & major axis will be passed
+ axis : {'items', 'minor', 'major'}, or {0, 1, 2}, or a tuple with two axes
Additional keyword arguments will be passed as keywords to the function
Examples
--------
- >>> p.apply(numpy.sqrt) # returns a Panel
- >>> p.apply(lambda x: x.sum(), axis=0) # equiv to p.sum(0)
- >>> p.apply(lambda x: x.sum(), axis=1) # equiv to p.sum(1)
- >>> p.apply(lambda x: x.sum(), axis=2) # equiv to p.sum(2)
+
+ Returns a Panel with the square root of each element
+
+ >>> p = pd.Panel(np.random.rand(4,3,2))
+ >>> p.apply(np.sqrt)
+
+ Equivalent to p.sum(1), returning a DataFrame
+
+ >>> p.apply(lambda x: x.sum(), axis=1)
+
+ Equivalent to previous:
+
+ >>> p.apply(lambda x: x.sum(), axis='minor')
+
+ Return the shapes of each DataFrame over axis 2 (i.e the shapes of items x major), as a Series
+
+ >>> p.apply(lambda x: x.shape, axis=(0,1))
Returns
-------
- result : Pandas Object
+ result : Panel, DataFrame, or Series
"""
if kwargs and not isinstance(func, np.ufunc):
@@ -973,7 +986,7 @@ def apply(self, func, axis='major', **kwargs):
f = func
# 2d-slabs
- if isinstance(axis, (tuple,list)) and len(axis) == 2:
+ if isinstance(axis, (tuple, list)) and len(axis) == 2:
return self._apply_2d(f, axis=axis)
axis = self._get_axis_number(axis)
@@ -998,13 +1011,13 @@ def _apply_1d(self, func, axis):
# iter thru the axes
slice_axis = self._get_axis(axis)
- slice_indexer = [0]*(ndim-1)
+ slice_indexer = [0] * (ndim - 1)
indexer = np.zeros(ndim, 'O')
indlist = list(range(ndim))
indlist.remove(axis)
indexer[axis] = slice(None, None)
indexer.put(indlist, slice_indexer)
- planes = [ self._get_axis(axi) for axi in indlist ]
+ planes = [self._get_axis(axi) for axi in indlist]
shape = np.array(self.shape).take(indlist)
# all the iteration points
@@ -1014,10 +1027,10 @@ def _apply_1d(self, func, axis):
for i in range(np.prod(shape)):
# construct the object
- pts = tuple([ p[i] for p in points ])
+ pts = tuple([p[i] for p in points])
indexer.put(indlist, slice_indexer)
- obj = Series(values[tuple(indexer)],index=slice_axis,name=pts)
+ obj = Series(values[tuple(indexer)], index=slice_axis, name=pts)
result = func(obj)
results.append(result)
@@ -1025,8 +1038,8 @@ def _apply_1d(self, func, axis):
# increment the indexer
slice_indexer[-1] += 1
n = -1
- while (slice_indexer[n] >= shape[n]) and (n > (1-ndim)):
- slice_indexer[n-1] += 1
+ while (slice_indexer[n] >= shape[n]) and (n > (1 - ndim)):
+ slice_indexer[n - 1] += 1
slice_indexer[n] = 0
n -= 1
@@ -1035,25 +1048,25 @@ def _apply_1d(self, func, axis):
return self._constructor(**self._construct_axes_dict())
# same ndim as current
- if isinstance(results[0],Series):
- arr = np.vstack([ r.values for r in results ])
+ if isinstance(results[0], Series):
+ arr = np.vstack([r.values for r in results])
arr = arr.T.reshape(tuple([len(slice_axis)] + list(shape)))
- tranp = np.array([axis]+indlist).argsort()
+ tranp = np.array([axis] + indlist).argsort()
arr = arr.transpose(tuple(list(tranp)))
- return self._constructor(arr,**self._construct_axes_dict())
+ return self._constructor(arr, **self._construct_axes_dict())
# ndim-1 shape
results = np.array(results).reshape(shape)
if results.ndim == 2 and axis_name != self._info_axis_name:
results = results.T
planes = planes[::-1]
- return self._construct_return_type(results,planes)
+ return self._construct_return_type(results, planes)
def _apply_2d(self, func, axis):
""" handle 2-d slices, equiv to iterating over the other axis """
ndim = self.ndim
- axis = [ self._get_axis_number(a) for a in axis ]
+ axis = [self._get_axis_number(a) for a in axis]
# construct slabs, in 2-d this is a DataFrame result
indexer_axis = list(range(ndim))
@@ -1061,17 +1074,16 @@ def _apply_2d(self, func, axis):
indexer_axis.remove(a)
indexer_axis = indexer_axis[0]
- slicer = [ slice(None,None) ] * ndim
+ slicer = [slice(None, None)] * ndim
ax = self._get_axis(indexer_axis)
results = []
for i, e in enumerate(ax):
-
slicer[indexer_axis] = i
sliced = self.iloc[tuple(slicer)]
obj = func(sliced)
- results.append((e,obj))
+ results.append((e, obj))
return self._construct_return_type(dict(results))
@@ -1095,12 +1107,12 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
def _construct_return_type(self, result, axes=None):
""" return the type for the ndim of the result """
- ndim = getattr(result,'ndim',None)
+ ndim = getattr(result, 'ndim', None)
# need to assume they are the same
if ndim is None:
- if isinstance(result,dict):
- ndim = getattr(list(compat.itervalues(result))[0],'ndim',0)
+ if isinstance(result, dict):
+ ndim = getattr(list(compat.itervalues(result))[0], 'ndim', 0)
# have a dict, so top-level is +1 dim
if ndim != 0:
@@ -1189,7 +1201,7 @@ def count(self, axis='major'):
values = self.values
mask = np.isfinite(values)
- result = mask.sum(axis=i,dtype='int64')
+ result = mask.sum(axis=i, dtype='int64')
return self._wrap_result(result, axis)
@@ -1496,6 +1508,7 @@ def na_op(x, y):
@Appender(doc)
def f(self, other, axis=0):
return self._combine(other, na_op, axis=axis)
+
f.__name__ = name
return f
@@ -1504,6 +1517,7 @@ def f(self, other, axis=0):
cls, _panel_arith_method, use_numexpr=use_numexpr,
flex_comp_method=ops._comp_method_PANEL)
+
Panel._setup_axes(axes=['items', 'major_axis', 'minor_axis'],
info_axis=0,
stat_axis=1,
@@ -1516,21 +1530,19 @@ def f(self, other, axis=0):
Panel._add_aggregate_operations()
Panel._add_numeric_operations()
+
# legacy
class WidePanel(Panel):
-
def __init__(self, *args, **kwargs):
-
# deprecation, #10892
warnings.warn("WidePanel is deprecated. Please use Panel",
FutureWarning, stacklevel=2)
super(WidePanel, self).__init__(*args, **kwargs)
-class LongPanel(DataFrame):
+class LongPanel(DataFrame):
def __init__(self, *args, **kwargs):
-
# deprecation, #10892
warnings.warn("LongPanel is deprecated. Please use DataFrame",
FutureWarning, stacklevel=2)
| Not sure what the bar is for a PR here. This is a mild but meaningful improvement, in particular regarding the option to supply axis numbers and a tuple of axes, though it is by no means perfect.
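A short usage sketch mirroring the examples added to the docstring in this diff; the panel `p` below is a throwaway object created only for illustration:

``` python
import numpy as np
import pandas as pd

p = pd.Panel(np.random.rand(4, 3, 2))

p.apply(np.sqrt)                          # element-wise, returns a Panel
p.apply(lambda x: x.sum(), axis=1)        # axis given as a number
p.apply(lambda x: x.sum(), axis='minor')  # axis given by name
p.apply(lambda x: x.shape, axis=(0, 1))   # tuple of two axes: func receives 2-d slabs (DataFrames)
```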
| https://api.github.com/repos/pandas-dev/pandas/pulls/11449 | 2015-10-28T00:04:37Z | 2015-11-07T14:54:27Z | 2015-11-07T14:54:27Z | 2015-11-07T17:25:24Z |
BUG: fix Panel.fillna() ignoring axis parameter (re-submission) | diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 462ead70c9f93..09aeac3c6ab65 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -10,7 +10,7 @@
from pandas.core.algorithms import factorize
from pandas.core.base import PandasObject, PandasDelegate, NoNewAttributesMixin
import pandas.core.common as com
-from pandas.core.missing import interpolate_2d
+from pandas.core.missing import pad
from pandas.util.decorators import cache_readonly, deprecate_kwarg
from pandas.core.common import (ABCSeries, ABCIndexClass, ABCPeriodIndex, ABCCategoricalIndex,
@@ -1340,8 +1340,7 @@ def fillna(self, value=None, method=None, limit=None):
if method is not None:
values = self.to_dense().reshape(-1, len(self))
- values = interpolate_2d(
- values, method, 0, None, value).astype(self.categories.dtype)[0]
+ values = pad(values, method, 0, None, value).astype(self.categories.dtype)[0]
values = _get_codes_for_values(values, self.categories)
else:
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 6aeb4d83649ef..bd23df101ec76 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -16,7 +16,7 @@
from pandas.tseries.period import PeriodIndex
from pandas.core.internals import BlockManager
import pandas.core.common as com
-import pandas.core.missing as mis
+import pandas.core.missing as missing
import pandas.core.datetools as datetools
from pandas import compat
from pandas.compat import map, zip, lrange, string_types, isidentifier
@@ -51,7 +51,7 @@ def _single_replace(self, to_replace, method, inplace, limit):
orig_dtype = self.dtype
result = self if inplace else self.copy()
- fill_f = mis._get_fill_func(method)
+ fill_f = missing._get_fill_func(method)
mask = com.mask_missing(result.values, to_replace)
values = fill_f(result.values, limit=limit, mask=mask)
@@ -1929,7 +1929,7 @@ def reindex(self, *args, **kwargs):
# construct the args
axes, kwargs = self._construct_axes_from_arguments(args, kwargs)
- method = mis._clean_reindex_fill_method(kwargs.pop('method', None))
+ method = missing._clean_reindex_fill_method(kwargs.pop('method', None))
level = kwargs.pop('level', None)
copy = kwargs.pop('copy', True)
limit = kwargs.pop('limit', None)
@@ -2042,7 +2042,7 @@ def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
axis_name = self._get_axis_name(axis)
axis_values = self._get_axis(axis_name)
- method = mis._clean_reindex_fill_method(method)
+ method = missing._clean_reindex_fill_method(method)
new_index, indexer = axis_values.reindex(labels, method, level,
limit=limit)
return self._reindex_with_indexers(
@@ -2774,40 +2774,28 @@ def fillna(self, value=None, method=None, axis=None, inplace=False,
# set the default here, so functions examining the signaure
# can detect if something was set (e.g. in groupby) (GH9221)
if axis is None:
- axis = 0
+ axis = self._stat_axis_name
axis = self._get_axis_number(axis)
- method = mis._clean_fill_method(method)
+ method = missing._clean_fill_method(method)
from pandas import DataFrame
if value is None:
if method is None:
raise ValueError('must specify a fill method or value')
- if self._is_mixed_type and axis == 1:
+ if self._is_mixed_type:
+ if (self.ndim > 2) and (axis == 0):
+ raise NotImplementedError('cannot fill across axis 0 for mixed dtypes')
if inplace:
- raise NotImplementedError()
- result = self.T.fillna(method=method, limit=limit).T
-
- # need to downcast here because of all of the transposes
- result._data = result._data.downcast()
-
- return result
-
- # > 3d
- if self.ndim > 3:
- raise NotImplementedError(
- 'Cannot fillna with a method for > 3dims'
- )
+ raise NotImplementedError('cannot fill inplace for mixed dtypes')
+ elif (self.ndim == 2) and (axis == 1):
+ result = self.T.fillna(method=method, limit=limit).T
- # 3d
- elif self.ndim == 3:
+ # need to downcast here because of all of the transposes
+ result._data = result._data.downcast()
- # fill in 2d chunks
- result = dict([(col, s.fillna(method=method, value=value))
- for col, s in compat.iteritems(self)])
- return self._constructor.from_dict(result).__finalize__(self)
+ return result
- # 2d or less
- method = mis._clean_fill_method(method)
+ method = missing._clean_fill_method(method)
new_data = self._data.interpolate(method=method,
axis=axis,
limit=limit,
@@ -3750,7 +3738,7 @@ def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
from pandas import DataFrame, Series
- method = mis._clean_fill_method(method)
+ method = missing._clean_fill_method(method)
if broadcast_axis == 1 and self.ndim != other.ndim:
if isinstance(self, Series):
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 1b08140ebec09..acfec52162658 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -25,7 +25,7 @@
from pandas.core.categorical import Categorical, maybe_to_categorical
from pandas.tseries.index import DatetimeIndex
import pandas.core.common as com
-import pandas.core.missing as mis
+import pandas.core.missing as missing
import pandas.core.convert as convert
from pandas.sparse.array import _maybe_to_sparse, SparseArray
import pandas.lib as lib
@@ -853,7 +853,7 @@ def check_int_bool(self, inplace):
# a fill na type method
try:
- m = mis._clean_fill_method(method)
+ m = missing._clean_fill_method(method)
except:
m = None
@@ -871,7 +871,7 @@ def check_int_bool(self, inplace):
mgr=mgr)
# try an interp method
try:
- m = mis._clean_interp_method(method, **kwargs)
+ m = missing._clean_interp_method(method, **kwargs)
except:
m = None
@@ -910,12 +910,12 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
values = self.values if inplace else self.values.copy()
values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = self._try_operate(values)
- values = mis.interpolate_2d(values,
- method=method,
- axis=axis,
- limit=limit,
- fill_value=fill_value,
- dtype=self.dtype)
+ values = missing.pad(values,
+ method=method,
+ axis=axis,
+ limit=limit,
+ fill_value=fill_value,
+ dtype=self.dtype)
values = self._try_coerce_result(values)
blocks = [self.make_block(values,
@@ -950,8 +950,8 @@ def func(x):
# process a 1-d slice, returning it
# should the axis argument be handled below in apply_along_axis?
- # i.e. not an arg to mis.interpolate_1d
- return mis.interpolate_1d(index, x, method=method, limit=limit,
+ # i.e. not an arg to missing.interpolate
+ return missing.interpolate(index, x, method=method, limit=limit,
limit_direction=limit_direction,
fill_value=fill_value,
bounds_error=False, **kwargs)
@@ -2358,7 +2358,7 @@ def make_block_same_class(self, values, placement,
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
- values = mis.interpolate_2d(
+ values = missing.pad(
self.values.to_dense(), method, axis, limit, fill_value)
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
@@ -3774,8 +3774,8 @@ def reindex(self, new_axis, indexer=None, method=None, fill_value=None,
# fill if needed
if method is not None or limit is not None:
- new_values = mis.interpolate_2d(new_values, method=method,
- limit=limit, fill_value=fill_value)
+ new_values = missing.pad(new_values, method=method,
+ limit=limit, fill_value=fill_value)
if self._block.is_sparse:
make_block = self._block.make_block_same_class
diff --git a/pandas/core/missing.py b/pandas/core/missing.py
index f1143ad808b91..67aeea5878ef6 100644
--- a/pandas/core/missing.py
+++ b/pandas/core/missing.py
@@ -49,9 +49,9 @@ def _clean_interp_method(method, **kwargs):
return method
-def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
- limit_direction='forward',
- fill_value=None, bounds_error=False, order=None, **kwargs):
+def interpolate(xvalues, yvalues, method='linear', limit=None,
+ limit_direction='forward',
+ fill_value=None, bounds_error=False, order=None, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
@@ -219,20 +219,42 @@ def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
return new_y
-def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
- """ perform an actual interpolation of values, values will be make 2-d if
- needed fills inplace, returns the result
+def pad(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
+ """
+ Perform an actual interpolation of values. 1-d values will be made 2-d temporarily.
+ Returns the result
"""
- transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
+ ndim = values.ndim
# reshape a 1 dim if needed
- ndim = values.ndim
- if values.ndim == 1:
+ if ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
+ # recursively slice n-dimension frames (n>2) into (n-1)-dimension frames
+ elif ndim > 2:
+ slice_axis = 1 if axis == 0 else 0
+ slicer = [slice(None)]*ndim
+
+ if ndim == 3:
+ axis = 0 if (axis > 1) else 1
+ else:
+ axis = axis - 1 if (axis > 0) else 0
+
+ for n in range(values.shape[slice_axis]):
+ slicer[slice_axis] = n
+ values[slicer] = pad(values[slicer],
+ method=method,
+ axis=axis,
+ limit=limit,
+ fill_value=fill_value,
+ dtype=dtype)
+
+ return values
+
+ transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
if fill_value is None:
mask = None
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 1f8bcf8c9879f..7b130fab92f16 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1454,20 +1454,98 @@ def test_fillna(self):
assert_frame_equal(filled['ItemA'],
panel['ItemA'].fillna(method='backfill'))
+ # GH 11445
+ # Fill forward.
+ filled = self.panel.fillna(method='ffill')
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='ffill'))
+
+ # With limit.
+ filled = self.panel.fillna(method='backfill', limit=1)
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='backfill', limit=1))
+
+ # With downcast.
+ rounded = self.panel.apply(lambda x: x.apply(np.round))
+ filled = rounded.fillna(method='backfill', downcast='infer')
+ assert_frame_equal(filled['ItemA'],
+ rounded['ItemA'].fillna(method='backfill', downcast='infer'))
+
+ # Now explicitly request axis 1.
+ filled = self.panel.fillna(method='backfill', axis=1)
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='backfill', axis=0))
+
+ # Fill along axis 2, equivalent to filling along axis 1 of each
+ # DataFrame.
+ filled = self.panel.fillna(method='backfill', axis=2)
+ assert_frame_equal(filled['ItemA'],
+ self.panel['ItemA'].fillna(method='backfill', axis=1))
+
+ # Fill an empty panel.
empty = self.panel.reindex(items=[])
filled = empty.fillna(0)
assert_panel_equal(filled, empty)
+ # either method or value must be specified
self.assertRaises(ValueError, self.panel.fillna)
+
+ # method and value can not both be specified
self.assertRaises(ValueError, self.panel.fillna, 5, method='ffill')
+ # can't pass list or tuple, only scalar
self.assertRaises(TypeError, self.panel.fillna, [1, 2])
self.assertRaises(TypeError, self.panel.fillna, (1, 2))
# limit not implemented when only value is specified
p = Panel(np.random.randn(3,4,5))
p.iloc[0:2,0:2,0:2] = np.nan
- self.assertRaises(NotImplementedError, lambda : p.fillna(999,limit=1))
+ self.assertRaises(NotImplementedError, lambda : p.fillna(999, limit=1))
+
+ def test_fillna_axis_0(self):
+ # GH 11445
+
+ # Forward fill along axis 0, interpolating values across DataFrames.
+ filled = self.panel.fillna(method='ffill', axis=0)
+ nan_indexes = self.panel.loc['ItemB', :, 'C'].index[
+ self.panel.loc['ItemB', :, 'C'].apply(np.isnan)]
+
+ # Values from ItemA are filled into ItemB.
+ assert_series_equal(filled.loc['ItemB', :, 'C'][nan_indexes],
+ self.panel.loc['ItemA', :, 'C'][nan_indexes])
+
+ # Backfill along axis 0.
+ filled = self.panel.fillna(method='backfill', axis=0)
+
+ # The test data lacks values that can be backfilled on axis 0.
+ assert_panel_equal(filled, self.panel)
+
+ # Reverse the panel and backfill along axis 0, to properly test
+ # backfill.
+ reverse_panel = self.panel.reindex_axis(reversed(self.panel.axes[0]))
+ filled = reverse_panel.fillna(method='bfill', axis=0)
+ nan_indexes = reverse_panel.loc['ItemB', :, 'C'].index[
+ reverse_panel.loc['ItemB', :, 'C'].isnull()]
+ assert_series_equal(filled.loc['ItemB', :, 'C'][nan_indexes],
+ reverse_panel.loc['ItemA', :, 'C'][nan_indexes])
+
+ # Fill along axis 0 with limit.
+ filled = self.panel.fillna(method='ffill', axis=0, limit=1)
+ a_nan = self.panel.loc['ItemA', :, 'C'].index[
+ self.panel.loc['ItemA', :, 'C'].apply(np.isnan)]
+ b_nan = self.panel.loc['ItemB', :, 'C'].index[
+ self.panel.loc['ItemB', :, 'C'].apply(np.isnan)]
+
+ # Cells that are nan in ItemB but not in ItemA remain unfilled in
+ # ItemC.
+ self.assertTrue(
+ filled.loc['ItemC', :, 'C'][b_nan.difference(a_nan)].apply(np.isnan).all())
+
+ # limit not implemented when only value is specified
+ panel = self.panel.copy()
+ panel['str'] = 'foo'
+ self.assertRaises(NotImplementedError,
+ lambda: panel.fillna(method='ffill', axis=0))
def test_ffill_bfill(self):
assert_panel_equal(self.panel.ffill(),
diff --git a/pandas/tests/test_panel4d.py b/pandas/tests/test_panel4d.py
index 3772d4b9c272b..6f8bcebd6e591 100644
--- a/pandas/tests/test_panel4d.py
+++ b/pandas/tests/test_panel4d.py
@@ -909,11 +909,106 @@ def test_sort_index(self):
# assert_panel_equal(sorted_panel, self.panel)
def test_fillna(self):
+ # GH 11445
self.assertFalse(np.isfinite(self.panel4d.values).all())
filled = self.panel4d.fillna(0)
self.assertTrue(np.isfinite(filled.values).all())
- self.assertRaises(NotImplementedError, self.panel4d.fillna, method='pad')
+ filled = self.panel4d.fillna(method='backfill')
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='backfill'))
+
+ panel4d = self.panel4d.copy()
+ panel4d['str'] = 'foo'
+
+ filled = panel4d.fillna(method='backfill')
+ assert_frame_equal(filled['l1']['ItemA'],
+ panel4d['l1']['ItemA'].fillna(method='backfill'))
+
+ # Fill forward.
+ filled = self.panel4d.fillna(method='ffill')
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='ffill'))
+
+ # With limit.
+ filled = self.panel4d.fillna(method='backfill', limit=1)
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='backfill', limit=1))
+
+ # With downcast.
+ rounded = self.panel4d.apply(lambda x: x.apply(np.round))
+ filled = rounded.fillna(method='backfill', downcast='infer')
+ assert_frame_equal(filled['l1']['ItemA'],
+ rounded['l1']['ItemA'].fillna(method='backfill', downcast='infer'))
+
+ # Now explicitly request axis 2.
+ filled = self.panel4d.fillna(method='backfill', axis=2)
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='backfill', axis=0))
+
+ # Fill along axis 3, equivalent to filling along axis 1 of each
+ # DataFrame.
+ filled = self.panel4d.fillna(method='backfill', axis=3)
+ assert_frame_equal(filled['l1']['ItemA'],
+ self.panel4d['l1']['ItemA'].fillna(method='backfill', axis=1))
+
+ # Fill an empty panel.
+ empty = self.panel4d.reindex(items=[])
+ filled = empty.fillna(0)
+ assert_panel4d_equal(filled, empty)
+
+ # either method or value must be specified
+ self.assertRaises(ValueError, self.panel4d.fillna)
+ # method and value can not both be specified
+ self.assertRaises(ValueError, self.panel4d.fillna, 5, method='ffill')
+
+ # can't pass list or tuple, only scalar
+ self.assertRaises(TypeError, self.panel4d.fillna, [1, 2])
+ self.assertRaises(TypeError, self.panel4d.fillna, (1, 2))
+
+ # limit not implemented when only value is specified
+ p = Panel4D(np.random.randn(3,4,5,6))
+ p.iloc[0:2,0:2,0:2,0:2] = np.nan
+ self.assertRaises(NotImplementedError, lambda : p.fillna(999, limit=1))
+
+ def test_fillna_axis_0(self):
+ # GH 11445
+
+ # Back fill along axis 0, interpolating values across Panels
+ filled = self.panel4d.fillna(method='bfill', axis=0)
+ nan_indexes = self.panel4d.loc['l1', 'ItemB', :, 'C'].index[
+ self.panel4d.loc['l1', 'ItemB', :, 'C'].apply(np.isnan)]
+
+ # Values from ItemC are filled into ItemB.
+ assert_series_equal(filled.loc['l1', 'ItemB', :, 'C'][nan_indexes],
+ self.panel4d.loc['l1', 'ItemC', :, 'C'][nan_indexes])
+
+ # Forward fill along axis 0.
+ filled = self.panel4d.fillna(method='ffill', axis=0)
+
+ # The test data lacks values that can be backfilled on axis 0.
+ assert_panel4d_equal(filled, self.panel4d)
+
+ # Reverse the panel and backfill along axis 0, to properly test
+ # forward fill.
+ reverse_panel = self.panel4d.reindex_axis(reversed(self.panel4d.axes[0]))
+ filled = reverse_panel.fillna(method='ffill', axis=0)
+ nan_indexes = reverse_panel.loc['l3', 'ItemB', :, 'C'].index[
+ reverse_panel.loc['l3', 'ItemB', :, 'C'].apply(np.isnan)]
+ assert_series_equal(filled.loc['l3', 'ItemB', :, 'C'][nan_indexes],
+ reverse_panel.loc['l1', 'ItemB', :, 'C'][nan_indexes])
+
+ # Fill along axis 0 with limit.
+ filled = self.panel4d.fillna(method='bfill', axis=0, limit=1)
+ c_nan = self.panel4d.loc['l1', 'ItemC', :, 'C'].index[
+ self.panel4d.loc['l1', 'ItemC', :, 'C'].apply(np.isnan)]
+ b_nan = self.panel4d.loc['l1', 'ItemB', :, 'C'].index[
+ self.panel4d.loc['l1', 'ItemB', :, 'C'].apply(np.isnan)]
+
+ # Cells that are nan in ItemB but not in ItemC remain unfilled in
+ # ItemA.
+ self.assertTrue(
+ filled.loc['l1', 'ItemA', :, 'C'][b_nan.difference(c_nan)].apply(np.isnan).all())
def test_swapaxes(self):
result = self.panel4d.swapaxes('labels', 'items')
| closes #3570
closes #8251
This is a re-submission of PR #8395 and addresses issue #8251. My apologies for the duplicate PR, but despite rebasing to pydata/pandas master, the original PR would not update with my new commits and I couldn't get rid of the `LooseVersion()` errors in the Travis build.
This PR may still need some fleshing out, but I wanted to submit it to get feedback on this approach. For this PR I've created a new interpolation mechanism at the block level to implement filling across 3 or more dimensions. This avoids the fiddly problems of trying to implement filling of 3+ dimensions at the frame level. However, with this technique it isn't possible to fill across blocks of different dtypes, although that should be a rare occurrence.
Incidentally, this will also address #3570, which is one of the issues referenced in #9862.
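A minimal sketch of the behaviour the new tests exercise; the panel below is made up for illustration and is not taken from the PR:

``` python
import numpy as np
import pandas as pd

p = pd.Panel(np.random.randn(3, 4, 5))
p.iloc[0, 0, 0] = np.nan

# Fill along the items axis: the NaN in item 0 is taken from item 1
# (previously the axis argument was effectively ignored for method fills).
filled = p.fillna(method='bfill', axis=0)

# Filling along axis 2 is equivalent to filling along axis 1 of each item DataFrame.
filled2 = p.fillna(method='backfill', axis=2)

# Note: per the check added in generic.py, a mixed-dtype panel filled across
# axis 0 raises NotImplementedError.
```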
| https://api.github.com/repos/pandas-dev/pandas/pulls/11445 | 2015-10-27T21:31:46Z | 2016-01-20T14:12:24Z | null | 2016-01-20T14:12:47Z |
DOC: Remove suppression from ipython directive to make object visible | diff --git a/doc/source/groupby.rst b/doc/source/groupby.rst
index b5a382ce24342..e517e9ab9935d 100644
--- a/doc/source/groupby.rst
+++ b/doc/source/groupby.rst
@@ -257,20 +257,21 @@ GroupBy with MultiIndex
With :ref:`hierarchically-indexed data <advanced.hierarchical>`, it's quite
natural to group by one of the levels of the hierarchy.
+Let's create a series with a two-level ``MultiIndex``.
+
.. ipython:: python
- :suppress:
arrays = [['bar', 'bar', 'baz', 'baz', 'foo', 'foo', 'qux', 'qux'],
['one', 'two', 'one', 'two', 'one', 'two', 'one', 'two']]
- tuples = list(zip(*arrays))
- tuples
- index = pd.MultiIndex.from_tuples(tuples, names=['first', 'second'])
+ index = pd.MultiIndex.from_arrays(arrays, names=['first', 'second'])
s = pd.Series(np.random.randn(8), index=index)
+ s
+
+We can then group by one of the levels in ``s``.
.. ipython:: python
- s
grouped = s.groupby(level=0)
grouped.sum()
| Fix for https://github.com/pydata/pandas/issues/11223
@TomAugspurger should these two IPython blocks be merged into one?
(this is what it looks like at the moment)
![pandas-11223](https://cloud.githubusercontent.com/assets/4982858/10770424/2013049a-7ce3-11e5-9f64-f8a66c98b961.png)
| https://api.github.com/repos/pandas-dev/pandas/pulls/11443 | 2015-10-27T19:37:12Z | 2015-11-14T19:32:53Z | 2015-11-14T19:32:53Z | 2015-11-14T19:32:56Z |
Fix for DataFrame.hist() with by- and weights-keyword | diff --git a/pandas/tests/test_graphics_others.py b/pandas/tests/test_graphics_others.py
index b18cbae600b43..54f6cf50ea5ec 100644
--- a/pandas/tests/test_graphics_others.py
+++ b/pandas/tests/test_graphics_others.py
@@ -302,6 +302,30 @@ def test_boxplot_empty_column(self):
df.loc[:, 0] = np.nan
_check_plot_works(df.boxplot, return_type='axes')
+ @slow
+ def test_hist_df_nan_and_weights(self):
+ d = {'category' : ['A', 'A', 'B', 'B', 'C'],
+ 'items' : [4., 3., 2., np.nan, 1],
+ 'val' : [10., 8., np.nan, 5, 7.]}
+ df = DataFrame(d)
+ orig_columns = df.columns
+ orig_rows = len(df)
+ _check_plot_works(df.hist, column='items', by='category',
+ weights='val', bins=range(0, 10))
+ _check_plot_works(df.hist, column='items', by='category',
+ weights=df.val.values, bins=range(0, 10))
+ # check without weights functionality
+ _check_plot_works(df.hist, column='items', by='category',
+ bins=range(0, 10))
+ _check_plot_works(df.hist, column='items', weights='val',
+ bins=range(0, 10))
+ _check_plot_works(df.hist, column='items', weights=df.val.values,
+ bins=range(0, 10))
+ # also check that we have not changed the original df that had
+ # nan values in it.
+ self.assertEqual(len(orig_columns), len(df.columns))
+ self.assertEqual(orig_rows, len(df))
+
@slow
def test_hist_df_legacy(self):
from matplotlib.patches import Rectangle
diff --git a/pandas/tools/plotting.py b/pandas/tools/plotting.py
index 98d6f5e8eb797..3acf2f39b8864 100644
--- a/pandas/tools/plotting.py
+++ b/pandas/tools/plotting.py
@@ -2772,7 +2772,8 @@ def plot_group(group, ax):
def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
- sharey=False, figsize=None, layout=None, bins=10, **kwds):
+ sharey=False, figsize=None, layout=None, bins=10, weights=None,
+ **kwds):
"""
Draw histogram of the DataFrame's series using matplotlib / pylab.
@@ -2807,17 +2808,37 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
layout: (optional) a tuple (rows, columns) for the layout of the histograms
bins: integer, default 10
Number of histogram bins to be used
+ weights : string or sequence
+ If passed, will be used to weight the data
kwds : other plotting keyword arguments
To be passed to hist function
"""
+ subset_cols_drop_nan = []
+ if weights is not None:
+ if isinstance(weights, np.ndarray):
+ # weights supplied as an array instead of a part of the dataframe
+ if 'weights' in data.columns:
+ raise NameError('weights already in data.columns. Could not ' +
+ 'add dummy column')
+ data = data.copy()
+ data['weights'] = weights
+ weights = 'weights'
+ subset_cols_drop_nan.append(weights)
+ if column is not None:
+ subset_cols_drop_nan.append(column)
+ data = data.dropna(subset=subset_cols_drop_nan)
if by is not None:
axes = grouped_hist(data, column=column, by=by, ax=ax, grid=grid, figsize=figsize,
sharex=sharex, sharey=sharey, layout=layout, bins=bins,
xlabelsize=xlabelsize, xrot=xrot, ylabelsize=ylabelsize, yrot=yrot,
- **kwds)
+ weights=weights, **kwds)
return axes
+ if weights is not None:
+ weights = data[weights]
+ weights = weights._get_numeric_data()
+
if column is not None:
if not isinstance(column, (list, np.ndarray, Index)):
column = [column]
@@ -2832,7 +2853,7 @@ def hist_frame(data, column=None, by=None, grid=True, xlabelsize=None,
for i, col in enumerate(com._try_sort(data.columns)):
ax = _axes[i]
- ax.hist(data[col].dropna().values, bins=bins, **kwds)
+ ax.hist(data[col].values, bins=bins, weights=weights, **kwds)
ax.set_title(col)
ax.grid(grid)
@@ -2916,10 +2937,10 @@ def hist_series(self, by=None, ax=None, grid=True, xlabelsize=None,
return axes
-def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
- layout=None, sharex=False, sharey=False, rot=90, grid=True,
- xlabelsize=None, xrot=None, ylabelsize=None, yrot=None,
- **kwargs):
+def grouped_hist(data, column=None, by=None, ax=None, bins=50,
+ figsize=None, layout=None, sharex=False, sharey=False, rot=90,
+ grid=True, xlabelsize=None, xrot=None, ylabelsize=None,
+ yrot=None, weights=None, **kwargs):
"""
Grouped histogram
@@ -2936,20 +2957,30 @@ def grouped_hist(data, column=None, by=None, ax=None, bins=50, figsize=None,
sharey: boolean, default False
rot: int, default 90
grid: bool, default True
+ weights: object, optional
kwargs: dict, keyword arguments passed to matplotlib.Axes.hist
Returns
-------
axes: collection of Matplotlib Axes
"""
- def plot_group(group, ax):
- ax.hist(group.dropna().values, bins=bins, **kwargs)
+ def plot_group(group, ax, weights=None):
+ if isinstance(group, np.ndarray) == False:
+ group = group.values
+ if weights is not None:
+ if isinstance(weights, np.ndarray) == False:
+ weights = weights.values
+ if len(group) > 0:
+ # if length is less than 0, we had only NaN's for this group
+ # nothing to print!
+ ax.hist(group, weights=weights, bins=bins, **kwargs)
xrot = xrot or rot
- fig, axes = _grouped_plot(plot_group, data, column=column,
- by=by, sharex=sharex, sharey=sharey, ax=ax,
- figsize=figsize, layout=layout, rot=rot)
+ fig, axes = _grouped_plot(plot_group, data, column=column, by=by,
+ sharex=sharex, sharey=sharey, ax=ax,
+ figsize=figsize, layout=layout, rot=rot,
+ weights=weights)
_set_ticks_props(axes, xlabelsize=xlabelsize, xrot=xrot,
ylabelsize=ylabelsize, yrot=yrot)
@@ -3034,9 +3065,9 @@ def boxplot_frame_groupby(grouped, subplots=True, column=None, fontsize=None,
return ret
-def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
- figsize=None, sharex=True, sharey=True, layout=None,
- rot=0, ax=None, **kwargs):
+def _grouped_plot(plotf, data, column=None, by=None,
+ numeric_only=True, figsize=None, sharex=True, sharey=True,
+ layout=None, rot=0, ax=None, weights=None, **kwargs):
from pandas import DataFrame
if figsize == 'default':
@@ -3046,6 +3077,9 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
figsize = None
grouped = data.groupby(by)
+
+ if weights is not None:
+ weights = grouped[weights]
if column is not None:
grouped = grouped[column]
@@ -3056,11 +3090,20 @@ def _grouped_plot(plotf, data, column=None, by=None, numeric_only=True,
_axes = _flatten(axes)
+ weight = None
for i, (key, group) in enumerate(grouped):
ax = _axes[i]
+ if weights is not None:
+ weight = weights.get_group(key)
if numeric_only and isinstance(group, DataFrame):
group = group._get_numeric_data()
- plotf(group, ax, **kwargs)
+ if weight is not None:
+ weight = weight._get_numeric_data()
+ if weight is not None:
+ plotf(group, ax, weight, **kwargs)
+ else:
+ # scatterplot etc has not the weight implemented in plotf
+ plotf(group, ax, **kwargs)
ax.set_title(com.pprint_thing(key))
return fig, axes
| This will make the following work:
``` python
import numpy as np
import pandas as pd
d = {'one' : ['A', 'A', 'B', 'B', 'C'],
'two' : [4., 3., 2., 1., np.nan],
'three' : [10., 8., np.nan, 5., 7.]}
df = pd.DataFrame(d)
df.hist('two', by='one', weights='three', bins=range(0, 10))
# or
df.hist('two', by=df.one.values, weights='three', bins=range(0, 10))
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11441 | 2015-10-27T14:23:49Z | 2017-04-03T15:25:51Z | null | 2017-04-03T15:25:51Z |
CI: Py3.5 / numpy 1.10 testing | diff --git a/ci/requirements-3.5.build b/ci/requirements-3.5.build
index de36b1afb9fa4..9558cf00ddf5c 100644
--- a/ci/requirements-3.5.build
+++ b/ci/requirements-3.5.build
@@ -1,4 +1,4 @@
python-dateutil
pytz
-numpy=1.9.3
+numpy
cython
diff --git a/ci/requirements-3.5.run b/ci/requirements-3.5.run
index 91938675280d9..8de8f7d8f0630 100644
--- a/ci/requirements-3.5.run
+++ b/ci/requirements-3.5.run
@@ -1,6 +1,6 @@
python-dateutil
pytz
-numpy=1.9.3
+numpy
openpyxl
xlsxwriter
xlrd
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index e97010e1cb552..7ae39571c6e8e 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -87,11 +87,7 @@ def test_constructor_unsortable(self):
self.assertFalse(factor.ordered)
# this however will raise as cannot be sorted
- # but fixed in newer versions of numpy
- if LooseVersion(np.__version__) < "1.10":
- self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True))
- else:
- Categorical.from_array(arr, ordered=True)
+ self.assertRaises(TypeError, lambda : Categorical.from_array(arr, ordered=True))
def test_is_equal_dtype(self):
| closes #11187
closes #11138
| https://api.github.com/repos/pandas-dev/pandas/pulls/11439 | 2015-10-27T12:38:40Z | 2015-10-27T13:18:42Z | 2015-10-27T13:18:42Z | 2015-10-27T13:18:42Z |
BUG: fix UnicodeEncodeError with to_sql and unicode column names (GH11431) | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 7178be1ffefd2..025aee17acec4 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -130,6 +130,13 @@ Bug Fixes
- Bug in ``squeeze()`` with zero length arrays (:issue:`11230`, :issue:`8999`)
- Bug in ``describe()`` dropping column names for hierarchical indexes (:issue:`11517`)
- Bug in ``DataFrame.pct_change()`` not propagating ``axis`` keyword on ``.fillna`` method (:issue:`11150`)
+
+
+
+
+
+
+- Bug in ``to_sql`` using unicode column names giving UnicodeEncodeError with (:issue:`11431`).
- Fix regression in setting of ``xticks`` in ``plot`` (:issue:`11529`).
- Bug in ``holiday.dates`` where observance rules could not be applied to holiday and doc enhancement (:issue:`11477`, :issue:`11533`)
- Fix plotting issues when having plain ``Axes`` instances instead of ``SubplotAxes`` (:issue:`11520`, :issue:`11556`).
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 799d1e88260f2..bedc71379354d 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -14,7 +14,7 @@
import pandas.lib as lib
import pandas.core.common as com
-from pandas.compat import lzip, map, zip, raise_with_traceback, string_types
+from pandas.compat import lzip, map, zip, raise_with_traceback, string_types, text_type
from pandas.core.api import DataFrame, Series
from pandas.core.common import isnull
from pandas.core.base import PandasObject
@@ -711,7 +711,7 @@ def insert_data(self):
else:
temp = self.frame
- column_names = list(map(str, temp.columns))
+ column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
@@ -853,7 +853,7 @@ def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types.append((idx_label, idx_type, True))
column_names_and_types += [
- (str(self.frame.columns[i]),
+ (text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
@@ -1400,7 +1400,7 @@ def _execute_create(self):
conn.execute(stmt)
def insert_statement(self):
- names = list(map(str, self.frame.columns))
+ names = list(map(text_type, self.frame.columns))
flv = self.pd_sql.flavor
wld = _SQL_WILDCARD[flv] # wildcard char
escape = _SQL_GET_IDENTIFIER[flv]
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index aced92ec8abd0..bcaff8aaab768 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -834,6 +834,11 @@ def test_categorical(self):
tm.assert_frame_equal(res, df)
+ def test_unicode_column_name(self):
+ # GH 11431
+ df = DataFrame([[1,2],[3,4]], columns = [u'\xe9',u'b'])
+ df.to_sql('test_unicode', self.conn, index=False)
+
class TestSQLApi(SQLAlchemyMixIn, _TestSQLApi):
"""
@@ -1992,7 +1997,7 @@ def test_illegal_names(self):
for ndx, weird_name in enumerate(['test_weird_name]','test_weird_name[',
'test_weird_name`','test_weird_name"', 'test_weird_name\'',
'_b.test_weird_name_01-30', '"_b.test_weird_name_01-30"',
- '99beginswithnumber', '12345']):
+ '99beginswithnumber', '12345', u'\xe9']):
df.to_sql(weird_name, self.conn, flavor=self.flavor)
sql.table_exists(weird_name, self.conn)
| Closes #11431
Still have to add tests.
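A hedged repro sketch, adapted from the test added in this diff; the in-memory sqlite connection is an assumption, any supported connection should behave the same:

``` python
import sqlite3
import pandas as pd

conn = sqlite3.connect(':memory:')

# Non-ASCII column labels used to raise UnicodeEncodeError on Python 2,
# because column names were coerced with str() rather than text_type().
df = pd.DataFrame([[1, 2], [3, 4]], columns=[u'\xe9', u'b'])
df.to_sql('test_unicode', conn, index=False)
```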
| https://api.github.com/repos/pandas-dev/pandas/pulls/11432 | 2015-10-26T10:09:32Z | 2015-11-16T12:15:32Z | 2015-11-16T12:15:32Z | 2015-11-16T12:15:32Z |
CLN: (more) boundscheck warnings in tslib | diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 2fa406f880cdd..afb15badf433c 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -4240,6 +4240,8 @@ cdef inline int m8_weekday(int64_t val):
cdef int64_t DAY_NS = 86400000000000LL
[email protected](False)
[email protected](False)
def date_normalize(ndarray[int64_t] stamps, tz=None):
cdef:
Py_ssize_t i, n = len(stamps)
@@ -4262,6 +4264,8 @@ def date_normalize(ndarray[int64_t] stamps, tz=None):
return result
[email protected](False)
[email protected](False)
cdef _normalize_local(ndarray[int64_t] stamps, object tz):
cdef:
Py_ssize_t n = len(stamps)
| https://github.com/pydata/pandas/pull/11404#issuecomment-150928024
| https://api.github.com/repos/pandas-dev/pandas/pulls/11429 | 2015-10-25T15:58:35Z | 2015-10-25T19:59:49Z | 2015-10-25T19:59:49Z | 2015-10-27T02:08:54Z |
BUG: vectorized DateOffset match non-vectorized | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 01b342213de07..50f104f4529fc 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -954,6 +954,52 @@ These can be used as arguments to ``date_range``, ``bdate_range``, constructors
for ``DatetimeIndex``, as well as various other timeseries-related functions
in pandas.
+Anchored Offset Semantics
+~~~~~~~~~~~~~~~~~~~~~~~~~
+
+For those offsets that are anchored to the start or end of specific
+frequency (``MonthEnd``, ``MonthBegin``, ``WeekEnd``, etc) the following
+rules apply to rolling forward and backwards.
+
+When ``n`` is not 0, if the given date is not on an anchor point, it snapped to the next(previous)
+anchor point, and moved ``|n|-1`` additional steps forwards or backwards.
+
+.. ipython:: python
+
+ pd.Timestamp('2014-01-02') + MonthBegin(n=1)
+ pd.Timestamp('2014-01-02') + MonthEnd(n=1)
+
+ pd.Timestamp('2014-01-02') - MonthBegin(n=1)
+ pd.Timestamp('2014-01-02') - MonthEnd(n=1)
+
+ pd.Timestamp('2014-01-02') + MonthBegin(n=4)
+ pd.Timestamp('2014-01-02') - MonthBegin(n=4)
+
+If the given date *is* on an anchor point, it is moved ``|n|`` points forwards
+or backwards.
+
+.. ipython:: python
+
+ pd.Timestamp('2014-01-01') + MonthBegin(n=1)
+ pd.Timestamp('2014-01-31') + MonthEnd(n=1)
+
+ pd.Timestamp('2014-01-01') - MonthBegin(n=1)
+ pd.Timestamp('2014-01-31') - MonthEnd(n=1)
+
+ pd.Timestamp('2014-01-01') + MonthBegin(n=4)
+ pd.Timestamp('2014-01-31') - MonthBegin(n=4)
+
+For the case when ``n=0``, the date is not moved if on an anchor point, otherwise
+it is rolled forward to the next anchor point.
+
+.. ipython:: python
+
+ pd.Timestamp('2014-01-02') + MonthBegin(n=0)
+ pd.Timestamp('2014-01-02') + MonthEnd(n=0)
+
+ pd.Timestamp('2014-01-01') + MonthBegin(n=0)
+ pd.Timestamp('2014-01-31') + MonthEnd(n=0)
+
.. _timeseries.legacyaliases:
Legacy Aliases
diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 651a6a45580cf..e71830d7dd8d8 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -190,7 +190,7 @@ Bug Fixes
-
+ - Bug in vectorized ``DateOffset`` when ``n`` parameter is ``0`` (:issue:`11370`)
diff --git a/pandas/tseries/offsets.py b/pandas/tseries/offsets.py
index caad86dfdb728..82ea9eebaefa8 100644
--- a/pandas/tseries/offsets.py
+++ b/pandas/tseries/offsets.py
@@ -444,7 +444,7 @@ def _beg_apply_index(self, i, freq):
from pandas.tseries.frequencies import get_freq_code
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
- if self.n < 0:
+ if self.n <= 0:
# when subtracting, dates on start roll to prior
roll = np.where(base_period.to_timestamp() == i - off,
self.n, self.n + 1)
@@ -464,7 +464,7 @@ def _end_apply_index(self, i, freq):
base, mult = get_freq_code(freq)
base_period = i.to_period(base)
if self.n > 0:
- # when adding, dtates on end roll to next
+ # when adding, dates on end roll to next
roll = np.where(base_period.to_timestamp(how='end') == i - off,
self.n, self.n - 1)
else:
@@ -1081,8 +1081,7 @@ def apply(self, other):
@apply_index_wraps
def apply_index(self, i):
- months = self.n - 1 if self.n >= 0 else self.n
- shifted = tslib.shift_months(i.asi8, months, 'end')
+ shifted = tslib.shift_months(i.asi8, self.n, 'end')
return i._shallow_copy(shifted)
def onOffset(self, dt):
@@ -1108,8 +1107,7 @@ def apply(self, other):
@apply_index_wraps
def apply_index(self, i):
- months = self.n + 1 if self.n < 0 else self.n
- shifted = tslib.shift_months(i.asi8, months, 'start')
+ shifted = tslib.shift_months(i.asi8, self.n, 'start')
return i._shallow_copy(shifted)
def onOffset(self, dt):
@@ -1777,6 +1775,7 @@ def apply(self, other):
@apply_index_wraps
def apply_index(self, i):
freq_month = 12 if self.startingMonth == 1 else self.startingMonth - 1
+ # freq_month = self.startingMonth
freqstr = 'Q-%s' % (_int_to_month[freq_month],)
return self._beg_apply_index(i, freqstr)
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index de264f5559fd0..74b9f52a7eb0a 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -2622,7 +2622,8 @@ def test_datetime64_with_DateOffset(self):
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'), Timestamp('2000-01-31 00:23:00'),
- Timestamp('2000-01-01'), Timestamp('2000-02-29'), Timestamp('2000-12-31')])
+ Timestamp('2000-01-01'), Timestamp('2000-03-31'),
+ Timestamp('2000-02-29'), Timestamp('2000-12-31')])
#DateOffset relativedelta fastpath
relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
@@ -2659,11 +2660,15 @@ def test_datetime64_with_DateOffset(self):
else:
do = do
kwargs = {}
- op = getattr(pd.offsets,do)(5, normalize=normalize, **kwargs)
- assert_func(klass([x + op for x in s]), s + op)
- assert_func(klass([x - op for x in s]), s - op)
- assert_func(klass([op + x for x in s]), op + s)
+ for n in [0, 5]:
+ if (do in ['WeekOfMonth','LastWeekOfMonth',
+ 'FY5253Quarter','FY5253'] and n == 0):
+ continue
+ op = getattr(pd.offsets,do)(n, normalize=normalize, **kwargs)
+ assert_func(klass([x + op for x in s]), s + op)
+ assert_func(klass([x - op for x in s]), s - op)
+ assert_func(klass([op + x for x in s]), op + s)
# def test_add_timedelta64(self):
# rng = date_range('1/1/2000', periods=5)
# delta = rng.values[3] - rng.values[1]
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index d1bc8025ba109..a6908a0c36ad4 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -4458,7 +4458,8 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
Py_ssize_t i
pandas_datetimestruct dts
int count = len(dtindex)
- int days_in_current_month
+ int months_to_roll
+ bint roll_check
int64_t[:] out = np.empty(count, dtype='int64')
if day is None:
@@ -4472,36 +4473,44 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
dts.day = min(dts.day, days_in_month(dts))
out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
elif day == 'start':
+ roll_check = False
+ if months <= 0:
+ months += 1
+ roll_check = True
with nogil:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- dts.year = _year_add_months(dts, months)
- dts.month = _month_add_months(dts, months)
+ months_to_roll = months
+
+ # offset semantics - if on the anchor point and going backwards
+ # shift to next
+ if roll_check and dts.day == 1:
+ months_to_roll -= 1
+
+ dts.year = _year_add_months(dts, months_to_roll)
+ dts.month = _month_add_months(dts, months_to_roll)
+ dts.day = 1
- # offset semantics - when subtracting if at the start anchor
- # point, shift back by one more month
- if months <= 0 and dts.day == 1:
- dts.year = _year_add_months(dts, -1)
- dts.month = _month_add_months(dts, -1)
- else:
- dts.day = 1
out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
elif day == 'end':
+ roll_check = False
+ if months > 0:
+ months -= 1
+ roll_check = True
with nogil:
for i in range(count):
if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- days_in_current_month = days_in_month(dts)
-
- dts.year = _year_add_months(dts, months)
- dts.month = _month_add_months(dts, months)
+ months_to_roll = months
# similar semantics - when adding shift forward by one
# month if already at an end of month
- if months >= 0 and dts.day == days_in_current_month:
- dts.year = _year_add_months(dts, 1)
- dts.month = _month_add_months(dts, 1)
+ if roll_check and dts.day == days_in_month(dts):
+ months_to_roll += 1
+
+ dts.year = _year_add_months(dts, months_to_roll)
+ dts.month = _month_add_months(dts, months_to_roll)
dts.day = days_in_month(dts)
out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
| closes #11370
Also added a bit of documentation on the semantics for anchored offsets.
Currently blocked by #11406: either the vectorized or the non-vectorized implementation of `QuarterBegin` needs to change. As discussed in that issue, my opinion is that the vectorized version is the "right" (or at least consistent) one.
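For reference, a small illustration of the anchored-offset semantics being documented (a hedged sketch of the vectorized behaviour, not part of the diff):

```python
import pandas as pd

# a mid-month date snaps to the current month end, while a date already on the
# anchor rolls on to the next month end when adding a positive offset
dates = pd.Series([pd.Timestamp('2000-01-15'), pd.Timestamp('2000-01-31')])
print(dates + pd.offsets.MonthEnd(1))
# 0   2000-01-31
# 1   2000-02-29
```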
| https://api.github.com/repos/pandas-dev/pandas/pulls/11427 | 2015-10-24T21:09:45Z | 2015-12-13T20:42:04Z | 2015-12-13T20:42:03Z | 2016-01-15T01:03:40Z |
DOC: fix cython example (failed on Windows) (GH6002) | diff --git a/doc/source/enhancingperf.rst b/doc/source/enhancingperf.rst
index 028e6d064a561..d98801f4a7afe 100644
--- a/doc/source/enhancingperf.rst
+++ b/doc/source/enhancingperf.rst
@@ -274,8 +274,8 @@ advanced cython techniques:
...: return s * dx
...: @cython.boundscheck(False)
...: @cython.wraparound(False)
- ...: cpdef np.ndarray[double] apply_integrate_f_wrap(np.ndarray[double] col_a, np.ndarray[double] col_b, np.ndarray[Py_ssize_t] col_N):
- ...: cdef Py_ssize_t i, n = len(col_N)
+ ...: cpdef np.ndarray[double] apply_integrate_f_wrap(np.ndarray[double] col_a, np.ndarray[double] col_b, np.ndarray[int] col_N):
+ ...: cdef int i, n = len(col_N)
...: assert len(col_a) == len(col_b) == n
...: cdef np.ndarray[double] res = np.empty(n)
...: for i in range(n):
| Closes #6002
The signature of `apply_integrate_f_wrap` (http://pandas-docs.github.io/pandas-docs-travis/enhancingperf.html#more-advanced-techniques) used `Py_ssize_t`, but on Windows that raises an error: the example DataFrame is generated with numpy random integer arrays, which give int32 on Windows, while `Py_ssize_t` is int64 on 64-bit Windows.
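A quick check that illustrates the dtype mismatch (illustrative sketch; the printed dtype depends on the platform):

```python
import numpy as np

# np.random.randint defaults to the platform C long:
# int32 on 64-bit Windows, int64 on most 64-bit Linux/macOS builds
col_N = np.random.randint(5, size=10)
print(col_N.dtype)  # int32 on Windows, so it does not match a Py_ssize_t (int64) buffer
```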
| https://api.github.com/repos/pandas-dev/pandas/pulls/11426 | 2015-10-24T10:51:23Z | 2015-11-01T21:09:58Z | 2015-11-01T21:09:58Z | 2015-11-01T21:09:58Z |
DOC: add some examples to Index set operations | diff --git a/pandas/core/index.py b/pandas/core/index.py
index 7049ac33feac6..003e444672d85 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1469,7 +1469,7 @@ def __xor__(self, other):
def union(self, other):
"""
- Form the union of two Index objects and sorts if possible
+ Form the union of two Index objects and sorts if possible.
Parameters
----------
@@ -1478,6 +1478,15 @@ def union(self, other):
Returns
-------
union : Index
+
+ Examples
+ --------
+
+ >>> idx1 = pd.Index([1, 2, 3, 4])
+ >>> idx2 = pd.Index([3, 4, 5, 6])
+ >>> idx1.union(idx2)
+ Int64Index([1, 2, 3, 4, 5, 6], dtype='int64')
+
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
@@ -1545,8 +1554,10 @@ def _wrap_union_result(self, other, result):
def intersection(self, other):
"""
- Form the intersection of two Index objects. Sortedness of the result is
- not guaranteed
+ Form the intersection of two Index objects.
+
+ This returns a new Index with elements common to the index and `other`.
+ Sortedness of the result is not guaranteed.
Parameters
----------
@@ -1555,6 +1566,15 @@ def intersection(self, other):
Returns
-------
intersection : Index
+
+ Examples
+ --------
+
+ >>> idx1 = pd.Index([1, 2, 3, 4])
+ >>> idx2 = pd.Index([3, 4, 5, 6])
+ >>> idx1.intersection(idx2)
+ Int64Index([3, 4], dtype='int64')
+
"""
self._assert_can_do_setop(other)
other = _ensure_index(other)
@@ -1589,7 +1609,9 @@ def intersection(self, other):
def difference(self, other):
"""
- Compute sorted set difference of two Index objects
+ Return a new Index with elements from the index that are not in `other`.
+
+ This is the sorted set difference of two Index objects.
Parameters
----------
@@ -1597,13 +1619,16 @@ def difference(self, other):
Returns
-------
- diff : Index
+ difference : Index
- Notes
- -----
- One can do either of these and achieve the same result
+ Examples
+ --------
+
+ >>> idx1 = pd.Index([1, 2, 3, 4])
+ >>> idx2 = pd.Index([3, 4, 5, 6])
+ >>> idx1.difference(idx2)
+ Int64Index([1, 2], dtype='int64')
- >>> index.difference(index2)
"""
self._assert_can_do_setop(other)
@@ -1623,7 +1648,6 @@ def sym_diff(self, other, result_name=None):
Parameters
----------
-
other : Index or array-like
result_name : str
 | I was wondering for a second in which direction `Index.difference` works, so I thought some examples could clarify this.
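For example (mirroring the docstring examples added above), `difference` keeps the elements of the calling index that are not in the other index:

```python
import pandas as pd

idx1 = pd.Index([1, 2, 3, 4])
idx2 = pd.Index([3, 4, 5, 6])
idx1.difference(idx2)  # Int64Index([1, 2], dtype='int64')
idx2.difference(idx1)  # Int64Index([5, 6], dtype='int64')
```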
| https://api.github.com/repos/pandas-dev/pandas/pulls/11425 | 2015-10-24T10:03:17Z | 2015-10-25T14:06:38Z | 2015-10-25T14:06:38Z | 2015-10-25T14:06:38Z |
BUG: implement .sort_index(...inplace=True) for #11402 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 2762d84d73ba0..e50919175beb2 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -43,6 +43,7 @@ API changes
- Prettyprinting sets (e.g. in DataFrame cells) now uses set literal syntax (``{x, y}``) instead of
Legacy Python syntax (``set([x, y])``) (:issue:`11215`)
- Indexing with a null key will raise a ``TypeError``, instead of a ``ValueError`` (:issue:`11356`)
+- Series.sort_index() now correctly handles ``inplace`` option (:issue:`11402`)
.. _whatsnew_0171.deprecations:
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 2fc90ef8596f1..b12a31d64eaf7 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -1676,8 +1676,12 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
ascending=ascending)
new_values = self._values.take(indexer)
- return self._constructor(new_values,
- index=new_index).__finalize__(self)
+ result = self._constructor(new_values, index=new_index)
+
+ if inplace:
+ self._update_inplace(result)
+ else:
+ return result.__finalize__(self)
def sort(self, axis=0, ascending=True, kind='quicksort', na_position='last', inplace=True):
"""
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 5ce25f5d93800..3b866e6965c2a 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -5328,6 +5328,30 @@ def test_sort_index(self):
assert_series_equal(sorted_series,
self.ts.reindex(self.ts.index[::-1]))
+ def test_sort_index_inplace(self):
+
+ # For #11402
+ rindex = list(self.ts.index)
+ random.shuffle(rindex)
+
+ # descending
+ random_order = self.ts.reindex(rindex)
+ result = random_order.sort_index(ascending=False, inplace=True)
+ self.assertIs(result, None,
+ msg='sort_index() inplace should return None')
+ assert_index_equal(random_order.index,
+ self.ts.index[::-1])
+ assert_series_equal(random_order,
+ self.ts.reindex(self.ts.index[::-1]))
+
+ # ascending
+ random_order = self.ts.reindex(rindex)
+ result = random_order.sort_index(ascending=True, inplace=True)
+ self.assertIs(result, None,
+ msg='sort_index() inplace should return None')
+ assert_index_equal(random_order.index, self.ts.index)
+ assert_series_equal(random_order, self.ts)
+
def test_sort_API(self):
# API for 9816
| closes #11402
Implements the `inplace` option for `Series.sort_index()`: with `inplace=True` the Series is sorted in place and the call returns `None`.
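A quick sketch of the intended behaviour (mirroring the new tests; the example Series is illustrative):

```python
import pandas as pd

s = pd.Series([2, 0, 1], index=['b', 'c', 'a'])
result = s.sort_index(ascending=True, inplace=True)
assert result is None       # inplace call returns None
print(s.index.tolist())     # ['a', 'b', 'c']
```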
| https://api.github.com/repos/pandas-dev/pandas/pulls/11422 | 2015-10-23T20:26:25Z | 2015-10-27T11:11:42Z | null | 2015-10-27T11:11:42Z |
ENH: namedtuple's fields as columns | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 2762d84d73ba0..418d306a2de7f 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -30,6 +30,7 @@ Other Enhancements
- ``pd.read_*`` functions can now also accept :class:`python:pathlib.Path`, or :class:`py:py._path.local.LocalPath`
objects for the ``filepath_or_buffer`` argument. (:issue:`11033`)
+- ``DataFrame`` now uses the fields of a ``namedtuple`` as columns, if columns are not supplied (:issue:`11181`)
- Improve the error message displayed in :func:`pandas.io.gbq.to_gbq` when the DataFrame does not match the schema of the destination table (:issue:`11359`)
.. _whatsnew_0171.api:
diff --git a/pandas/core/common.py b/pandas/core/common.py
index ac3e61a500bb6..d6aa6e6bb90cc 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2676,6 +2676,9 @@ def is_list_like(arg):
return (hasattr(arg, '__iter__') and
not isinstance(arg, compat.string_and_binary_types))
+def is_named_tuple(arg):
+ return isinstance(arg, tuple) and hasattr(arg, '_fields')
+
def is_null_slice(obj):
""" we have a null slice """
return (isinstance(obj, slice) and obj.start is None and
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 827373c9a330b..31b7aacefcb60 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -261,6 +261,8 @@ def __init__(self, data=None, index=None, columns=None, dtype=None,
data = list(data)
if len(data) > 0:
if is_list_like(data[0]) and getattr(data[0], 'ndim', 1) == 1:
+ if com.is_named_tuple(data[0]) and columns is None:
+ columns = data[0]._fields
arrays, columns = _to_arrays(data, columns, dtype=dtype)
columns = _ensure_index(columns)
diff --git a/pandas/tests/test_common.py b/pandas/tests/test_common.py
index 003fd134cf210..89826209fa46d 100644
--- a/pandas/tests/test_common.py
+++ b/pandas/tests/test_common.py
@@ -538,6 +538,15 @@ def test_is_list_like():
for f in fails:
assert not com.is_list_like(f)
+def test_is_named_tuple():
+ passes = (collections.namedtuple('Test',list('abc'))(1,2,3),)
+ fails = ((1,2,3), 'a', Series({'pi':3.14}))
+
+ for p in passes:
+ assert com.is_named_tuple(p)
+
+ for f in fails:
+ assert not com.is_named_tuple(f)
def test_is_hashable():
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index dc0e0e2670565..5c7f1ec9e0037 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -16,8 +16,7 @@
from pandas.compat import(
map, zip, range, long, lrange, lmap, lzip,
- OrderedDict, u, StringIO, string_types,
- is_platform_windows
+ OrderedDict, u, StringIO, is_platform_windows
)
from pandas import compat
@@ -33,8 +32,7 @@
import pandas.core.datetools as datetools
from pandas import (DataFrame, Index, Series, Panel, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp, date_range,
- read_csv, timedelta_range, Timedelta, CategoricalIndex,
- option_context, period_range)
+ read_csv, timedelta_range, Timedelta, option_context, period_range)
from pandas.core.dtypes import DatetimeTZDtype
import pandas as pd
from pandas.parser import CParserError
@@ -2239,7 +2237,6 @@ class TestDataFrame(tm.TestCase, CheckIndexing,
_multiprocess_can_split_ = True
def setUp(self):
- import warnings
self.frame = _frame.copy()
self.frame2 = _frame2.copy()
@@ -3568,6 +3565,20 @@ def test_constructor_tuples(self):
expected = DataFrame({'A': Series([(1, 2), (3, 4)])})
assert_frame_equal(result, expected)
+ def test_constructor_namedtuples(self):
+ # GH11181
+ from collections import namedtuple
+ named_tuple = namedtuple("Pandas", list('ab'))
+ tuples = [named_tuple(1, 3), named_tuple(2, 4)]
+ expected = DataFrame({'a': [1, 2], 'b': [3, 4]})
+ result = DataFrame(tuples)
+ assert_frame_equal(result, expected)
+
+ # with columns
+ expected = DataFrame({'y': [1, 2], 'z': [3, 4]})
+ result = DataFrame(tuples, columns=['y', 'z'])
+ assert_frame_equal(result, expected)
+
def test_constructor_orient(self):
data_dict = self.mixed_frame.T._series
recons = DataFrame.from_dict(data_dict, orient='index')
@@ -4418,7 +4429,7 @@ def test_timedeltas(self):
def test_operators_timedelta64(self):
- from datetime import datetime, timedelta
+ from datetime import timedelta
df = DataFrame(dict(A = date_range('2012-1-1', periods=3, freq='D'),
B = date_range('2012-1-2', periods=3, freq='D'),
C = Timestamp('20120101')-timedelta(minutes=5,seconds=5)))
@@ -9645,7 +9656,6 @@ def test_replace_mixed(self):
assert_frame_equal(result,expected)
# test case from
- from pandas.util.testing import makeCustomDataframe as mkdf
df = DataFrame({'A' : Series([3,0],dtype='int64'), 'B' : Series([0,3],dtype='int64') })
result = df.replace(3, df.mean().to_dict())
expected = df.copy().astype('float64')
@@ -12227,7 +12237,6 @@ def test_sort_index_inplace(self):
assert_frame_equal(df, expected)
def test_sort_index_different_sortorder(self):
- import random
A = np.arange(20).repeat(5)
B = np.tile(np.arange(5), 20)
@@ -13301,7 +13310,6 @@ def test_quantile(self):
def test_quantile_axis_parameter(self):
# GH 9543/9544
- from numpy import percentile
df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3])
@@ -16093,8 +16101,6 @@ def test_query_doesnt_pickup_local(self):
n = m = 10
df = DataFrame(np.random.randint(m, size=(n, 3)), columns=list('abc'))
- from numpy import sin
-
# we don't pick up the local 'sin'
with tm.assertRaises(UndefinedVariableError):
df.query('sin > 5', engine=engine, parser=parser)
@@ -16392,7 +16398,6 @@ def setUpClass(cls):
cls.frame = _frame.copy()
def test_query_builtin(self):
- from pandas.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
@@ -16413,7 +16418,6 @@ def setUpClass(cls):
cls.frame = _frame.copy()
def test_query_builtin(self):
- from pandas.computation.engines import NumExprClobberingError
engine, parser = self.engine, self.parser
n = m = 10
| Resolves https://github.com/pydata/pandas/issues/11181
Is the testing OK, or do we need to test with differing lengths of tuples, etc.?
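For reference, a sketch of the behaviour this adds (mirroring the new test; the `Point` namedtuple is just for illustration):

```python
import pandas as pd
from collections import namedtuple

Point = namedtuple('Point', ['x', 'y'])

# the namedtuple fields become the columns when none are supplied ...
df = pd.DataFrame([Point(1, 3), Point(2, 4)])
print(df.columns.tolist())  # ['x', 'y']

# ... while explicitly passed columns still take precedence
df = pd.DataFrame([Point(1, 3), Point(2, 4)], columns=['a', 'b'])
print(df.columns.tolist())  # ['a', 'b']
```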
| https://api.github.com/repos/pandas-dev/pandas/pulls/11416 | 2015-10-23T00:44:21Z | 2015-10-23T20:30:33Z | 2015-10-23T20:30:33Z | 2015-10-24T01:11:48Z |
Bug in merging datetime64[ns, tz] dtypes #11405 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 989b05003d76f..06ab19b6e4a40 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -74,7 +74,7 @@ Bug Fixes
- Bug in ``.to_latex()`` output broken when the index has a name (:issue: `10660`)
- Bug in ``HDFStore.append`` with strings whose encoded length exceded the max unencoded length (:issue:`11234`)
-
+- Bug in merging ``datetime64[ns, tz]`` dtypes (:issue:`11405`)
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index c2c50bce04309..ac3e61a500bb6 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1081,6 +1081,9 @@ def _maybe_promote(dtype, fill_value=np.nan):
fill_value = tslib.iNaT
else:
fill_value = tslib.iNaT
+ elif is_datetimetz(dtype):
+ if isnull(fill_value):
+ fill_value = tslib.iNaT
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
@@ -1107,7 +1110,9 @@ def _maybe_promote(dtype, fill_value=np.nan):
# in case we have a string that looked like a number
if is_categorical_dtype(dtype):
- dtype = dtype
+ pass
+ elif is_datetimetz(dtype):
+ pass
elif issubclass(np.dtype(dtype).type, compat.string_types):
dtype = np.object_
@@ -2497,7 +2502,6 @@ def is_int64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.int64)
-
def is_int_or_datetime_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) or
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index f1d82ec1f3b2e..b3e7e82b5feb7 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -4114,7 +4114,7 @@ def _interleaved_dtype(blocks):
if not len(blocks):
return None
- counts = defaultdict(lambda: [])
+ counts = defaultdict(list)
for x in blocks:
counts[type(x)].append(x)
@@ -4482,9 +4482,8 @@ def get_empty_dtype_and_na(join_units):
else:
dtypes[i] = unit.dtype
- # dtypes = set()
- upcast_classes = set()
- null_upcast_classes = set()
+ upcast_classes = defaultdict(list)
+ null_upcast_classes = defaultdict(list)
for dtype, unit in zip(dtypes, join_units):
if dtype is None:
continue
@@ -4508,9 +4507,9 @@ def get_empty_dtype_and_na(join_units):
# are only null blocks, when same upcasting rules must be applied to
# null upcast classes.
if unit.is_null:
- null_upcast_classes.add(upcast_cls)
+ null_upcast_classes[upcast_cls].append(dtype)
else:
- upcast_classes.add(upcast_cls)
+ upcast_classes[upcast_cls].append(dtype)
if not upcast_classes:
upcast_classes = null_upcast_classes
@@ -4528,7 +4527,8 @@ def get_empty_dtype_and_na(join_units):
elif 'float' in upcast_classes:
return np.dtype(np.float64), np.nan
elif 'datetimetz' in upcast_classes:
- return np.dtype('M8[ns]'), tslib.iNaT
+ dtype = upcast_classes['datetimetz']
+ return dtype[0], tslib.iNaT
elif 'datetime' in upcast_classes:
return np.dtype('M8[ns]'), tslib.iNaT
elif 'timedelta' in upcast_classes:
@@ -4788,6 +4788,7 @@ def is_null(self):
return True
def get_reindexed_values(self, empty_dtype, upcasted_na):
+
if upcasted_na is None:
# No upcasting is necessary
fill_value = self.block.fill_value
diff --git a/pandas/tools/merge.py b/pandas/tools/merge.py
index 95c68aaa00b18..722ce439722c9 100644
--- a/pandas/tools/merge.py
+++ b/pandas/tools/merge.py
@@ -220,8 +220,8 @@ def get_result(self):
return result
def _indicator_pre_merge(self, left, right):
-
- columns = left.columns.union(right.columns)
+
+ columns = left.columns.union(right.columns)
for i in ['_left_indicator', '_right_indicator']:
if i in columns:
@@ -232,12 +232,12 @@ def _indicator_pre_merge(self, left, right):
left = left.copy()
right = right.copy()
- left['_left_indicator'] = 1
- left['_left_indicator'] = left['_left_indicator'].astype('int8')
-
- right['_right_indicator'] = 2
- right['_right_indicator'] = right['_right_indicator'].astype('int8')
-
+ left['_left_indicator'] = 1
+ left['_left_indicator'] = left['_left_indicator'].astype('int8')
+
+ right['_right_indicator'] = 2
+ right['_right_indicator'] = right['_right_indicator'].astype('int8')
+
return left, right
def _indicator_post_merge(self, result):
@@ -246,8 +246,8 @@ def _indicator_post_merge(self, result):
result['_right_indicator'] = result['_right_indicator'].fillna(0)
result[self.indicator_name] = Categorical((result['_left_indicator'] + result['_right_indicator']), categories=[1,2,3])
- result[self.indicator_name] = result[self.indicator_name].cat.rename_categories(['left_only', 'right_only', 'both'])
-
+ result[self.indicator_name] = result[self.indicator_name].cat.rename_categories(['left_only', 'right_only', 'both'])
+
result = result.drop(labels=['_left_indicator', '_right_indicator'], axis=1)
return result
@@ -261,7 +261,7 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
continue
if name in result:
- key_col = result[name]
+ key_indexer = result.columns.get_loc(name)
if left_indexer is not None and right_indexer is not None:
@@ -274,9 +274,8 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
continue
right_na_indexer = right_indexer.take(na_indexer)
- key_col.put(
- na_indexer, com.take_1d(self.right_join_keys[i],
- right_na_indexer))
+ result.iloc[na_indexer,key_indexer] = com.take_1d(self.right_join_keys[i],
+ right_na_indexer)
elif name in self.right:
if len(self.right) == 0:
continue
@@ -286,9 +285,8 @@ def _maybe_add_join_keys(self, result, left_indexer, right_indexer):
continue
left_na_indexer = left_indexer.take(na_indexer)
- key_col.put(na_indexer, com.take_1d(self.left_join_keys[i],
- left_na_indexer))
-
+ result.iloc[na_indexer,key_indexer] = com.take_1d(self.left_join_keys[i],
+ left_na_indexer)
elif left_indexer is not None \
and isinstance(self.left_join_keys[i], np.ndarray):
@@ -664,10 +662,13 @@ def _right_outer_join(x, y, max_groups):
def _factorize_keys(lk, rk, sort=True):
+ if com.is_datetime64tz_dtype(lk) and com.is_datetime64tz_dtype(rk):
+ lk = lk.values
+ rk = rk.values
if com.is_int_or_datetime_dtype(lk) and com.is_int_or_datetime_dtype(rk):
klass = _hash.Int64Factorizer
- lk = com._ensure_int64(lk)
- rk = com._ensure_int64(rk)
+ lk = com._ensure_int64(com._values_from_object(lk))
+ rk = com._ensure_int64(com._values_from_object(rk))
else:
klass = _hash.Factorizer
lk = com._ensure_object(lk)
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index b555a7dc2b3a1..3a77cfec5fbc3 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -13,6 +13,7 @@
from pandas import compat
from pandas.tseries.index import DatetimeIndex
from pandas.tools.merge import merge, concat, ordered_merge, MergeError
+from pandas import Categorical, Timestamp
from pandas.util.testing import (assert_frame_equal, assert_series_equal,
assert_almost_equal,
makeCustomDataframe as mkdf,
@@ -947,29 +948,53 @@ def test_overlapping_columns_error_message(self):
df2.columns = ['key1', 'foo', 'foo']
self.assertRaises(ValueError, merge, df, df2)
+ def test_merge_on_datetime64tz(self):
+
+ # GH11405
+ left = pd.DataFrame({'key' : pd.date_range('20151010',periods=2,tz='US/Eastern'),
+ 'value' : [1,2]})
+ right = pd.DataFrame({'key' : pd.date_range('20151011',periods=3,tz='US/Eastern'),
+ 'value' : [1,2,3]})
+
+ expected = DataFrame({'key' : pd.date_range('20151010',periods=4,tz='US/Eastern'),
+ 'value_x' : [1,2,np.nan,np.nan],
+ 'value_y' : [np.nan,1,2,3]})
+ result = pd.merge(left, right, on='key', how='outer')
+ assert_frame_equal(result, expected)
+
+ left = pd.DataFrame({'value' : pd.date_range('20151010',periods=2,tz='US/Eastern'),
+ 'key' : [1,2]})
+ right = pd.DataFrame({'value' : pd.date_range('20151011',periods=2,tz='US/Eastern'),
+ 'key' : [2,3]})
+ expected = DataFrame({'value_x' : list(pd.date_range('20151010',periods=2,tz='US/Eastern')) + [pd.NaT],
+ 'value_y' : [pd.NaT] + list(pd.date_range('20151011',periods=2,tz='US/Eastern')),
+ 'key' : [1.,2,3]})
+ result = pd.merge(left, right, on='key', how='outer')
+ assert_frame_equal(result, expected)
+
def test_indicator(self):
# PR #10054. xref #7412 and closes #8790.
- df1 = pd.DataFrame({'col1':[0,1], 'col_left':['a','b'], 'col_conflict':[1,2]})
+ df1 = DataFrame({'col1':[0,1], 'col_left':['a','b'], 'col_conflict':[1,2]})
df1_copy = df1.copy()
- df2 = pd.DataFrame({'col1':[1,2,3,4,5],'col_right':[2,2,2,2,2],
- 'col_conflict':[1,2,3,4,5]})
+ df2 = DataFrame({'col1':[1,2,3,4,5],'col_right':[2,2,2,2,2],
+ 'col_conflict':[1,2,3,4,5]})
df2_copy = df2.copy()
- df_result = pd.DataFrame({'col1':[0,1,2,3,4,5],
+ df_result = DataFrame({'col1':[0,1,2,3,4,5],
'col_conflict_x':[1,2,np.nan,np.nan,np.nan,np.nan],
'col_left':['a','b', np.nan,np.nan,np.nan,np.nan],
'col_conflict_y':[np.nan,1,2,3,4,5],
'col_right':[np.nan, 2,2,2,2,2]},
dtype='float64')
- df_result['_merge'] = pd.Categorical(['left_only','both','right_only',
+ df_result['_merge'] = Categorical(['left_only','both','right_only',
'right_only','right_only','right_only']
, categories=['left_only', 'right_only', 'both'])
df_result = df_result[['col1', 'col_conflict_x', 'col_left',
'col_conflict_y', 'col_right', '_merge' ]]
- test = pd.merge(df1, df2, on='col1', how='outer', indicator=True)
+ test = merge(df1, df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
test = df1.merge(df2, on='col1', how='outer', indicator=True)
assert_frame_equal(test, df_result)
@@ -982,63 +1007,63 @@ def test_indicator(self):
df_result_custom_name = df_result
df_result_custom_name = df_result_custom_name.rename(columns={'_merge':'custom_name'})
- test_custom_name = pd.merge(df1, df2, on='col1', how='outer', indicator='custom_name')
+ test_custom_name = merge(df1, df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
test_custom_name = df1.merge(df2, on='col1', how='outer', indicator='custom_name')
assert_frame_equal(test_custom_name, df_result_custom_name)
# Check only accepts strings and booleans
with tm.assertRaises(ValueError):
- pd.merge(df1, df2, on='col1', how='outer', indicator=5)
+ merge(df1, df2, on='col1', how='outer', indicator=5)
with tm.assertRaises(ValueError):
df1.merge(df2, on='col1', how='outer', indicator=5)
# Check result integrity
- test2 = pd.merge(df1, df2, on='col1', how='left', indicator=True)
+ test2 = merge(df1, df2, on='col1', how='left', indicator=True)
self.assertTrue((test2._merge != 'right_only').all())
test2 = df1.merge(df2, on='col1', how='left', indicator=True)
self.assertTrue((test2._merge != 'right_only').all())
- test3 = pd.merge(df1, df2, on='col1', how='right', indicator=True)
+ test3 = merge(df1, df2, on='col1', how='right', indicator=True)
self.assertTrue((test3._merge != 'left_only').all())
test3 = df1.merge(df2, on='col1', how='right', indicator=True)
self.assertTrue((test3._merge != 'left_only').all())
- test4 = pd.merge(df1, df2, on='col1', how='inner', indicator=True)
+ test4 = merge(df1, df2, on='col1', how='inner', indicator=True)
self.assertTrue((test4._merge == 'both').all())
test4 = df1.merge(df2, on='col1', how='inner', indicator=True)
self.assertTrue((test4._merge == 'both').all())
# Check if working name in df
for i in ['_right_indicator', '_left_indicator', '_merge']:
- df_badcolumn = pd.DataFrame({'col1':[1,2], i:[2,2]})
+ df_badcolumn = DataFrame({'col1':[1,2], i:[2,2]})
with tm.assertRaises(ValueError):
- pd.merge(df1, df_badcolumn, on='col1', how='outer', indicator=True)
+ merge(df1, df_badcolumn, on='col1', how='outer', indicator=True)
with tm.assertRaises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer', indicator=True)
# Check for name conflict with custom name
- df_badcolumn = pd.DataFrame({'col1':[1,2], 'custom_column_name':[2,2]})
+ df_badcolumn = DataFrame({'col1':[1,2], 'custom_column_name':[2,2]})
with tm.assertRaises(ValueError):
- pd.merge(df1, df_badcolumn, on='col1', how='outer', indicator='custom_column_name')
+ merge(df1, df_badcolumn, on='col1', how='outer', indicator='custom_column_name')
with tm.assertRaises(ValueError):
df1.merge(df_badcolumn, on='col1', how='outer', indicator='custom_column_name')
# Merge on multiple columns
- df3 = pd.DataFrame({'col1':[0,1], 'col2':['a','b']})
+ df3 = DataFrame({'col1':[0,1], 'col2':['a','b']})
- df4 = pd.DataFrame({'col1':[1,1,3], 'col2':['b','x','y']})
+ df4 = DataFrame({'col1':[1,1,3], 'col2':['b','x','y']})
- hand_coded_result = pd.DataFrame({'col1':[0,1,1,3.0],
+ hand_coded_result = DataFrame({'col1':[0,1,1,3.0],
'col2':['a','b','x','y']})
- hand_coded_result['_merge'] = pd.Categorical(
+ hand_coded_result['_merge'] = Categorical(
['left_only','both','right_only','right_only']
, categories=['left_only', 'right_only', 'both'])
- test5 = pd.merge(df3, df4, on=['col1', 'col2'], how='outer', indicator=True)
+ test5 = merge(df3, df4, on=['col1', 'col2'], how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
test5 = df3.merge(df4, on=['col1', 'col2'], how='outer', indicator=True)
assert_frame_equal(test5, hand_coded_result)
@@ -1464,18 +1489,18 @@ def test_int64_overflow_issues(self):
columns=list('ABCDEFG'))
# confirm that this is checking what it is supposed to check
- shape = left.apply(pd.Series.nunique).values
+ shape = left.apply(Series.nunique).values
self.assertTrue(_int64_overflow_possible(shape))
# add duplicates to left frame
- left = pd.concat([left, left], ignore_index=True)
+ left = concat([left, left], ignore_index=True)
right = DataFrame(np.random.randint(low, high, (n // 2, 7)).astype('int64'),
columns=list('ABCDEFG'))
# add duplicates & overlap with left to the right frame
i = np.random.choice(len(left), n)
- right = pd.concat([right, right, left.iloc[i]], ignore_index=True)
+ right = concat([right, right, left.iloc[i]], ignore_index=True)
left['left'] = np.random.randn(len(left))
right['right'] = np.random.randn(len(right))
@@ -1980,19 +2005,19 @@ def test_concat_dataframe_keys_bug(self):
def test_concat_series_partial_columns_names(self):
# GH10698
- foo = pd.Series([1,2], name='foo')
- bar = pd.Series([1,2])
- baz = pd.Series([4,5])
+ foo = Series([1,2], name='foo')
+ bar = Series([1,2])
+ baz = Series([4,5])
- result = pd.concat([foo, bar, baz], axis=1)
+ result = concat([foo, bar, baz], axis=1)
expected = DataFrame({'foo' : [1,2], 0 : [1,2], 1 : [4,5]}, columns=['foo',0,1])
tm.assert_frame_equal(result, expected)
- result = pd.concat([foo, bar, baz], axis=1, keys=['red','blue','yellow'])
+ result = concat([foo, bar, baz], axis=1, keys=['red','blue','yellow'])
expected = DataFrame({'red' : [1,2], 'blue' : [1,2], 'yellow' : [4,5]}, columns=['red','blue','yellow'])
tm.assert_frame_equal(result, expected)
- result = pd.concat([foo, bar, baz], axis=1, ignore_index=True)
+ result = concat([foo, bar, baz], axis=1, ignore_index=True)
expected = DataFrame({0 : [1,2], 1 : [1,2], 2 : [4,5]})
tm.assert_frame_equal(result, expected)
@@ -2059,13 +2084,13 @@ def test_concat_multiindex_with_tz(self):
datetime(2014, 1, 3)],
'b': ['A', 'B', 'C'],
'c': [1, 2, 3], 'd': [4, 5, 6]})
- df['dt'] = df['dt'].apply(lambda d: pd.Timestamp(d, tz='US/Pacific'))
+ df['dt'] = df['dt'].apply(lambda d: Timestamp(d, tz='US/Pacific'))
df = df.set_index(['dt', 'b'])
- exp_idx1 = pd.DatetimeIndex(['2014-01-01', '2014-01-02', '2014-01-03'] * 2,
+ exp_idx1 = DatetimeIndex(['2014-01-01', '2014-01-02', '2014-01-03'] * 2,
tz='US/Pacific', name='dt')
exp_idx2 = Index(['A', 'B', 'C'] * 2, name='b')
- exp_idx = pd.MultiIndex.from_arrays([exp_idx1, exp_idx2])
+ exp_idx = MultiIndex.from_arrays([exp_idx1, exp_idx2])
expected = DataFrame({'c': [1, 2, 3] * 2, 'd': [4, 5, 6] * 2},
index=exp_idx, columns=['c', 'd'])
@@ -2640,10 +2665,10 @@ def test_concat_iterables(self):
df1 = DataFrame([1, 2, 3])
df2 = DataFrame([4, 5, 6])
expected = DataFrame([1, 2, 3, 4, 5, 6])
- assert_frame_equal(pd.concat((df1, df2), ignore_index=True), expected)
- assert_frame_equal(pd.concat([df1, df2], ignore_index=True), expected)
- assert_frame_equal(pd.concat((df for df in (df1, df2)), ignore_index=True), expected)
- assert_frame_equal(pd.concat(deque((df1, df2)), ignore_index=True), expected)
+ assert_frame_equal(concat((df1, df2), ignore_index=True), expected)
+ assert_frame_equal(concat([df1, df2], ignore_index=True), expected)
+ assert_frame_equal(concat((df for df in (df1, df2)), ignore_index=True), expected)
+ assert_frame_equal(concat(deque((df1, df2)), ignore_index=True), expected)
class CustomIterator1(object):
def __len__(self):
return 2
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 2f4858300293e..50137493e6b01 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -180,7 +180,7 @@ def sort_values(self, return_indexer=False, ascending=True):
return self._simple_new(sorted_values, **attribs)
- def take(self, indices, axis=0, **kwargs):
+ def take(self, indices, axis=0, allow_fill=True, fill_value=None):
"""
Analogous to ndarray.take
"""
@@ -189,6 +189,12 @@ def take(self, indices, axis=0, **kwargs):
if isinstance(maybe_slice, slice):
return self[maybe_slice]
taken = self.asi8.take(com._ensure_platform_int(indices))
+
+ # only fill if we are passing a non-None fill_value
+ if allow_fill and fill_value is not None:
+ mask = indices == -1
+ if mask.any():
+ taken[mask] = tslib.iNaT
return self._shallow_copy(taken, freq=None)
def get_duplicates(self):
| closes #11405
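A minimal reproduction of the case covered by the new test above (an outer merge on a tz-aware key should keep the ``datetime64[ns, tz]`` dtype):

```python
import pandas as pd

left = pd.DataFrame({'key': pd.date_range('20151010', periods=2, tz='US/Eastern'),
                     'value': [1, 2]})
right = pd.DataFrame({'key': pd.date_range('20151011', periods=3, tz='US/Eastern'),
                      'value': [1, 2, 3]})
result = pd.merge(left, right, on='key', how='outer')
print(result['key'].dtype)  # datetime64[ns, US/Eastern]
```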
| https://api.github.com/repos/pandas-dev/pandas/pulls/11410 | 2015-10-22T12:19:57Z | 2015-10-23T16:43:22Z | 2015-10-23T16:43:22Z | 2017-03-24T17:23:46Z |
CLN: period boundscheck warnings | diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx
index b431bb58bc991..cfc50afc8f9f3 100644
--- a/pandas/src/period.pyx
+++ b/pandas/src/period.pyx
@@ -20,6 +20,7 @@ from pandas import compat
from pandas.tseries import offsets
from pandas.tseries.tools import parse_time_string
+cimport cython
from datetime cimport *
cimport util
cimport lib
@@ -124,6 +125,8 @@ cdef inline int64_t remove_mult(int64_t period_ord_w_mult, int64_t mult):
return period_ord_w_mult * mult + 1;
[email protected](False)
[email protected](False)
def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
"""
Convert array of datetime64 values (passed in as 'i8' dtype) to a set of
@@ -151,6 +154,8 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
out = localize_dt64arr_to_period(dtarr, freq, tz)
return out
[email protected](False)
[email protected](False)
def periodarr_to_dt64arr(ndarray[int64_t] periodarr, int freq):
"""
Convert array to datetime64 values from a set of ordinals corresponding to
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 8e6d4019c69a3..2fa406f880cdd 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -4431,7 +4431,7 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
Py_ssize_t i
pandas_datetimestruct dts
int count = len(dtindex)
- cdef int days_in_current_month
+ int days_in_current_month
int64_t[:] out = np.empty(count, dtype='int64')
if day is None:
 | Follow-up to fix the warnings noted in https://github.com/pydata/pandas/pull/11263#issuecomment-149641296.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11404 | 2015-10-21T22:50:07Z | 2015-10-24T02:02:51Z | 2015-10-24T02:02:51Z | 2015-10-25T14:28:02Z |
BUG: drop_duplicates drops non-duplicate rows in the presence of integer columns | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 2bb9920b6f177..70226ca302a60 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -92,7 +92,7 @@ Bug Fixes
- Bug in ``pivot_table`` with ``margins=True`` when indexes are of ``Categorical`` dtype (:issue:`10993`)
- Bug in ``DataFrame.plot`` cannot use hex strings colors (:issue:`10299`)
-
+- Bug in ``DataFrame.drop_duplicates`` (regression from 0.16.2) causing some non-duplicate rows containing integer values to be dropped (:issue:`11376`)
- Bug in ``pd.eval`` where unary ops in a list error (:issue:`11235`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 31b7aacefcb60..4774fc4f17a91 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -2994,13 +2994,7 @@ def duplicated(self, subset=None, keep='first'):
from pandas.hashtable import duplicated_int64, _SIZE_HINT_LIMIT
def f(vals):
-
- # if we have integers we can directly index with these
- if com.is_integer_dtype(vals):
- from pandas.core.nanops import unique1d
- labels, shape = vals, unique1d(vals)
- else:
- labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
+ labels, shape = factorize(vals, size_hint=min(len(self), _SIZE_HINT_LIMIT))
return labels.astype('i8',copy=False), len(shape)
if subset is None:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5c7f1ec9e0037..dfbd21997568d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -8380,6 +8380,25 @@ def test_drop_duplicates(self):
expected = df.iloc[[-2,-1]]
assert_frame_equal(result, expected)
+ # GH 11376
+ df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
+ 'y': [0, 6, 5, 5, 9, 1, 2]})
+ expected = df.loc[df.index != 3]
+ assert_frame_equal(df.drop_duplicates(), expected)
+
+ df = pd.DataFrame([[1 , 0], [0, 2]])
+ assert_frame_equal(df.drop_duplicates(), df)
+
+ df = pd.DataFrame([[-2, 0], [0, -4]])
+ assert_frame_equal(df.drop_duplicates(), df)
+
+ x = np.iinfo(np.int64).max / 3 * 2
+ df = pd.DataFrame([[-x, x], [0, x + 4]])
+ assert_frame_equal(df.drop_duplicates(), df)
+
+ df = pd.DataFrame([[-x, x], [x, x + 4]])
+ assert_frame_equal(df.drop_duplicates(), df)
+
def test_drop_duplicates_for_take_all(self):
df = DataFrame({'AAA': ['foo', 'bar', 'baz', 'bar',
'foo', 'bar', 'qux', 'foo'],
| Fixes GH #11376
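The regression, as exercised by the new test above: only the second ``(3, 5)`` row is a true duplicate, so exactly one row should be dropped:

```python
import pandas as pd

df = pd.DataFrame({'x': [7, 6, 3, 3, 4, 8, 0],
                   'y': [0, 6, 5, 5, 9, 1, 2]})
result = df.drop_duplicates()
assert len(result) == 6  # previously additional non-duplicate rows were dropped
```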
| https://api.github.com/repos/pandas-dev/pandas/pulls/11403 | 2015-10-21T21:39:13Z | 2015-10-24T00:18:23Z | 2015-10-24T00:18:23Z | 2015-10-24T00:47:51Z |
ENH: Improve the error message in to_gbq when schema does not match #11359 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index bdfbf08b37e57..989b05003d76f 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -30,6 +30,7 @@ Other Enhancements
- ``pd.read_*`` functions can now also accept :class:`python:pathlib.Path`, or :class:`py:py._path.local.LocalPath`
objects for the ``filepath_or_buffer`` argument. (:issue:`11033`)
+- Improve the error message displayed in :func:`pandas.io.gbq.to_gbq` when the DataFrame does not match the schema of the destination table (:issue:`11359`)
.. _whatsnew_0171.api:
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index e7241036b94c4..fff36a82529e3 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -511,7 +511,8 @@ def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
connector.delete_and_recreate_table(dataset_id, table_id, table_schema, verbose)
elif if_exists == 'append':
if not connector.verify_schema(dataset_id, table_id, table_schema):
- raise InvalidSchema("The schema of the destination table does not match")
+ raise InvalidSchema("Please verify that the column order, structure and data types in the DataFrame "
+ "match the schema of the destination table.")
else:
table.create(table_id, table_schema)
| See the discussion in #11359
| https://api.github.com/repos/pandas-dev/pandas/pulls/11401 | 2015-10-21T11:19:10Z | 2015-10-21T15:28:55Z | 2015-10-21T15:28:55Z | 2015-10-21T15:30:48Z |
BUG: using .ix with a multi-index indexer | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index cd3c3848523f0..eafd4973b5253 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -70,7 +70,7 @@ Bug Fixes
- Bug in ``HDFStore.append`` with strings whose encoded length exceded the max unencoded length (:issue:`11234`)
- Bug in merging ``datetime64[ns, tz]`` dtypes (:issue:`11405`)
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
-
+- Bug in using ``DataFrame.ix`` with a multi-index indexer(:issue:`11372`)
- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issue:`11295`)
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 5eb25a53d4533..0f3795fcad0c3 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -443,11 +443,14 @@ def can_do_equal_len():
# we have an equal len Frame
if isinstance(value, ABCDataFrame) and value.ndim > 1:
sub_indexer = list(indexer)
+ multiindex_indexer = isinstance(labels, MultiIndex)
for item in labels:
if item in value:
sub_indexer[info_axis] = item
- v = self._align_series(tuple(sub_indexer), value[item])
+ v = self._align_series(
+ tuple(sub_indexer), value[item], multiindex_indexer
+ )
else:
v = np.nan
@@ -516,8 +519,28 @@ def can_do_equal_len():
self.obj._data = self.obj._data.setitem(indexer=indexer, value=value)
self.obj._maybe_update_cacher(clear=True)
- def _align_series(self, indexer, ser):
- # indexer to assign Series can be tuple, slice, scalar
+ def _align_series(self, indexer, ser, multiindex_indexer=False):
+ """
+ Parameters
+ ----------
+ indexer : tuple, slice, scalar
+ The indexer used to get the locations that will be set to
+ `ser`
+
+ ser : pd.Series
+ The values to assign to the locations specified by `indexer`
+
+ multiindex_indexer : boolean, optional
+ Defaults to False. Should be set to True if `indexer` was from
+ a `pd.MultiIndex`, to avoid unnecessary broadcasting.
+
+
+ Returns:
+ --------
+ `np.array` of `ser` broadcast to the appropriate shape for assignment
+ to the locations selected by `indexer`
+
+ """
if isinstance(indexer, (slice, np.ndarray, list, Index)):
indexer = tuple([indexer])
@@ -555,7 +578,7 @@ def _align_series(self, indexer, ser):
ser = ser.reindex(obj.axes[0][indexer[0]], copy=True)._values
# single indexer
- if len(indexer) > 1:
+ if len(indexer) > 1 and not multiindex_indexer:
l = len(indexer[1])
ser = np.tile(ser, l).reshape(l, -1).T
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index a2d789aaf8b70..36e825924995a 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -762,32 +762,95 @@ def compare(result, expected):
result2 = s.loc[0:3]
assert_series_equal(result1,result2)
- def test_loc_setitem_multiindex(self):
+ def test_setitem_multiindex(self):
+ for index_fn in ('ix', 'loc'):
+ def check(target, indexers, value, compare_fn, expected=None):
+ fn = getattr(target, index_fn)
+ fn.__setitem__(indexers, value)
+ result = fn.__getitem__(indexers)
+ if expected is None:
+ expected = value
+ compare_fn(result, expected)
+ # GH7190
+ index = pd.MultiIndex.from_product([np.arange(0,100), np.arange(0, 80)], names=['time', 'firm'])
+ t, n = 0, 2
+ df = DataFrame(np.nan,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
+ check(
+ target=df, indexers=((t,n), 'X'),
+ value=0, compare_fn=self.assertEqual
+ )
- # GH7190
- index = pd.MultiIndex.from_product([np.arange(0,100), np.arange(0, 80)], names=['time', 'firm'])
- t, n = 0, 2
+ df = DataFrame(-999,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
+ check(
+ target=df, indexers=((t,n), 'X'),
+ value=1, compare_fn=self.assertEqual
+ )
- df = DataFrame(np.nan,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
- df.loc[(t,n),'X'] = 0
- result = df.loc[(t,n),'X']
- self.assertEqual(result, 0)
+ df = DataFrame(columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
+ check(
+ target=df, indexers=((t,n), 'X'),
+ value=2, compare_fn=self.assertEqual
+ )
- df = DataFrame(-999,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
- df.loc[(t,n),'X'] = 1
- result = df.loc[(t,n),'X']
- self.assertEqual(result, 1)
+ # GH 7218, assinging with 0-dim arrays
+ df = DataFrame(-999,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
+ check(
+ target=df, indexers=((t,n), 'X'),
+ value=np.array(3), compare_fn=self.assertEqual,
+ expected=3,
+ )
- df = DataFrame(columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
- df.loc[(t,n),'X'] = 2
- result = df.loc[(t,n),'X']
- self.assertEqual(result, 2)
+ # GH5206
+ df = pd.DataFrame(
+ np.arange(25).reshape(5, 5), columns='A,B,C,D,E'.split(','),
+ dtype=float
+ )
+ df['F'] = 99
+ row_selection = df['A'] % 2 == 0
+ col_selection = ['B', 'C']
+ df.ix[row_selection, col_selection] = df['F']
+ output = pd.DataFrame(99., index=[0, 2, 4], columns=['B', 'C'])
+ assert_frame_equal(df.ix[row_selection, col_selection], output)
+ check(
+ target=df, indexers=(row_selection, col_selection),
+ value=df['F'], compare_fn=assert_frame_equal,
+ expected=output,
+ )
- # GH 7218, assinging with 0-dim arrays
- df = DataFrame(-999,columns=['A', 'w', 'l', 'a', 'x', 'X', 'd', 'profit'], index=index)
- df.loc[(t,n), 'X'] = np.array(3)
- result = df.loc[(t,n),'X']
- self.assertEqual(result,3)
+ # GH11372
+ idx = pd.MultiIndex.from_product([
+ ['A', 'B', 'C'],
+ pd.date_range('2015-01-01', '2015-04-01', freq='MS')
+ ])
+ cols = pd.MultiIndex.from_product([
+ ['foo', 'bar'],
+ pd.date_range('2016-01-01', '2016-02-01', freq='MS')
+ ])
+ df = pd.DataFrame(np.random.random((12, 4)), index=idx, columns=cols)
+ subidx = pd.MultiIndex.from_tuples(
+ [('A', pd.Timestamp('2015-01-01')), ('A', pd.Timestamp('2015-02-01'))]
+ )
+ subcols = pd.MultiIndex.from_tuples(
+ [('foo', pd.Timestamp('2016-01-01')), ('foo', pd.Timestamp('2016-02-01'))]
+ )
+ vals = pd.DataFrame(np.random.random((2, 2)), index=subidx, columns=subcols)
+ check(
+ target=df, indexers=(subidx, subcols),
+ value=vals, compare_fn=assert_frame_equal,
+ )
+ # set all columns
+ vals = pd.DataFrame(np.random.random((2, 4)), index=subidx, columns=cols)
+ check(
+ target=df, indexers=(subidx, slice(None, None, None)),
+ value=vals, compare_fn=assert_frame_equal,
+ )
+ # identity
+ copy = df.copy()
+ check(
+ target=df, indexers=(df.index, df.columns),
+ value=df, compare_fn=assert_frame_equal,
+ expected=copy
+ )
def test_indexing_with_datetime_tz(self):
| closes #11372
Adds an optional argument to `_NDFrameIndexer` to indicate whether the indexer comes from a `MultiIndex`, so that the assigned values are not broadcast (tiled) unnecessarily.
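The scenario from the new GH11372 test, for reference (the random data is illustrative):

```python
import numpy as np
import pandas as pd

idx = pd.MultiIndex.from_product([['A', 'B', 'C'],
                                  pd.date_range('2015-01-01', '2015-04-01', freq='MS')])
cols = pd.MultiIndex.from_product([['foo', 'bar'],
                                   pd.date_range('2016-01-01', '2016-02-01', freq='MS')])
df = pd.DataFrame(np.random.random((12, 4)), index=idx, columns=cols)

subidx, subcols = idx[:2], cols[:2]
vals = pd.DataFrame(np.random.random((2, 2)), index=subidx, columns=subcols)
df.loc[subidx, subcols] = vals   # previously the values were tiled incorrectly
```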
| https://api.github.com/repos/pandas-dev/pandas/pulls/11400 | 2015-10-21T08:40:16Z | 2015-10-27T11:17:08Z | 2015-10-27T11:17:08Z | 2015-10-27T13:25:20Z |
ENC: better pandas typesetting in ipython nbconvert --to latex (updated) | diff --git a/doc/source/whatsnew/v0.16.1.txt b/doc/source/whatsnew/v0.16.1.txt
index f691b0842f071..31c61e5c36203 100644
--- a/doc/source/whatsnew/v0.16.1.txt
+++ b/doc/source/whatsnew/v0.16.1.txt
@@ -21,6 +21,7 @@ Enhancements
- ``DataFrame.mask()`` and ``Series.mask()`` now support same keywords as ``where`` (:issue:`8801`)
+- Added support for rendering DataFrames correctly when converting Jupyter/IPython notebooks to LaTeX (:issue:`9821`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index f700d4316842c..8c6ce6ee2641f 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1428,7 +1428,15 @@ def to_latex(self, buf=None, columns=None, col_space=None, colSpace=None,
if buf is None:
return formatter.buf.getvalue()
-
+
+ def _repr_latex_(self):
+ """
+ Used for rendering a DataFrame when converting a Jupyter/IPython notebook to LaTeX.
+ For other uses refer to the `to_latex` function.
+ See https://github.com/pydata/pandas/pull/9821 for aditional information.
+ """
+ return self.to_latex()
+
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None, null_counts=None):
"""
Concise summary of a DataFrame.
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 3e4c16f63035f..633765fc552e1 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4752,6 +4752,14 @@ def test_repr(self):
self.assertFalse("\t" in repr(df))
self.assertFalse("\r" in repr(df))
self.assertFalse("a\n" in repr(df))
+
+ def test_repr_latex(self):
+ # #9821
+
+ df = DataFrame({'A': [1., 2., 3.],
+ 'B': ['a', 'b', 'c']},
+ index=np.arange(3))
+ self.assertEqual(df._repl_latex_(), df.to_latex())
def test_repr_dimensions(self):
df = DataFrame([[1, 2,], [3, 4]])
 | New pull request with tests and comments. See https://github.com/pydata/pandas/pull/9821 for the original PR.
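A sketch of what the added hook does (per the diff above, it simply delegates to ``to_latex`` so nbconvert's LaTeX exporter can pick it up):

```python
import pandas as pd

df = pd.DataFrame({'A': [1., 2., 3.], 'B': ['a', 'b', 'c']})
# with this change, the rich-repr hook returns the LaTeX table source
assert df._repr_latex_() == df.to_latex()
```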
| https://api.github.com/repos/pandas-dev/pandas/pulls/11399 | 2015-10-21T07:16:58Z | 2015-11-13T15:15:33Z | null | 2015-11-14T19:34:36Z |
BLD: add CFLAGS `-fgnu89-inline` on FreeBSD 10+ | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index bdfbf08b37e57..ea26fdd90c4da 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -115,3 +115,7 @@ Bug Fixes
- Fixed a bug that prevented the construction of an empty series of dtype
``datetime64[ns, tz]`` (:issue:`11245`).
- Bug in ``DataFrame.to_dict()`` produces a ``np.datetime64`` object instead of ``Timestamp`` when only datetime is present in data (:issue:`11327`)
+
+
+
+- Bug in the link-time error caused by C ``inline`` functions on FreeBSD 10+ (with ``clang``) (:issue:`10510`)
diff --git a/setup.py b/setup.py
index 2d1b9374f6c94..cf940cb9e27c4 100755
--- a/setup.py
+++ b/setup.py
@@ -11,6 +11,7 @@
import shutil
import warnings
import re
+import platform
from distutils.version import LooseVersion
# versioning
@@ -289,7 +290,10 @@ def run(self):
class CheckingBuildExt(build_ext):
- """Subclass build_ext to get clearer report if Cython is necessary."""
+ """
+ Subclass build_ext to get clearer report if Cython is necessary.
+ Also, add some platform based compiler flags.
+ """
def check_cython_extensions(self, extensions):
for ext in extensions:
@@ -302,8 +306,27 @@ def check_cython_extensions(self, extensions):
def build_extensions(self):
self.check_cython_extensions(self.extensions)
+ self.add_gnu_inline_flag(self.extensions)
build_ext.build_extensions(self)
+ def add_gnu_inline_flag(self, extensions):
+ '''
+ Add CFLAGS `-fgnu89-inline` for clang on FreeBSD 10+
+ '''
+ if not platform.system() == 'FreeBSD':
+ return
+
+ try:
+ bsd_release = float(platform.release().split('-')[0])
+ except ValueError: # unknow freebsd version
+ return
+
+ if bsd_release < 10: # 9 or earlier still using gcc42
+ return
+
+ for ext in extensions:
+ ext.extra_compile_args += ['-fgnu89-inline']
+
class CythonCommand(build_ext):
"""Custom distutils command subclassed from Cython.Distutils.build_ext
| closes #10510
| https://api.github.com/repos/pandas-dev/pandas/pulls/11398 | 2015-10-21T05:08:15Z | 2015-11-10T12:12:36Z | 2015-11-10T12:12:36Z | 2015-11-10T12:13:06Z |
Revert "BLD: conda" | diff --git a/ci/install_conda.sh b/ci/install_conda.sh
index 204dba58641c4..8d99034a86109 100755
--- a/ci/install_conda.sh
+++ b/ci/install_conda.sh
@@ -71,7 +71,7 @@ wget http://repo.continuum.io/miniconda/Miniconda-latest-Linux-x86_64.sh -O mini
bash miniconda.sh -b -p $HOME/miniconda || exit 1
conda config --set always_yes yes --set changeps1 no || exit 1
-#conda update -q conda || exit 1
+conda update -q conda || exit 1
conda config --add channels conda-forge || exit 1
conda config --add channels http://conda.binstar.org/pandas || exit 1
conda config --set ssl_verify false || exit 1
@@ -84,9 +84,7 @@ REQ="ci/requirements-${TRAVIS_PYTHON_VERSION}${JOB_TAG}.build"
time conda create -n pandas python=$TRAVIS_PYTHON_VERSION nose || exit 1
time conda install -n pandas --file=${REQ} || exit 1
-echo "activating pandas env: start"
source activate pandas
-echo "activating pandas env: done"
# set the compiler cache to work
if [ "$IRON_TOKEN" ]; then
@@ -108,7 +106,6 @@ if [ "$BUILD_TEST" ]; then
else
# build but don't install
- echo "starting build"
time python setup.py build_ext --inplace || exit 1
# we may have run installations
| This reverts commit 0c8a8e1372aef137ec71f0ba9c7af58cb4a2a8ac.
closes #11394
| https://api.github.com/repos/pandas-dev/pandas/pulls/11397 | 2015-10-21T02:33:09Z | 2015-10-21T10:15:06Z | 2015-10-21T10:15:06Z | 2015-10-21T10:15:06Z |
TST: remove invalid symbol warnings | diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py
index ad0e05f91d184..29970aef760f2 100644
--- a/pandas/io/tests/test_data.py
+++ b/pandas/io/tests/test_data.py
@@ -103,13 +103,15 @@ def test_get_multi1(self):
@network
def test_get_multi_invalid(self):
sl = ['AAPL', 'AMZN', 'INVALID']
- pan = web.get_data_google(sl, '2012')
- self.assertIn('INVALID', pan.minor_axis)
+ with tm.assert_produces_warning(SymbolWarning):
+ pan = web.get_data_google(sl, '2012')
+ self.assertIn('INVALID', pan.minor_axis)
@network
def test_get_multi_all_invalid(self):
sl = ['INVALID', 'INVALID2', 'INVALID3']
- self.assertRaises(RemoteDataError, web.get_data_google, sl, '2012')
+ with tm.assert_produces_warning(SymbolWarning):
+ self.assertRaises(RemoteDataError, web.get_data_google, sl, '2012')
@network
def test_get_multi2(self):
| https://api.github.com/repos/pandas-dev/pandas/pulls/11396 | 2015-10-21T02:09:17Z | 2015-10-21T10:15:22Z | 2015-10-21T10:15:22Z | 2015-10-21T10:15:22Z |
|
BUG: GH11344 in pandas.json when file to read is big | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 989b05003d76f..3b2ec3837cdd8 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -111,7 +111,7 @@ Bug Fixes
- Bug in ``DataFrame.to_latex()`` produces an extra rule when ``header=False`` (:issue:`7124`)
-
+- Bug in ``pandas.json`` when file to load is big (:issue:`11344`)
- Bugs in ``to_excel`` with duplicate columns (:issue:`11007`, :issue:`10982`, :issue:`10970`)
- Fixed a bug that prevented the construction of an empty series of dtype
``datetime64[ns, tz]`` (:issue:`11245`).
diff --git a/pandas/src/ujson/lib/ultrajsondec.c b/pandas/src/ujson/lib/ultrajsondec.c
index 9c2bb21612745..3e316eb26e6e1 100644
--- a/pandas/src/ujson/lib/ultrajsondec.c
+++ b/pandas/src/ujson/lib/ultrajsondec.c
@@ -443,7 +443,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_string ( struct DecoderState *ds)
if (ds->escHeap)
{
- if (newSize > (UINT_MAX / sizeof(wchar_t)))
+ if (newSize > (SIZE_MAX / sizeof(wchar_t)))
{
return SetError(ds, -1, "Could not reserve memory block");
}
@@ -458,8 +458,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_string ( struct DecoderState *ds)
else
{
wchar_t *oldStart = ds->escStart;
- ds->escHeap = 1;
- if (newSize > (UINT_MAX / sizeof(wchar_t)))
+ if (newSize > (SIZE_MAX / sizeof(wchar_t)))
{
return SetError(ds, -1, "Could not reserve memory block");
}
@@ -468,6 +467,7 @@ FASTCALL_ATTR JSOBJ FASTCALL_MSVC decode_string ( struct DecoderState *ds)
{
return SetError(ds, -1, "Could not reserve memory block");
}
+ ds->escHeap = 1;
memcpy(ds->escStart, oldStart, escLen * sizeof(wchar_t));
}
| closes #11344
Taken from esnme/ultrajson#145.
Test code to reproduce:
```
import json
import pandas
from pandas.compat import zip, range

SIZE = 5*10**7
FILENAME = 'generated.json'

# write a large JSON file, then load it back with pandas' bundled ujson
with open(FILENAME, 'w') as fileh:
    json.dump(dict(zip(range(SIZE), range(SIZE))), fileh)
with open(FILENAME) as fileh:
    pandas.json.load(fileh)
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11393 | 2015-10-21T01:08:58Z | 2015-10-23T16:57:20Z | 2015-10-23T16:57:20Z | 2015-10-23T16:57:37Z |
TST: remove incomparable warnings from py3 | diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 7474c0d118612..52c8a0d15f7a2 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -774,7 +774,6 @@ def check_basic_frame_alignment(self, engine, parser):
args = product(self.lhs_index_types, self.index_types,
self.index_types)
with warnings.catch_warnings(record=True):
- warnings.simplefilter('always', RuntimeWarning)
for lr_idx_type, rr_idx_type, c_idx_type in args:
df = mkdf(10, 10, data_gen_f=f, r_idx_type=lr_idx_type,
c_idx_type=c_idx_type)
@@ -816,7 +815,6 @@ def check_medium_complex_frame_alignment(self, engine, parser):
self.index_types, self.index_types)
with warnings.catch_warnings(record=True):
- warnings.simplefilter('always', RuntimeWarning)
for r1, c1, r2, c2 in args:
df = mkdf(3, 2, data_gen_f=f, r_idx_type=r1, c_idx_type=c1)
@@ -859,7 +857,6 @@ def testit(r_idx_type, c_idx_type, index_name):
args = product(self.lhs_index_types, self.index_types,
('index', 'columns'))
with warnings.catch_warnings(record=True):
- warnings.simplefilter('always', RuntimeWarning)
for r_idx_type, c_idx_type, index_name in args:
testit(r_idx_type, c_idx_type, index_name)
@@ -890,12 +887,12 @@ def testit(r_idx_type, c_idx_type, index_name):
# only test dt with dt, otherwise weird joins result
args = product(['i', 'u', 's'], ['i', 'u', 's'], ('index', 'columns'))
for r_idx_type, c_idx_type, index_name in args:
- testit(r_idx_type, c_idx_type, index_name)
+ with warnings.catch_warnings(record=True):
+ testit(r_idx_type, c_idx_type, index_name)
# dt with dt
args = product(['dt'], ['dt'], ('index', 'columns'))
with warnings.catch_warnings(record=True):
- warnings.simplefilter('always', RuntimeWarning)
for r_idx_type, c_idx_type, index_name in args:
testit(r_idx_type, c_idx_type, index_name)
@@ -909,7 +906,6 @@ def check_series_frame_commutativity(self, engine, parser):
('index', 'columns'))
with warnings.catch_warnings(record=True):
- warnings.simplefilter('always', RuntimeWarning)
for r_idx_type, c_idx_type, op, index_name in args:
df = mkdf(10, 10, data_gen_f=f, r_idx_type=r_idx_type,
c_idx_type=c_idx_type)
@@ -946,7 +942,6 @@ def check_complex_series_frame_alignment(self, engine, parser):
m2 = 2 * m1
with warnings.catch_warnings(record=True):
- warnings.simplefilter('always', RuntimeWarning)
for r1, r2, c1, c2 in args:
index_name = random.choice(['index', 'columns'])
obj_name = random.choice(['df', 'df2'])
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 1b57d53a548f3..6f7e54804573d 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -8082,7 +8082,9 @@ def test_corrwith(self):
b = b.reindex(columns=b.columns[::-1], index=b.index[::-1][10:])
del b['B']
- colcorr = a.corrwith(b, axis=0)
+ # strings/datetimes
+ with tm.assert_produces_warning(RuntimeWarning):
+ colcorr = a.corrwith(b, axis=0)
assert_almost_equal(colcorr['A'], a['A'].corr(b['A']))
rowcorr = a.corrwith(b, axis=1)
| null | https://api.github.com/repos/pandas-dev/pandas/pulls/11392 | 2015-10-20T23:23:26Z | 2015-11-25T15:40:26Z | null | 2023-05-11T01:13:13Z |
TST: make a couple of tests slow / remove some warnings | diff --git a/ci/requirements-2.7_SLOW.pip b/ci/requirements-2.7_SLOW.pip
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/io/ga.py b/pandas/io/ga.py
index 5525b34951524..a6f9c9ed9467f 100644
--- a/pandas/io/ga.py
+++ b/pandas/io/ga.py
@@ -4,13 +4,6 @@
3. Goto APIs and register for OAuth2.0 for installed applications
4. Download JSON secret file and move into same directory as this file
"""
-
-# GH11038
-import warnings
-warnings.warn("The pandas.io.ga module is deprecated and will be "
- "removed in a future version.",
- FutureWarning, stacklevel=2)
-
from datetime import datetime
import re
from pandas import compat
@@ -27,6 +20,12 @@
from oauth2client.client import AccessTokenRefreshError
from pandas.compat import zip, u
+# GH11038
+import warnings
+warnings.warn("The pandas.io.ga module is deprecated and will be "
+ "removed in a future version.",
+ FutureWarning, stacklevel=2)
+
TYPE_MAP = {u('INTEGER'): int, u('FLOAT'): float, u('TIME'): int}
NO_CALLBACK = auth.OOB_CALLBACK_URN
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py
index 29970aef760f2..afc61dc42f569 100644
--- a/pandas/io/tests/test_data.py
+++ b/pandas/io/tests/test_data.py
@@ -9,12 +9,15 @@
import numpy as np
import pandas as pd
from pandas import DataFrame, Timestamp
-from pandas.io import data as web
-from pandas.io.data import DataReader, SymbolWarning, RemoteDataError, _yahoo_codes
from pandas.util.testing import (assert_series_equal, assert_produces_warning,
network, assert_frame_equal)
import pandas.util.testing as tm
+with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ from pandas.io import data as web
+
+from pandas.io.data import DataReader, SymbolWarning, RemoteDataError, _yahoo_codes
+
if compat.PY3:
from urllib.error import HTTPError
else:
@@ -293,6 +296,7 @@ def test_get_date_ret_index(self):
class TestYahooOptions(tm.TestCase):
+
@classmethod
def setUpClass(cls):
super(TestYahooOptions, cls).setUpClass()
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index e7ed83b5708f9..b06216719a016 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -6,6 +6,7 @@
import os
from distutils.version import LooseVersion
+import warnings
import operator
import functools
import nose
@@ -1829,7 +1830,6 @@ def test_column_format(self):
# Applicable to xlsxwriter only.
_skip_if_no_xlsxwriter()
- import warnings
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py
index 13d31b43ac39a..965b3441d7405 100644
--- a/pandas/io/tests/test_ga.py
+++ b/pandas/io/tests/test_ga.py
@@ -1,6 +1,7 @@
import os
from datetime import datetime
+import warnings
import nose
import pandas as pd
from pandas import compat
@@ -13,7 +14,12 @@
try:
import httplib2
- import pandas.io.ga as ga
+ import apiclient
+
+ # deprecated
+ with warnings.catch_warnings(record=True):
+ import pandas.io.ga as ga
+
from pandas.io.ga import GAnalytics, read_ga
from pandas.io.auth import AuthenticationConfigError, reset_default_token_store
from pandas.io import auth
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index 894b699281c80..3434afc4129c4 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -461,20 +461,21 @@ def test_sparse_frame(self):
def test_sparse_panel(self):
- items = ['x', 'y', 'z']
- p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
- sp = p.to_sparse()
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ items = ['x', 'y', 'z']
+ p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
+ sp = p.to_sparse()
- self._check_roundtrip(sp, tm.assert_panel_equal,
- check_panel_type=True)
+ self._check_roundtrip(sp, tm.assert_panel_equal,
+ check_panel_type=True)
- sp2 = p.to_sparse(kind='integer')
- self._check_roundtrip(sp2, tm.assert_panel_equal,
- check_panel_type=True)
+ sp2 = p.to_sparse(kind='integer')
+ self._check_roundtrip(sp2, tm.assert_panel_equal,
+ check_panel_type=True)
- sp3 = p.to_sparse(fill_value=0)
- self._check_roundtrip(sp3, tm.assert_panel_equal,
- check_panel_type=True)
+ sp3 = p.to_sparse(fill_value=0)
+ self._check_roundtrip(sp3, tm.assert_panel_equal,
+ check_panel_type=True)
class TestCompression(TestPackers):
diff --git a/pandas/io/tests/test_wb.py b/pandas/io/tests/test_wb.py
index 51d6ac02f0f20..ef72ad4964ff2 100644
--- a/pandas/io/tests/test_wb.py
+++ b/pandas/io/tests/test_wb.py
@@ -5,9 +5,11 @@
from pandas.util.testing import network
from pandas.util.testing import assert_frame_equal
from numpy.testing.decorators import slow
-from pandas.io.wb import search, download, get_countries
import pandas.util.testing as tm
+# deprecated
+with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ from pandas.io.wb import search, download, get_countries
class TestWB(tm.TestCase):
diff --git a/pandas/rpy/tests/test_common.py b/pandas/rpy/tests/test_common.py
index a2e6d08d07b58..4b579e9263742 100644
--- a/pandas/rpy/tests/test_common.py
+++ b/pandas/rpy/tests/test_common.py
@@ -6,6 +6,7 @@
import numpy as np
import unittest
import nose
+import warnings
import pandas.util.testing as tm
try:
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index b765fdb8d67be..f275a34ca90db 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -283,7 +283,15 @@ def __getitem__(self, key):
if com.is_integer(key):
return self._get_val_at(key)
else:
- data_slice = self.values[key]
+ if isinstance(key, SparseArray):
+ key = np.asarray(key)
+ if hasattr(key,'__len__') and len(self) != len(key):
+ indices = self.sp_index
+ if hasattr(indices,'to_int_index'):
+ indices = indices.to_int_index()
+ data_slice = self.values.take(indices.indices)[key]
+ else:
+ data_slice = self.values[key]
return self._constructor(data_slice)
def __getslice__(self, i, j):
@@ -513,7 +521,12 @@ def make_sparse(arr, kind='block', fill_value=nan):
else:
mask = arr != fill_value
- indices = np.arange(length, dtype=np.int32)[mask]
+ length = len(arr)
+ if length != mask.size:
+ # the arr is a SparseArray
+ indices = mask.sp_index.indices
+ else:
+ indices = np.arange(length, dtype=np.int32)[mask]
if kind == 'block':
locs, lens = splib.get_blocks(indices)
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 86c8f5298e0ab..e2ed27156d2b5 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -43,10 +43,6 @@ class TestMoments(Base):
def setUp(self):
self._create_data()
- warnings.simplefilter("ignore", category=FutureWarning)
-
- def tearDown(self):
- warnings.simplefilter("default", category=FutureWarning)
def test_centered_axis_validation(self):
# ok
@@ -890,7 +886,6 @@ def _create_data(self):
def setUp(self):
self._create_data()
- warnings.simplefilter("ignore", category=FutureWarning)
def _test_moments_consistency(self,
min_periods,
@@ -1516,9 +1511,6 @@ def test_rolling_functions_window_non_shrinkage(self):
functions = [lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
- # rolling_corr_pairwise is depracated, so the following line should be deleted
- # when rolling_corr_pairwise is removed.
- lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df_result_panel = f(df)
@@ -1585,9 +1577,6 @@ def test_moment_functions_zero_length(self):
lambda x: mom.expanding_corr(x, x, pairwise=True, min_periods=5),
lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
- # rolling_corr_pairwise is depracated, so the following line should be deleted
- # when rolling_corr_pairwise is removed.
- lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df1_result_panel = f(df1)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index 83b76393f30e0..b85f4628ae013 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -3169,6 +3169,7 @@ def test_pie_df_nan(self):
ax.get_legend().get_texts()],
base_expected[:i] + base_expected[i+1:])
+ @slow
def test_errorbar_plot(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 8eb641ce8f494..46026a4c887a6 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -1655,6 +1655,7 @@ def check_nunique(df, keys):
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
+ @slow
def test_series_groupby_value_counts(self):
from itertools import product
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 90f85b3f4576d..a2d789aaf8b70 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -25,6 +25,7 @@
import pandas.util.testing as tm
from pandas import date_range
+from numpy.testing.decorators import slow
_verbose = False
@@ -1689,74 +1690,71 @@ def test_multiindex_perf_warn(self):
with tm.assert_produces_warning(PerformanceWarning):
_ = df.loc[(0,)]
+ @slow
def test_multiindex_get_loc(self): # GH7724, GH2646
- # ignore the warning here
- warnings.simplefilter('ignore', PerformanceWarning)
+ with warnings.catch_warnings(record=True):
- # test indexing into a multi-index before & past the lexsort depth
- from numpy.random import randint, choice, randn
- cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
+ # test indexing into a multi-index before & past the lexsort depth
+ from numpy.random import randint, choice, randn
+ cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
- def validate(mi, df, key):
- mask = np.ones(len(df)).astype('bool')
+ def validate(mi, df, key):
+ mask = np.ones(len(df)).astype('bool')
- # test for all partials of this key
- for i, k in enumerate(key):
- mask &= df.iloc[:, i] == k
+ # test for all partials of this key
+ for i, k in enumerate(key):
+ mask &= df.iloc[:, i] == k
- if not mask.any():
- self.assertNotIn(key[:i+1], mi.index)
- continue
-
- self.assertIn(key[:i+1], mi.index)
- right = df[mask].copy()
+ if not mask.any():
+ self.assertNotIn(key[:i+1], mi.index)
+ continue
- if i + 1 != len(key): # partial key
- right.drop(cols[:i+1], axis=1, inplace=True)
- right.set_index(cols[i+1:-1], inplace=True)
- assert_frame_equal(mi.loc[key[:i+1]], right)
+ self.assertIn(key[:i+1], mi.index)
+ right = df[mask].copy()
- else: # full key
- right.set_index(cols[:-1], inplace=True)
- if len(right) == 1: # single hit
- right = Series(right['jolia'].values,
- name=right.index[0], index=['jolia'])
- assert_series_equal(mi.loc[key[:i+1]], right)
- else: # multi hit
+ if i + 1 != len(key): # partial key
+ right.drop(cols[:i+1], axis=1, inplace=True)
+ right.set_index(cols[i+1:-1], inplace=True)
assert_frame_equal(mi.loc[key[:i+1]], right)
- def loop(mi, df, keys):
- for key in keys:
- validate(mi, df, key)
-
- n, m = 1000, 50
-
- vals = [randint(0, 10, n), choice(list('abcdefghij'), n),
- choice(pd.date_range('20141009', periods=10).tolist(), n),
- choice(list('ZYXWVUTSRQ'), n), randn(n)]
- vals = list(map(tuple, zip(*vals)))
-
- # bunch of keys for testing
- keys = [randint(0, 11, m), choice(list('abcdefghijk'), m),
- choice(pd.date_range('20141009', periods=11).tolist(), m),
- choice(list('ZYXWVUTSRQP'), m)]
- keys = list(map(tuple, zip(*keys)))
- keys += list(map(lambda t: t[:-1], vals[::n//m]))
-
- # covers both unique index and non-unique index
- df = pd.DataFrame(vals, columns=cols)
- a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
-
- for frame in a, b:
- for i in range(5): # lexsort depth
- df = frame.copy() if i == 0 else frame.sort_values(by=cols[:i])
- mi = df.set_index(cols[:-1])
- assert not mi.index.lexsort_depth < i
- loop(mi, df, keys)
-
- # restore
- warnings.simplefilter('always', PerformanceWarning)
+ else: # full key
+ right.set_index(cols[:-1], inplace=True)
+ if len(right) == 1: # single hit
+ right = Series(right['jolia'].values,
+ name=right.index[0], index=['jolia'])
+ assert_series_equal(mi.loc[key[:i+1]], right)
+ else: # multi hit
+ assert_frame_equal(mi.loc[key[:i+1]], right)
+
+ def loop(mi, df, keys):
+ for key in keys:
+ validate(mi, df, key)
+
+ n, m = 1000, 50
+
+ vals = [randint(0, 10, n), choice(list('abcdefghij'), n),
+ choice(pd.date_range('20141009', periods=10).tolist(), n),
+ choice(list('ZYXWVUTSRQ'), n), randn(n)]
+ vals = list(map(tuple, zip(*vals)))
+
+ # bunch of keys for testing
+ keys = [randint(0, 11, m), choice(list('abcdefghijk'), m),
+ choice(pd.date_range('20141009', periods=11).tolist(), m),
+ choice(list('ZYXWVUTSRQP'), m)]
+ keys = list(map(tuple, zip(*keys)))
+ keys += list(map(lambda t: t[:-1], vals[::n//m]))
+
+ # covers both unique index and non-unique index
+ df = pd.DataFrame(vals, columns=cols)
+ a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
+
+ for frame in a, b:
+ for i in range(5): # lexsort depth
+ df = frame.copy() if i == 0 else frame.sort_values(by=cols[:i])
+ mi = df.set_index(cols[:-1])
+ assert not mi.index.lexsort_depth < i
+ loop(mi, df, keys)
def test_series_getitem_multiindex(self):
@@ -4653,6 +4651,7 @@ def test_indexing_dtypes_on_empty(self):
assert_series_equal(df2.loc[:,'a'], df2.iloc[:,0])
assert_series_equal(df2.loc[:,'a'], df2.ix[:,0])
+ @slow
def test_large_dataframe_indexing(self):
#GH10692
result = DataFrame({'x': range(10**6)},dtype='int64')
@@ -4660,6 +4659,7 @@ def test_large_dataframe_indexing(self):
expected = DataFrame({'x': range(10**6 + 1)},dtype='int64')
assert_frame_equal(result, expected)
+ @slow
def test_large_mi_dataframe_indexing(self):
#GH10645
result = MultiIndex.from_arrays([range(10**6), range(10**6)])
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 6d6c289a6dfa6..b9db95fe06a43 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -3,6 +3,7 @@
from functools import partial
+import warnings
import numpy as np
from pandas import Series
from pandas.core.common import isnull, is_integer_dtype
@@ -135,7 +136,7 @@ def _coerce_tds(targ, res):
return targ, res
try:
- if axis != 0 and hasattr(targ, 'shape') and targ.ndim:
+ if axis != 0 and hasattr(targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
except:
targ, res = _coerce_tds(targ, res)
@@ -364,10 +365,11 @@ def test_returned_dtype(self):
"return dtype expected from %s is %s, got %s instead" % (method, dtype, result.dtype))
def test_nanmedian(self):
- self.check_funs(nanops.nanmedian, np.median,
- allow_complex=False, allow_str=False, allow_date=False,
- allow_tdelta=True,
- allow_obj='convert')
+ with warnings.catch_warnings(record=True):
+ self.check_funs(nanops.nanmedian, np.median,
+ allow_complex=False, allow_str=False, allow_date=False,
+ allow_tdelta=True,
+ allow_obj='convert')
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var,
diff --git a/pandas/tests/test_rplot.py b/pandas/tests/test_rplot.py
index e79acfcbc58d8..4342417db193b 100644
--- a/pandas/tests/test_rplot.py
+++ b/pandas/tests/test_rplot.py
@@ -1,12 +1,12 @@
# -*- coding: utf-8 -*-
from pandas.compat import range
-import pandas.tools.rplot as rplot
import pandas.util.testing as tm
from pandas import read_csv
import os
-
import nose
+with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 929a72cfd4adc..b555a7dc2b3a1 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -20,6 +20,7 @@
from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range, read_table, read_csv
import pandas.algos as algos
import pandas.util.testing as tm
+from numpy.testing.decorators import slow
a_ = np.array
@@ -1410,6 +1411,7 @@ def test_merge_na_keys(self):
tm.assert_frame_equal(result, expected)
+ @slow
def test_int64_overflow_issues(self):
from itertools import product
from collections import defaultdict
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index d142ffdbad983..a278c4d0f9045 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -2020,6 +2020,7 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always",
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
+
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
| https://api.github.com/repos/pandas-dev/pandas/pulls/11391 | 2015-10-20T23:21:57Z | 2015-10-21T12:23:28Z | 2015-10-21T12:23:28Z | 2015-10-21T12:23:28Z |
|
ENH: str.extractall for several matches | diff --git a/doc/source/api.rst b/doc/source/api.rst
index 3a6b31ceeeece..96bb91b60b8bf 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -526,6 +526,7 @@ strings and apply several methods to it. These can be accessed like
Series.str.encode
Series.str.endswith
Series.str.extract
+ Series.str.extractall
Series.str.find
Series.str.findall
Series.str.get
diff --git a/doc/source/text.rst b/doc/source/text.rst
index d5ca24523695d..13421ae3dfa55 100644
--- a/doc/source/text.rst
+++ b/doc/source/text.rst
@@ -168,28 +168,37 @@ Extracting Substrings
.. _text.extract:
-The method ``extract`` (introduced in version 0.13) accepts `regular expressions
-<https://docs.python.org/2/library/re.html>`__ with match groups. Extracting a
-regular expression with one group returns a Series of strings.
+Extract first match in each subject (extract)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-.. ipython:: python
+.. versionadded:: 0.13.0
+
+.. warning::
+
+ In version 0.18.0, ``extract`` gained the ``expand`` argument. When
+ ``expand=False`` it returns a ``Series``, ``Index``, or
+ ``DataFrame``, depending on the subject and regular expression
+ pattern (same behavior as pre-0.18.0). When ``expand=True`` it
+ always returns a ``DataFrame``, which is more consistent and less
+ confusing from the perspective of a user.
- pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
+The ``extract`` method accepts a `regular expression
+<https://docs.python.org/2/library/re.html>`__ with at least one
+capture group.
-Elements that do not match return ``NaN``. Extracting a regular expression
-with more than one group returns a DataFrame with one column per group.
+Extracting a regular expression with more than one group returns a
+DataFrame with one column per group.
.. ipython:: python
pd.Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
-Elements that do not match return a row filled with ``NaN``.
-Thus, a Series of messy strings can be "converted" into a
-like-indexed Series or DataFrame of cleaned-up or more useful strings,
-without necessitating ``get()`` to access tuples or ``re.match`` objects.
-
-The results dtype always is object, even if no match is found and the result
-only contains ``NaN``.
+Elements that do not match return a row filled with ``NaN``. Thus, a
+Series of messy strings can be "converted" into a like-indexed Series
+or DataFrame of cleaned-up or more useful strings, without
+necessitating ``get()`` to access tuples or ``re.match`` objects. The
+results dtype always is object, even if no match is found and the
+result only contains ``NaN``.
Named groups like
@@ -201,9 +210,109 @@ and optional groups like
.. ipython:: python
- pd.Series(['a1', 'b2', '3']).str.extract('(?P<letter>[ab])?(?P<digit>\d)')
+ pd.Series(['a1', 'b2', '3']).str.extract('([ab])?(\d)')
+
+can also be used. Note that any capture group names in the regular
+expression will be used for column names; otherwise capture group
+numbers will be used.
+
+Extracting a regular expression with one group returns a ``DataFrame``
+with one column if ``expand=True``.
+
+.. ipython:: python
+
+ pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)', expand=True)
+
+It returns a Series if ``expand=False``.
+
+.. ipython:: python
+
+ pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)', expand=False)
+
+Calling on an ``Index`` with a regex with exactly one capture group
+returns a ``DataFrame`` with one column if ``expand=True``,
+
+.. ipython:: python
+
+ s = pd.Series(["a1", "b2", "c3"], ["A11", "B22", "C33"])
+ s
+ s.index.str.extract("(?P<letter>[a-zA-Z])", expand=True)
+
+It returns an ``Index`` if ``expand=False``.
+
+.. ipython:: python
+
+ s.index.str.extract("(?P<letter>[a-zA-Z])", expand=False)
+
+Calling on an ``Index`` with a regex with more than one capture group
+returns a ``DataFrame`` if ``expand=True``.
+
+.. ipython:: python
+
+ s.index.str.extract("(?P<letter>[a-zA-Z])([0-9]+)", expand=True)
+
+It raises ``ValueError`` if ``expand=False``.
+
+.. code-block:: python
+
+ >>> s.index.str.extract("(?P<letter>[a-zA-Z])([0-9]+)", expand=False)
+ ValueError: This pattern contains no groups to capture.
+
+The table below summarizes the behavior of ``extract(expand=False)``
+(input subject in first column, number of groups in regex in
+first row)
+
++--------+---------+------------+
+| | 1 group | >1 group |
++--------+---------+------------+
+| Index | Index | ValueError |
++--------+---------+------------+
+| Series | Series | DataFrame |
++--------+---------+------------+
+
+Extract all matches in each subject (extractall)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. _text.extractall:
+
+Unlike ``extract`` (which returns only the first match),
+
+.. ipython:: python
+
+ s = pd.Series(["a1a2", "b1", "c1"], ["A", "B", "C"])
+ s
+ s.str.extract("[ab](?P<digit>\d)")
+
+.. versionadded:: 0.18.0
+
+the ``extractall`` method returns every match. The result of
+``extractall`` is always a ``DataFrame`` with a ``MultiIndex`` on its
+rows. The last level of the ``MultiIndex`` is named ``match`` and
+indicates the order in the subject.
+
+.. ipython:: python
+
+ s.str.extractall("[ab](?P<digit>\d)")
+
+When each subject string in the Series has exactly one match,
+
+.. ipython:: python
+
+ s = pd.Series(['a3', 'b3', 'c2'])
+ s
+ two_groups = '(?P<letter>[a-z])(?P<digit>[0-9])'
+
+then ``extractall(pat).xs(0, level='match')`` gives the same result as
+``extract(pat)``.
+
+.. ipython:: python
+
+ extract_result = s.str.extract(two_groups)
+ extract_result
+ extractall_result = s.str.extractall(two_groups)
+ extractall_result
+ extractall_result.xs(0, level="match")
-can also be used.
Testing for Strings that Match or Contain a Pattern
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -288,7 +397,8 @@ Method Summary
:meth:`~Series.str.endswith`,Equivalent to ``str.endswith(pat)`` for each element
:meth:`~Series.str.findall`,Compute list of all occurrences of pattern/regex for each string
:meth:`~Series.str.match`,"Call ``re.match`` on each element, returning matched groups as list"
- :meth:`~Series.str.extract`,"Call ``re.match`` on each element, as ``match`` does, but return matched groups as strings for convenience."
+ :meth:`~Series.str.extract`,"Call ``re.search`` on each element, returning DataFrame with one row for each element and one column for each regex capture group"
+ :meth:`~Series.str.extractall`,"Call ``re.findall`` on each element, returning DataFrame with one row for each match and one column for each regex capture group"
:meth:`~Series.str.len`,Compute string lengths
:meth:`~Series.str.strip`,Equivalent to ``str.strip``
:meth:`~Series.str.rstrip`,Equivalent to ``str.rstrip``
diff --git a/doc/source/whatsnew/v0.18.0.txt b/doc/source/whatsnew/v0.18.0.txt
index 58b60fb08920a..e461335fbd8a3 100644
--- a/doc/source/whatsnew/v0.18.0.txt
+++ b/doc/source/whatsnew/v0.18.0.txt
@@ -136,6 +136,92 @@ New Behavior:
s.index
s.index.nbytes
+.. _whatsnew_0180.enhancements.extract:
+
+Changes to str.extract
+^^^^^^^^^^^^^^^^^^^^^^
+
+The :ref:`.str.extract <text.extract>` method takes a regular
+expression with capture groups, finds the first match in each subject
+string, and returns the contents of the capture groups
+(:issue:`11386`). In v0.18.0, the ``expand`` argument was added to
+``extract``. When ``expand=False`` it returns a ``Series``, ``Index``,
+or ``DataFrame``, depending on the subject and regular expression
+pattern (same behavior as pre-0.18.0). When ``expand=True`` it always
+returns a ``DataFrame``, which is more consistent and less confusing
+from the perspective of a user. Currently the default is
+``expand=None`` which gives a ``FutureWarning`` and uses
+``expand=False``. To avoid this warning, please explicitly specify
+``expand``.
+
+.. ipython:: python
+
+ pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
+
+Extracting a regular expression with one group returns a ``DataFrame``
+with one column if ``expand=True``.
+
+.. ipython:: python
+
+ pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)', expand=True)
+
+It returns a Series if ``expand=False``.
+
+.. ipython:: python
+
+ pd.Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)', expand=False)
+
+Calling on an ``Index`` with a regex with exactly one capture group
+returns a ``DataFrame`` with one column if ``expand=True``,
+
+.. ipython:: python
+
+ s = pd.Series(["a1", "b2", "c3"], ["A11", "B22", "C33"])
+ s
+ s.index.str.extract("(?P<letter>[a-zA-Z])", expand=True)
+
+It returns an ``Index`` if ``expand=False``.
+
+.. ipython:: python
+
+ s.index.str.extract("(?P<letter>[a-zA-Z])", expand=False)
+
+Calling on an ``Index`` with a regex with more than one capture group
+returns a ``DataFrame`` if ``expand=True``.
+
+.. ipython:: python
+
+ s.index.str.extract("(?P<letter>[a-zA-Z])([0-9]+)", expand=True)
+
+It raises ``ValueError`` if ``expand=False``.
+
+.. code-block:: python
+
+ >>> s.index.str.extract("(?P<letter>[a-zA-Z])([0-9]+)", expand=False)
+ ValueError: only one regex group is supported with Index
+
+In summary, ``extract(expand=True)`` always returns a ``DataFrame``
+with a row for every subject string, and a column for every capture
+group.
+
+.. _whatsnew_0180.enhancements.extractall:
+
+The :ref:`.str.extractall <text.extractall>` method was added
+(:issue:`11386`). Unlike ``extract`` (which returns only the first
+match),
+
+.. ipython:: python
+
+ s = pd.Series(["a1a2", "b1", "c1"], ["A", "B", "C"])
+ s
+ s.str.extract("(?P<letter>[ab])(?P<digit>\d)")
+
+the ``extractall`` method returns all matches.
+
+.. ipython:: python
+
+ s.str.extractall("(?P<letter>[ab])(?P<digit>\d)")
+
.. _whatsnew_0180.enhancements.rounding:
Datetimelike rounding
diff --git a/pandas/core/strings.py b/pandas/core/strings.py
index 1ffa836a75a1b..df5f0ffa9c0a4 100644
--- a/pandas/core/strings.py
+++ b/pandas/core/strings.py
@@ -418,38 +418,123 @@ def _get_single_group_name(rx):
return None
-def str_extract(arr, pat, flags=0):
+def _groups_or_na_fun(regex):
+ """Used in both extract_noexpand and extract_frame"""
+ if regex.groups == 0:
+ raise ValueError("pattern contains no capture groups")
+ empty_row = [np.nan] * regex.groups
+
+ def f(x):
+ if not isinstance(x, compat.string_types):
+ return empty_row
+ m = regex.search(x)
+ if m:
+ return [np.nan if item is None else item for item in m.groups()]
+ else:
+ return empty_row
+ return f
+
+
+def _str_extract_noexpand(arr, pat, flags=0):
"""
Find groups in each string in the Series using passed regular
- expression.
+ expression. This function is called from
+ str_extract(expand=False), and can return Series, DataFrame, or
+ Index.
+
+ """
+ from pandas import DataFrame, Index
+
+ regex = re.compile(pat, flags=flags)
+ groups_or_na = _groups_or_na_fun(regex)
+
+ if regex.groups == 1:
+ result = np.array([groups_or_na(val)[0] for val in arr], dtype=object)
+ name = _get_single_group_name(regex)
+ else:
+ if isinstance(arr, Index):
+ raise ValueError("only one regex group is supported with Index")
+ name = None
+ names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
+ columns = [names.get(1 + i, i) for i in range(regex.groups)]
+ if arr.empty:
+ result = DataFrame(columns=columns, dtype=object)
+ else:
+ result = DataFrame(
+ [groups_or_na(val) for val in arr],
+ columns=columns,
+ index=arr.index,
+ dtype=object)
+ return result, name
+
+
+def _str_extract_frame(arr, pat, flags=0):
+ """
+ For each subject string in the Series, extract groups from the
+ first match of regular expression pat. This function is called from
+ str_extract(expand=True), and always returns a DataFrame.
+
+ """
+ from pandas import DataFrame
+
+ regex = re.compile(pat, flags=flags)
+ groups_or_na = _groups_or_na_fun(regex)
+ names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
+ columns = [names.get(1 + i, i) for i in range(regex.groups)]
+
+ if len(arr) == 0:
+ return DataFrame(columns=columns, dtype=object)
+ try:
+ result_index = arr.index
+ except AttributeError:
+ result_index = None
+ return DataFrame(
+ [groups_or_na(val) for val in arr],
+ columns=columns,
+ index=result_index,
+ dtype=object)
+
+
+def str_extract(arr, pat, flags=0, expand=None):
+ """
+ For each subject string in the Series, extract groups from the
+ first match of regular expression pat.
+
+ .. versionadded:: 0.13.0
Parameters
----------
pat : string
- Pattern or regular expression
+ Regular expression pattern with capturing groups
flags : int, default 0 (no flags)
re module flags, e.g. re.IGNORECASE
+ .. versionadded:: 0.18.0
+ expand : bool, default False
+ * If True, return DataFrame.
+ * If False, return Series/Index/DataFrame.
+
Returns
-------
- extracted groups : Series (one group) or DataFrame (multiple groups)
- Note that dtype of the result is always object, even when no match is
- found and the result is a Series or DataFrame containing only NaN
- values.
+ DataFrame with one row for each subject string, and one column for
+ each group. Any capture group names in regular expression pat will
+ be used for column names; otherwise capture group numbers will be
+ used. The dtype of each result column is always object, even when
+ no match is found. If expand=True and pat has only one capture group,
+ then return a Series (if subject is a Series) or Index (if subject
+ is an Index).
- Examples
+ See Also
--------
- A pattern with one group will return a Series. Non-matches will be NaN.
-
- >>> Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
- 0 1
- 1 2
- 2 NaN
- dtype: object
+ extractall : returns all matches (not just the first match)
- A pattern with more than one group will return a DataFrame.
+ Examples
+ --------
+ A pattern with two groups will return a DataFrame with two columns.
+ Non-matches will be NaN.
- >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
+ >>> s = Series(['a1', 'b2', 'c3'])
+ >>> s.str.extract('([ab])(\d)')
0 1
0 a 1
1 b 2
@@ -457,7 +542,7 @@ def str_extract(arr, pat, flags=0):
A pattern may contain optional groups.
- >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
+ >>> s.str.extract('([ab])?(\d)')
0 1
0 a 1
1 b 2
@@ -465,46 +550,147 @@ def str_extract(arr, pat, flags=0):
Named groups will become column names in the result.
- >>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
+ >>> s.str.extract('(?P<letter>[ab])(?P<digit>\d)')
letter digit
0 a 1
1 b 2
2 NaN NaN
+ A pattern with one group will return a DataFrame with one column
+ if expand=True.
+
+ >>> s.str.extract('[ab](\d)', expand=True)
+ 0
+ 0 1
+ 1 2
+ 2 NaN
+
+ A pattern with one group will return a Series if expand=False.
+
+ >>> s.str.extract('[ab](\d)', expand=False)
+ 0 1
+ 1 2
+ 2 NaN
+ dtype: object
+
"""
- from pandas.core.frame import DataFrame
- from pandas.core.index import Index
+ if expand is None:
+ warnings.warn(
+ "currently extract(expand=None) " +
+ "means expand=False (return Index/Series/DataFrame) " +
+ "but in a future version of pandas this will be changed " +
+ "to expand=True (return DataFrame)",
+ FutureWarning,
+ stacklevel=3)
+ expand = False
+ if not isinstance(expand, bool):
+ raise ValueError("expand must be True or False")
+ if expand:
+ return _str_extract_frame(arr._orig, pat, flags=flags)
+ else:
+ result, name = _str_extract_noexpand(arr._data, pat, flags=flags)
+ return arr._wrap_result(result, name=name)
- regex = re.compile(pat, flags=flags)
- # just to be safe, check this
- if regex.groups == 0:
- raise ValueError("This pattern contains no groups to capture.")
- empty_row = [np.nan] * regex.groups
- def f(x):
- if not isinstance(x, compat.string_types):
- return empty_row
- m = regex.search(x)
- if m:
- return [np.nan if item is None else item for item in m.groups()]
- else:
- return empty_row
+def str_extractall(arr, pat, flags=0):
+ """
+ For each subject string in the Series, extract groups from all
+ matches of regular expression pat. When each subject string in the
+ Series has exactly one match, extractall(pat).xs(0, level='match')
+ is the same as extract(pat).
- if regex.groups == 1:
- result = np.array([f(val)[0] for val in arr], dtype=object)
- name = _get_single_group_name(regex)
+ .. versionadded:: 0.18.0
+
+ Parameters
+ ----------
+ pat : string
+ Regular expression pattern with capturing groups
+ flags : int, default 0 (no flags)
+ re module flags, e.g. re.IGNORECASE
+
+ Returns
+ -------
+ A DataFrame with one row for each match, and one column for each
+ group. Its rows have a MultiIndex with first levels that come from
+ the subject Series. The last level is named 'match' and indicates
+ the order in the subject. Any capture group names in regular
+ expression pat will be used for column names; otherwise capture
+ group numbers will be used.
+
+ See Also
+ --------
+ extract : returns first match only (not all matches)
+
+ Examples
+ --------
+ A pattern with one group will return a DataFrame with one column.
+ Indices with no matches will not appear in the result.
+
+ >>> s = Series(["a1a2", "b1", "c1"], index=["A", "B", "C"])
+ >>> s.str.extractall("[ab](\d)")
+ 0
+ match
+ A 0 1
+ 1 2
+ B 0 1
+
+ Capture group names are used for column names of the result.
+
+ >>> s.str.extractall("[ab](?P<digit>\d)")
+ digit
+ match
+ A 0 1
+ 1 2
+ B 0 1
+
+ A pattern with two groups will return a DataFrame with two columns.
+
+ >>> s.str.extractall("(?P<letter>[ab])(?P<digit>\d)")
+ letter digit
+ match
+ A 0 a 1
+ 1 a 2
+ B 0 b 1
+
+ Optional groups that do not match are NaN in the result.
+
+ >>> s.str.extractall("(?P<letter>[ab])?(?P<digit>\d)")
+ letter digit
+ match
+ A 0 a 1
+ 1 a 2
+ B 0 b 1
+ C 0 NaN 1
+
+ """
+ from pandas import DataFrame, MultiIndex
+ regex = re.compile(pat, flags=flags)
+ # the regex must contain capture groups.
+ if regex.groups == 0:
+ raise ValueError("pattern contains no capture groups")
+ names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
+ columns = [names.get(1 + i, i) for i in range(regex.groups)]
+ match_list = []
+ index_list = []
+ for subject_key, subject in arr.iteritems():
+ if isinstance(subject, compat.string_types):
+ try:
+ key_list = list(subject_key)
+ except TypeError:
+ key_list = [subject_key]
+ for match_i, match_tuple in enumerate(regex.findall(subject)):
+ na_tuple = [
+ np.NaN if group == "" else group for group in match_tuple]
+ match_list.append(na_tuple)
+ result_key = tuple(key_list + [match_i])
+ index_list.append(result_key)
+ if 0 < len(index_list):
+ index = MultiIndex.from_tuples(
+ index_list, names=arr.index.names + ["match"])
else:
- if isinstance(arr, Index):
- raise ValueError("only one regex group is supported with Index")
- name = None
- names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
- columns = [names.get(1 + i, i) for i in range(regex.groups)]
- if arr.empty:
- result = DataFrame(columns=columns, dtype=object)
- else:
- result = DataFrame([f(val) for val in arr], columns=columns,
- index=arr.index, dtype=object)
- return result, name
+ index = None
+ result = DataFrame(match_list, index, columns)
+ return result
def str_get_dummies(arr, sep='|'):
@@ -599,6 +785,10 @@ def str_findall(arr, pat, flags=0):
Returns
-------
matches : Series/Index of lists
+
+ See Also
+ --------
+ extractall : returns DataFrame with one column per capture group
"""
regex = re.compile(pat, flags=flags)
return _na_map(regex.findall, arr)
@@ -1403,9 +1593,12 @@ def translate(self, table, deletechars=None):
findall = _pat_wrapper(str_findall, flags=True)
@copy(str_extract)
- def extract(self, pat, flags=0):
- result, name = str_extract(self._data, pat, flags=flags)
- return self._wrap_result(result, name=name)
+ def extract(self, pat, flags=0, expand=None):
+ return str_extract(self, pat, flags=flags, expand=expand)
+
+ @copy(str_extractall)
+ def extractall(self, pat, flags=0):
+ return str_extractall(self._orig, pat, flags=flags)
_shared_docs['find'] = ("""
Return %(side)s indexes in each strings in the Series/Index
diff --git a/pandas/tests/test_categorical.py b/pandas/tests/test_categorical.py
index 733ed2fbcb971..071e280bd112a 100755
--- a/pandas/tests/test_categorical.py
+++ b/pandas/tests/test_categorical.py
@@ -4110,6 +4110,7 @@ def test_str_accessor_api_for_categorical(self):
('encode', ("UTF-8",), {}),
('endswith', ("a",), {}),
('extract', ("([a-z]*) ",), {}),
+ ('extractall', ("([a-z]*) ",), {}),
('find', ("a",), {}),
('findall', ("a",), {}),
('index', (" ",), {}),
diff --git a/pandas/tests/test_strings.py b/pandas/tests/test_strings.py
index f8255c4b4a410..2ff22f2d96774 100644
--- a/pandas/tests/test_strings.py
+++ b/pandas/tests/test_strings.py
@@ -509,12 +509,22 @@ def test_match(self):
exp = Series([True, np.nan, np.nan])
assert_series_equal(exp, res)
- def test_extract(self):
+ def test_extract_expand_None(self):
+ values = Series(['fooBAD__barBAD', NA, 'foo'])
+ with tm.assert_produces_warning(FutureWarning):
+ values.str.extract('.*(BAD[_]+).*(BAD)', expand=None)
+
+ def test_extract_expand_unspecified(self):
+ values = Series(['fooBAD__barBAD', NA, 'foo'])
+ with tm.assert_produces_warning(FutureWarning):
+ values.str.extract('.*(BAD[_]+).*(BAD)')
+
+ def test_extract_expand_False(self):
# Contains tests like those in test_match and some others.
values = Series(['fooBAD__barBAD', NA, 'foo'])
er = [NA, NA] # empty row
- result = values.str.extract('.*(BAD[_]+).*(BAD)')
+ result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD__', 'BAD'], er, er])
tm.assert_frame_equal(result, exp)
@@ -522,7 +532,7 @@ def test_extract(self):
mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
'foo', None, 1, 2.])
- rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)')
+ rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er, er, er,
er, er])
tm.assert_frame_equal(rs, exp)
@@ -530,7 +540,7 @@ def test_extract(self):
# unicode
values = Series([u('fooBAD__barBAD'), NA, u('foo')])
- result = values.str.extract('.*(BAD[_]+).*(BAD)')
+ result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=False)
exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
tm.assert_frame_equal(result, exp)
@@ -539,84 +549,85 @@ def test_extract(self):
# multi-group would expand to a frame
idx = Index(['A1', 'A2', 'A3', 'A4', 'B5'])
with tm.assertRaisesRegexp(ValueError, "supported"):
- idx.str.extract('([AB])([123])')
+ idx.str.extract('([AB])([123])', expand=False)
# these should work for both Series and Index
for klass in [Series, Index]:
# no groups
s_or_idx = klass(['A1', 'B2', 'C3'])
- f = lambda: s_or_idx.str.extract('[ABC][123]')
+ f = lambda: s_or_idx.str.extract('[ABC][123]', expand=False)
self.assertRaises(ValueError, f)
# only non-capturing groups
- f = lambda: s_or_idx.str.extract('(?:[AB]).*')
+ f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=False)
self.assertRaises(ValueError, f)
# single group renames series/index properly
s_or_idx = klass(['A1', 'A2'])
- result = s_or_idx.str.extract(r'(?P<uno>A)\d')
+ result = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=False)
tm.assert_equal(result.name, 'uno')
tm.assert_numpy_array_equal(result, klass(['A', 'A']))
s = Series(['A1', 'B2', 'C3'])
# one group, no matches
- result = s.str.extract('(_)')
+ result = s.str.extract('(_)', expand=False)
exp = Series([NA, NA, NA], dtype=object)
tm.assert_series_equal(result, exp)
# two groups, no matches
- result = s.str.extract('(_)(_)')
+ result = s.str.extract('(_)(_)', expand=False)
exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
tm.assert_frame_equal(result, exp)
# one group, some matches
- result = s.str.extract('([AB])[123]')
+ result = s.str.extract('([AB])[123]', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two groups, some matches
- result = s.str.extract('([AB])([123])')
+ result = s.str.extract('([AB])([123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one named group
- result = s.str.extract('(?P<letter>[AB])')
+ result = s.str.extract('(?P<letter>[AB])', expand=False)
exp = Series(['A', 'B', NA], name='letter')
tm.assert_series_equal(result, exp)
# two named groups
- result = s.str.extract('(?P<letter>[AB])(?P<number>[123])')
+ result = s.str.extract('(?P<letter>[AB])(?P<number>[123])',
+ expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# mix named and unnamed groups
- result = s.str.extract('([AB])(?P<number>[123])')
+ result = s.str.extract('([AB])(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]],
columns=[0, 'number'])
tm.assert_frame_equal(result, exp)
# one normal group, one non-capturing group
- result = s.str.extract('([AB])(?:[123])')
+ result = s.str.extract('([AB])(?:[123])', expand=False)
exp = Series(['A', 'B', NA])
tm.assert_series_equal(result, exp)
# two normal groups, one non-capturing group
result = Series(['A11', 'B22', 'C33']).str.extract(
- '([AB])([123])(?:[123])')
+ '([AB])([123])(?:[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
tm.assert_frame_equal(result, exp)
# one optional group followed by one normal group
result = Series(['A1', 'B2', '3']).str.extract(
- '(?P<letter>[AB])?(?P<number>[123])')
+ '(?P<letter>[AB])?(?P<number>[123])', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], [NA, '3']],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
# one normal group followed by one optional group
result = Series(['A1', 'B2', 'C']).str.extract(
- '(?P<letter>[ABC])(?P<number>[123])?')
+ '(?P<letter>[ABC])(?P<number>[123])?', expand=False)
exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]],
columns=['letter', 'number'])
tm.assert_frame_equal(result, exp)
@@ -626,28 +637,431 @@ def test_extract(self):
def check_index(index):
data = ['A1', 'B2', 'C']
index = index[:len(data)]
- result = Series(data, index=index).str.extract('(\d)')
+ s = Series(data, index=index)
+ result = s.str.extract('(\d)', expand=False)
exp = Series(['1', '2', NA], index=index)
tm.assert_series_equal(result, exp)
- result = Series(
- data, index=index).str.extract('(?P<letter>\D)(?P<number>\d)?')
- exp = DataFrame([['A', '1'], ['B', '2'], ['C', NA]], columns=[
- 'letter', 'number'
- ], index=index)
+ result = Series(data, index=index).str.extract(
+ '(?P<letter>\D)(?P<number>\d)?', expand=False)
+ e_list = [
+ ['A', '1'],
+ ['B', '2'],
+ ['C', NA]
+ ]
+ exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
tm.assert_frame_equal(result, exp)
- for index in [tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
- tm.makeDateIndex, tm.makePeriodIndex]:
+ i_funs = [
+ tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
+ tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
+ ]
+ for index in i_funs:
check_index(index())
- def test_extract_single_series_name_is_preserved(self):
+ # single_series_name_is_preserved.
s = Series(['a3', 'b3', 'c2'], name='bob')
- r = s.str.extract(r'(?P<sue>[a-z])')
+ r = s.str.extract(r'(?P<sue>[a-z])', expand=False)
e = Series(['a', 'b', 'c'], name='sue')
tm.assert_series_equal(r, e)
self.assertEqual(r.name, e.name)
+ def test_extract_expand_True(self):
+ # Contains tests like those in test_match and some others.
+ values = Series(['fooBAD__barBAD', NA, 'foo'])
+ er = [NA, NA] # empty row
+
+ result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
+ exp = DataFrame([['BAD__', 'BAD'], er, er])
+ tm.assert_frame_equal(result, exp)
+
+ # mixed
+ mixed = Series(['aBAD_BAD', NA, 'BAD_b_BAD', True, datetime.today(),
+ 'foo', None, 1, 2.])
+
+ rs = Series(mixed).str.extract('.*(BAD[_]+).*(BAD)', expand=True)
+ exp = DataFrame([['BAD_', 'BAD'], er, ['BAD_', 'BAD'], er, er,
+ er, er, er, er])
+ tm.assert_frame_equal(rs, exp)
+
+ # unicode
+ values = Series([u('fooBAD__barBAD'), NA, u('foo')])
+
+ result = values.str.extract('.*(BAD[_]+).*(BAD)', expand=True)
+ exp = DataFrame([[u('BAD__'), u('BAD')], er, er])
+ tm.assert_frame_equal(result, exp)
+
+ # these should work for both Series and Index
+ for klass in [Series, Index]:
+ # no groups
+ s_or_idx = klass(['A1', 'B2', 'C3'])
+ f = lambda: s_or_idx.str.extract('[ABC][123]', expand=True)
+ self.assertRaises(ValueError, f)
+
+ # only non-capturing groups
+ f = lambda: s_or_idx.str.extract('(?:[AB]).*', expand=True)
+ self.assertRaises(ValueError, f)
+
+ # single group renames series/index properly
+ s_or_idx = klass(['A1', 'A2'])
+ result_df = s_or_idx.str.extract(r'(?P<uno>A)\d', expand=True)
+ result_series = result_df['uno']
+ tm.assert_numpy_array_equal(result_series, klass(['A', 'A']))
+
+ def test_extract_series(self):
+ # extract should give the same result whether or not the
+ # series has a name.
+ for series_name in None, "series_name":
+ s = Series(['A1', 'B2', 'C3'], name=series_name)
+ # one group, no matches
+ result = s.str.extract('(_)', expand=True)
+ exp = DataFrame([NA, NA, NA], dtype=object)
+ tm.assert_frame_equal(result, exp)
+
+ # two groups, no matches
+ result = s.str.extract('(_)(_)', expand=True)
+ exp = DataFrame([[NA, NA], [NA, NA], [NA, NA]], dtype=object)
+ tm.assert_frame_equal(result, exp)
+
+ # one group, some matches
+ result = s.str.extract('([AB])[123]', expand=True)
+ exp = DataFrame(['A', 'B', NA])
+ tm.assert_frame_equal(result, exp)
+
+ # two groups, some matches
+ result = s.str.extract('([AB])([123])', expand=True)
+ exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
+ tm.assert_frame_equal(result, exp)
+
+ # one named group
+ result = s.str.extract('(?P<letter>[AB])', expand=True)
+ exp = DataFrame({"letter": ['A', 'B', NA]})
+ tm.assert_frame_equal(result, exp)
+
+ # two named groups
+ result = s.str.extract(
+ '(?P<letter>[AB])(?P<number>[123])',
+ expand=True)
+ e_list = [
+ ['A', '1'],
+ ['B', '2'],
+ [NA, NA]
+ ]
+ exp = DataFrame(e_list, columns=['letter', 'number'])
+ tm.assert_frame_equal(result, exp)
+
+ # mix named and unnamed groups
+ result = s.str.extract('([AB])(?P<number>[123])', expand=True)
+ exp = DataFrame(e_list, columns=[0, 'number'])
+ tm.assert_frame_equal(result, exp)
+
+ # one normal group, one non-capturing group
+ result = s.str.extract('([AB])(?:[123])', expand=True)
+ exp = DataFrame(['A', 'B', NA])
+ tm.assert_frame_equal(result, exp)
+
+ def test_extract_optional_groups(self):
+
+ # two normal groups, one non-capturing group
+ result = Series(['A11', 'B22', 'C33']).str.extract(
+ '([AB])([123])(?:[123])', expand=True)
+ exp = DataFrame([['A', '1'], ['B', '2'], [NA, NA]])
+ tm.assert_frame_equal(result, exp)
+
+ # one optional group followed by one normal group
+ result = Series(['A1', 'B2', '3']).str.extract(
+ '(?P<letter>[AB])?(?P<number>[123])', expand=True)
+ e_list = [
+ ['A', '1'],
+ ['B', '2'],
+ [NA, '3']
+ ]
+ exp = DataFrame(e_list, columns=['letter', 'number'])
+ tm.assert_frame_equal(result, exp)
+
+ # one normal group followed by one optional group
+ result = Series(['A1', 'B2', 'C']).str.extract(
+ '(?P<letter>[ABC])(?P<number>[123])?', expand=True)
+ e_list = [
+ ['A', '1'],
+ ['B', '2'],
+ ['C', NA]
+ ]
+ exp = DataFrame(e_list, columns=['letter', 'number'])
+ tm.assert_frame_equal(result, exp)
+
+ # GH6348
+ # not passing index to the extractor
+ def check_index(index):
+ data = ['A1', 'B2', 'C']
+ index = index[:len(data)]
+ result = Series(data, index=index).str.extract('(\d)', expand=True)
+ exp = DataFrame(['1', '2', NA], index=index)
+ tm.assert_frame_equal(result, exp)
+
+ result = Series(data, index=index).str.extract(
+ '(?P<letter>\D)(?P<number>\d)?', expand=True)
+ e_list = [
+ ['A', '1'],
+ ['B', '2'],
+ ['C', NA]
+ ]
+ exp = DataFrame(e_list, columns=['letter', 'number'], index=index)
+ tm.assert_frame_equal(result, exp)
+
+ i_funs = [
+ tm.makeStringIndex, tm.makeUnicodeIndex, tm.makeIntIndex,
+ tm.makeDateIndex, tm.makePeriodIndex, tm.makeRangeIndex
+ ]
+ for index in i_funs:
+ check_index(index())
+
+ def test_extract_single_group_returns_frame(self):
+ # GH11386 extract should always return DataFrame, even when
+ # there is only one group. Prior to v0.18.0, extract returned
+ # Series when there was only one group in the regex.
+ s = Series(['a3', 'b3', 'c2'], name='series_name')
+ r = s.str.extract(r'(?P<letter>[a-z])', expand=True)
+ e = DataFrame({"letter": ['a', 'b', 'c']})
+ tm.assert_frame_equal(r, e)
+
+ def test_extractall(self):
+ subject_list = [
+            'dave@google.com',
+            'tdhock5@gmail.com',
+            'maudelaperriere@gmail.com',
+            'rob@gmail.com some text steve@gmail.com',
+            'a@b.com some text c@d.com and e@f.com',
+ np.nan,
+ "",
+ ]
+ expected_tuples = [
+ ("dave", "google", "com"),
+ ("tdhock5", "gmail", "com"),
+ ("maudelaperriere", "gmail", "com"),
+ ("rob", "gmail", "com"), ("steve", "gmail", "com"),
+ ("a", "b", "com"), ("c", "d", "com"), ("e", "f", "com"),
+ ]
+ named_pattern = r'''
+ (?P<user>[a-z0-9]+)
+ @
+ (?P<domain>[a-z]+)
+ \.
+ (?P<tld>[a-z]{2,4})
+ '''
+ expected_columns = ["user", "domain", "tld"]
+ S = Series(subject_list)
+ # extractall should return a DataFrame with one row for each
+ # match, indexed by the subject from which the match came.
+ expected_index = MultiIndex.from_tuples([
+ (0, 0),
+ (1, 0),
+ (2, 0),
+ (3, 0),
+ (3, 1),
+ (4, 0),
+ (4, 1),
+ (4, 2),
+ ], names=(None, "match"))
+ expected_df = DataFrame(
+ expected_tuples, expected_index, expected_columns)
+ computed_df = S.str.extractall(named_pattern, re.VERBOSE)
+ tm.assert_frame_equal(computed_df, expected_df)
+
+ # The index of the input Series should be used to construct
+ # the index of the output DataFrame:
+ series_index = MultiIndex.from_tuples([
+ ("single", "Dave"),
+ ("single", "Toby"),
+ ("single", "Maude"),
+ ("multiple", "robAndSteve"),
+ ("multiple", "abcdef"),
+ ("none", "missing"),
+ ("none", "empty"),
+ ])
+ Si = Series(subject_list, series_index)
+ expected_index = MultiIndex.from_tuples([
+ ("single", "Dave", 0),
+ ("single", "Toby", 0),
+ ("single", "Maude", 0),
+ ("multiple", "robAndSteve", 0),
+ ("multiple", "robAndSteve", 1),
+ ("multiple", "abcdef", 0),
+ ("multiple", "abcdef", 1),
+ ("multiple", "abcdef", 2),
+ ], names=(None, None, "match"))
+ expected_df = DataFrame(
+ expected_tuples, expected_index, expected_columns)
+ computed_df = Si.str.extractall(named_pattern, re.VERBOSE)
+ tm.assert_frame_equal(computed_df, expected_df)
+
+ # MultiIndexed subject with names.
+ Sn = Series(subject_list, series_index)
+ Sn.index.names = ("matches", "description")
+ expected_index.names = ("matches", "description", "match")
+ expected_df = DataFrame(
+ expected_tuples, expected_index, expected_columns)
+ computed_df = Sn.str.extractall(named_pattern, re.VERBOSE)
+ tm.assert_frame_equal(computed_df, expected_df)
+
+ # optional groups.
+ subject_list = ['', 'A1', '32']
+ named_pattern = '(?P<letter>[AB])?(?P<number>[123])'
+ computed_df = Series(subject_list).str.extractall(named_pattern)
+ expected_index = MultiIndex.from_tuples([
+ (1, 0),
+ (2, 0),
+ (2, 1),
+ ], names=(None, "match"))
+ expected_df = DataFrame([
+ ('A', '1'),
+ (NA, '3'),
+ (NA, '2'),
+ ], expected_index, columns=['letter', 'number'])
+ tm.assert_frame_equal(computed_df, expected_df)
+
+ # only one of two groups has a name.
+ pattern = '([AB])?(?P<number>[123])'
+ computed_df = Series(subject_list).str.extractall(pattern)
+ expected_df = DataFrame([
+ ('A', '1'),
+ (NA, '3'),
+ (NA, '2'),
+ ], expected_index, columns=[0, 'number'])
+ tm.assert_frame_equal(computed_df, expected_df)
+
+ def test_extractall_single_group(self):
+ # extractall(one named group) returns DataFrame with one named
+ # column.
+ s = Series(['a3', 'b3', 'd4c2'], name='series_name')
+ r = s.str.extractall(r'(?P<letter>[a-z])')
+ i = MultiIndex.from_tuples([
+ (0, 0),
+ (1, 0),
+ (2, 0),
+ (2, 1),
+ ], names=(None, "match"))
+ e = DataFrame({"letter": ['a', 'b', 'd', 'c']}, i)
+ tm.assert_frame_equal(r, e)
+
+ # extractall(one un-named group) returns DataFrame with one
+ # un-named column.
+ r = s.str.extractall(r'([a-z])')
+ e = DataFrame(['a', 'b', 'd', 'c'], i)
+ tm.assert_frame_equal(r, e)
+
+ def test_extractall_no_matches(self):
+ s = Series(['a3', 'b3', 'd4c2'], name='series_name')
+ # one un-named group.
+ r = s.str.extractall('(z)')
+ e = DataFrame(columns=[0])
+ tm.assert_frame_equal(r, e)
+ # two un-named groups.
+ r = s.str.extractall('(z)(z)')
+ e = DataFrame(columns=[0, 1])
+ tm.assert_frame_equal(r, e)
+ # one named group.
+ r = s.str.extractall('(?P<first>z)')
+ e = DataFrame(columns=["first"])
+ tm.assert_frame_equal(r, e)
+ # two named groups.
+ r = s.str.extractall('(?P<first>z)(?P<second>z)')
+ e = DataFrame(columns=["first", "second"])
+ tm.assert_frame_equal(r, e)
+ # one named, one un-named.
+ r = s.str.extractall('(z)(?P<second>z)')
+ e = DataFrame(columns=[0,
+ "second"])
+ tm.assert_frame_equal(r, e)
+
+ def test_extractall_errors(self):
+ # Does not make sense to use extractall with a regex that has
+ # no capture groups. (it returns DataFrame with one column for
+ # each capture group)
+ s = Series(['a3', 'b3', 'd4c2'], name='series_name')
+ with tm.assertRaisesRegexp(ValueError, "no capture groups"):
+ s.str.extractall(r'[a-z]')
+
+ def test_extract_index_one_two_groups(self):
+ s = Series(
+ ['a3', 'b3', 'd4c2'], ["A3", "B3", "D4"], name='series_name')
+ r = s.index.str.extract(r'([A-Z])', expand=True)
+ e = DataFrame(['A', "B", "D"])
+ tm.assert_frame_equal(r, e)
+
+ # Prior to v0.18.0, index.str.extract(regex with one group)
+ # returned Index. With more than one group, extract raised an
+ # error (GH9980). Now extract always returns DataFrame.
+ r = s.index.str.extract(
+ r'(?P<letter>[A-Z])(?P<digit>[0-9])', expand=True)
+ e_list = [
+ ("A", "3"),
+ ("B", "3"),
+ ("D", "4"),
+ ]
+ e = DataFrame(e_list, columns=["letter", "digit"])
+ tm.assert_frame_equal(r, e)
+
+ def test_extractall_same_as_extract(self):
+ s = Series(['a3', 'b3', 'c2'], name='series_name')
+
+ pattern_two_noname = r'([a-z])([0-9])'
+ extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
+ has_multi_index = s.str.extractall(pattern_two_noname)
+ no_multi_index = has_multi_index.xs(0, level="match")
+ tm.assert_frame_equal(extract_two_noname, no_multi_index)
+
+ pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
+ extract_two_named = s.str.extract(pattern_two_named, expand=True)
+ has_multi_index = s.str.extractall(pattern_two_named)
+ no_multi_index = has_multi_index.xs(0, level="match")
+ tm.assert_frame_equal(extract_two_named, no_multi_index)
+
+ pattern_one_named = r'(?P<group_name>[a-z])'
+ extract_one_named = s.str.extract(pattern_one_named, expand=True)
+ has_multi_index = s.str.extractall(pattern_one_named)
+ no_multi_index = has_multi_index.xs(0, level="match")
+ tm.assert_frame_equal(extract_one_named, no_multi_index)
+
+ pattern_one_noname = r'([a-z])'
+ extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
+ has_multi_index = s.str.extractall(pattern_one_noname)
+ no_multi_index = has_multi_index.xs(0, level="match")
+ tm.assert_frame_equal(extract_one_noname, no_multi_index)
+
+ def test_extractall_same_as_extract_subject_index(self):
+ # same as above tests, but s has an MultiIndex.
+ i = MultiIndex.from_tuples([
+ ("A", "first"),
+ ("B", "second"),
+ ("C", "third"),
+ ], names=("capital", "ordinal"))
+ s = Series(['a3', 'b3', 'c2'], i, name='series_name')
+
+ pattern_two_noname = r'([a-z])([0-9])'
+ extract_two_noname = s.str.extract(pattern_two_noname, expand=True)
+ has_match_index = s.str.extractall(pattern_two_noname)
+ no_match_index = has_match_index.xs(0, level="match")
+ tm.assert_frame_equal(extract_two_noname, no_match_index)
+
+ pattern_two_named = r'(?P<letter>[a-z])(?P<digit>[0-9])'
+ extract_two_named = s.str.extract(pattern_two_named, expand=True)
+ has_match_index = s.str.extractall(pattern_two_named)
+ no_match_index = has_match_index.xs(0, level="match")
+ tm.assert_frame_equal(extract_two_named, no_match_index)
+
+ pattern_one_named = r'(?P<group_name>[a-z])'
+ extract_one_named = s.str.extract(pattern_one_named, expand=True)
+ has_match_index = s.str.extractall(pattern_one_named)
+ no_match_index = has_match_index.xs(0, level="match")
+ tm.assert_frame_equal(extract_one_named, no_match_index)
+
+ pattern_one_noname = r'([a-z])'
+ extract_one_noname = s.str.extract(pattern_one_noname, expand=True)
+ has_match_index = s.str.extractall(pattern_one_noname)
+ no_match_index = has_match_index.xs(0, level="match")
+ tm.assert_frame_equal(extract_one_noname, no_match_index)
+
def test_empty_str_methods(self):
empty_str = empty = Series(dtype=str)
empty_int = Series(dtype=int)
@@ -670,9 +1084,18 @@ def test_empty_str_methods(self):
tm.assert_series_equal(empty_str, empty.str.replace('a', 'b'))
tm.assert_series_equal(empty_str, empty.str.repeat(3))
tm.assert_series_equal(empty_bool, empty.str.match('^a'))
- tm.assert_series_equal(empty_str, empty.str.extract('()'))
tm.assert_frame_equal(
- DataFrame(columns=[0, 1], dtype=str), empty.str.extract('()()'))
+ DataFrame(columns=[0], dtype=str),
+ empty.str.extract('()', expand=True))
+ tm.assert_frame_equal(
+ DataFrame(columns=[0, 1], dtype=str),
+ empty.str.extract('()()', expand=True))
+ tm.assert_series_equal(
+ empty_str,
+ empty.str.extract('()', expand=False))
+ tm.assert_frame_equal(
+ DataFrame(columns=[0, 1], dtype=str),
+ empty.str.extract('()()', expand=False))
tm.assert_frame_equal(DataFrame(dtype=str), empty.str.get_dummies())
tm.assert_series_equal(empty_str, empty_list.str.join(''))
tm.assert_series_equal(empty_int, empty.str.len())
| For a series `S`, the excellent `S.str.extract` method returns the first match in each subject of the series:
``` python
>>> import re
>>> import pandas as pd
>>> import numpy as np
>>> data = {
... 'Dave': '[email protected]',
... 'multiple': '[email protected] some text [email protected]',
... 'none': np.nan,
... }
>>> pattern = r'''
... (?P<user>[a-z]+)
... @
... (?P<domain>[a-z]+)
... \.
... (?P<tld>[a-z]{2,4})
... '''
>>> S = pd.Series(data)
>>> S.str.extract(pattern, re.VERBOSE)
user domain tld
Dave dave google com
multiple rob gmail com
none NaN NaN NaN
>>>
```
That's great, but sometimes we want to extract all matches in each element of the series. You can do that with `S.str.findall` but its result does not include the names specified in the capturing groups of the regular expression:
``` python
>>> S.str.findall(pattern, re.VERBOSE)
Dave [(dave, google, com)]
multiple [(rob, gmail, com), (steve, gmail, com)]
none NaN
dtype: object
>>>
```
I propose the `S.str.extractall` method which returns a `Series` the same length as the subject `S`. Each element of the series is a `DataFrame` with a row for each match and a column for each group:
``` python
>>> result = S.str.extractall(pattern, re.VERBOSE)
>>> result[0]
user domain tld
0 dave google com
>>> result[1]
user domain tld
0 rob gmail com
1 steve gmail com
>>> result[2]
Empty DataFrame
Columns: [user, domain, tld]
Index: []
>>>
```
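For concreteness, here is a rough pure-Python sketch of the proposed semantics built on `re.finditer` (the helper name and details are purely illustrative, not the actual implementation):
``` python
import re
import pandas as pd

def extractall_sketch(subjects, pattern, flags=0):
    # one DataFrame per subject: a row per match, a column per capture group
    regex = re.compile(pattern, flags=flags)
    group_names = {num: name for name, num in regex.groupindex.items()}
    columns = [group_names.get(i, i - 1) for i in range(1, regex.groups + 1)]
    frames = {}
    for label, subject in zip(subjects.index, subjects.values):
        if pd.isnull(subject):
            rows = []                       # NaN subjects yield an empty frame
        else:
            rows = [m.groups() for m in regex.finditer(subject)]
        frames[label] = pd.DataFrame(rows, columns=columns)
    return pd.Series(frames)
```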
Before I write any more testing code, can we start a discussion about whether or not this is an acceptable design choice, in relation to the other functionality of pandas? @sinhrks @jorisvandenbossche @jreback @mortada since you seem to be discussing extract in #10103
Also do you have any ideas about how to get the result (a Series of DataFrames) to print more nicely? With my current fork we have
``` python
>>> result
Dave user domain tld
0 dave google com
multiple user domain tld
0 rob gmail com
1 s...
none Empty DataFrame
Columns: [user, domain, tld]
I...
dtype: object
>>>
```
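One possible answer to the printing question, assuming the result stays a Series of DataFrames, is to stack the per-subject frames into a single DataFrame with a MultiIndex, which prints much more readably (the level names below are only illustrative; the tests above already exercise essentially this shape via a `match` level):
``` python
stacked = pd.concat(dict(zip(result.index, result.values)),
                    names=['subject', 'match'])
stacked
```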
In R the equivalent functionality is provided by the https://github.com/tdhock/namedCapture package (str_match_all_named returns a list of data.frames), and the resulting printout is readable because of the way that R prints lists:
``` r
> library(namedCapture)
> S <- c(
+ Dave='[email protected]',
+ multiple='[email protected] some text [email protected]',
+ none=NA)
> pattern <- paste0(
+ "(?P<user>[a-z]+)",
+ "@",
+ "(?P<domain>[a-z]+)",
+ "[.]",
+ "(?P<tld>[a-z]{2,4})")
> str_match_all_named(S, pattern)
$Dave
user domain tld
[1,] "dave" "google" "com"
$multiple
user domain tld
[1,] "rob" "gmail" "com"
[2,] "steve" "gmail" "com"
$none
<0 x 0 matrix>
>
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11386 | 2015-10-20T16:42:01Z | 2016-02-09T22:20:53Z | null | 2016-02-09T22:22:32Z |
BUG: .loc assignment of datetime with tz is coercing to naive #11365 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index ea2b85d983ade..2aaa6ea89fb6e 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -115,3 +115,4 @@ Bug Fixes
- Fixed a bug that prevented the construction of an empty series of dtype
``datetime64[ns, tz]`` (:issue:`11245`).
- Bug in ``DataFrame.to_dict()`` produces a ``np.datetime64`` object instead of ``Timestamp`` when only datetime is present in data (:issue:`11327`)
+- Bug in .loc assignment of datetime with tz is coercing to naive (:issue:`11365`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index c2c50bce04309..873f9e009a880 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -986,14 +986,15 @@ def _infer_fill_value(val):
if we are a NaT, return the correct dtyped element to provide proper block construction
"""
-
if not is_list_like(val):
val = [val]
- val = np.array(val,copy=False)
- if is_datetimelike(val):
- return np.array('NaT',dtype=val.dtype)
- elif is_object_dtype(val.dtype):
- dtype = lib.infer_dtype(_ensure_object(val))
+ v = np.array(val,copy=False)
+ if is_datetimelike(v):
+ if is_datetimetz(val):
+ return pd.DatetimeIndex(v, dtype=val.dtype)
+ return np.array('NaT',dtype=v.dtype)
+ elif is_object_dtype(v.dtype):
+ dtype = lib.infer_dtype(_ensure_object(v))
if dtype in ['datetime','datetime64']:
return np.array('NaT',dtype=_NS_DTYPE)
elif dtype in ['timedelta','timedelta64']:
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 90f85b3f4576d..00e6fc929a7a1 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -3446,6 +3446,13 @@ def test_loc_setitem_datetime(self):
expected = DataFrame({'one' : [100.0,200.0]},index=[dt1,dt2])
assert_frame_equal(df, expected)
+ def test_loc_setitem_datetimetz(self):
+ # GH 11365
+ idx = pd.date_range('20130101',periods=3,tz='US/Eastern')
+ df = DataFrame({'A': idx})
+ df.loc[[True,False,True],'B'] = idx
+ self.assert_equal(df['A'].dtype, df['B'].dtype)
+
def test_series_partial_set(self):
# partial set with new index
# Regression from GH4825
| closes #11365
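A minimal reproduction, taken from the test added above; after the fix the new column keeps the tz-aware dtype instead of being coerced to naive:
``` python
idx = pd.date_range('20130101', periods=3, tz='US/Eastern')
df = pd.DataFrame({'A': idx})
df.loc[[True, False, True], 'B'] = idx
df['B'].dtype   # now matches df['A'].dtype: datetime64[ns, US/Eastern]
```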
| https://api.github.com/repos/pandas-dev/pandas/pulls/11377 | 2015-10-19T23:13:25Z | 2015-10-19T23:20:00Z | null | 2015-11-15T18:41:09Z |
BUG: pivot table bug with Categorical indexes, #10993 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index ea2b85d983ade..bdfbf08b37e57 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -87,7 +87,7 @@ Bug Fixes
- Bug in list-like indexing with a mixed-integer Index (:issue:`11320`)
-
+- Bug in ``pivot_table`` with ``margins=True`` when indexes are of ``Categorical`` dtype (:issue:`10993`)
- Bug in ``DataFrame.plot`` cannot use hex strings colors (:issue:`10299`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index ede848c1103ab..7049ac33feac6 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -627,6 +627,10 @@ def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name,
dtype=dtype)
+ def _to_safe_for_reshape(self):
+ """ convert to object if we are a categorical """
+ return self
+
def to_datetime(self, dayfirst=False):
"""
For an Index containing strings or datetime.datetime objects, attempt
@@ -3190,6 +3194,10 @@ def duplicated(self, keep='first'):
from pandas.hashtable import duplicated_int64
return duplicated_int64(self.codes.astype('i8'), keep)
+ def _to_safe_for_reshape(self):
+ """ convert to object if we are a categorical """
+ return self.astype('object')
+
def get_loc(self, key, method=None):
"""
Get integer location for requested label
@@ -4529,6 +4537,10 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
else:
return result_levels
+ def _to_safe_for_reshape(self):
+ """ convert to object if we are a categorical """
+ return self.set_levels([ i._to_safe_for_reshape() for i in self.levels ])
+
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index d98121520b8b0..f1d82ec1f3b2e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -3427,6 +3427,9 @@ def insert(self, loc, item, value, allow_duplicates=False):
if not isinstance(loc, int):
raise TypeError("loc must be int")
+ # insert to the axis; this could possibly raise a TypeError
+ new_axis = self.items.insert(loc, item)
+
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
@@ -3449,8 +3452,7 @@ def insert(self, loc, item, value, allow_duplicates=False):
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
- self.axes[0] = self.items.insert(loc, item)
-
+ self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index 89fe9463282b6..de7a5f5a73f3d 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -189,7 +189,13 @@ def _add_margins(table, data, values, rows, cols, aggfunc):
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
- result = result.append(margin_dummy)
+ try:
+ result = result.append(margin_dummy)
+ except TypeError:
+
+ # we cannot reshape, so coerce the axis
+ result.index = result.index._to_safe_for_reshape()
+ result = result.append(margin_dummy)
result.index.names = row_names
return result
@@ -218,6 +224,7 @@ def _compute_grand_margin(data, values, aggfunc):
def _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin):
+
if len(cols) > 0:
# need to "interleave" the margins
table_pieces = []
@@ -235,7 +242,13 @@ def _all_key(key):
# we are going to mutate this, so need to copy!
piece = piece.copy()
- piece[all_key] = margin[key]
+ try:
+ piece[all_key] = margin[key]
+ except TypeError:
+
+ # we cannot reshape, so coerce the axis
+ piece.set_axis(cat_axis, piece._get_axis(cat_axis)._to_safe_for_reshape())
+ piece[all_key] = margin[key]
table_pieces.append(piece)
margin_keys.append(all_key)
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index 50ae574c03067..f0052774d66a2 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -719,6 +719,26 @@ def test_crosstab_dropna(self):
('two', 'dull'), ('two', 'shiny')])
assert_equal(res.columns.values, m.values)
+ def test_categorical_margins(self):
+ # GH 10989
+ df = pd.DataFrame({'x': np.arange(8),
+ 'y': np.arange(8) // 4,
+ 'z': np.arange(8) % 2})
+
+ expected = pd.DataFrame([[1.0, 2.0, 1.5],[5, 6, 5.5],[3, 4, 3.5]])
+ expected.index = Index([0,1,'All'],name='y')
+ expected.columns = Index([0,1,'All'],name='z')
+
+ data = df.copy()
+ table = data.pivot_table('x', 'y', 'z', margins=True)
+ tm.assert_frame_equal(table, expected)
+
+ data = df.copy()
+ data.y = data.y.astype('category')
+ data.z = data.z.astype('category')
+ table = data.pivot_table('x', 'y', 'z', margins=True)
+ tm.assert_frame_equal(table, expected)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| closes #10993
replaces #10989
So issue #10993 involves the insertion of a key into a multi-index that has a `CategoricalIndex` as one of its levels. This causes the semantics to break down because we are inserting a new key.
Existing
```
In [16]: df = DataFrame({'A' : [1,2], 'B' : [3,4] })
In [17]: df.columns = MultiIndex([pd.CategoricalIndex(list('ab')),[1,2]],[[0,1],[0,1]])
In [3]: df.columns.levels[0]
Out[3]: CategoricalIndex([u'a', u'b'], categories=[u'a', u'b'], ordered=False, dtype='category')
In [18]: df
Out[18]:
a b
1 2
0 1 3
1 2 4
In [19]: df.columns
Out[19]:
MultiIndex(levels=[[u'a', u'b'], [1, 2]],
labels=[[0, 1], [0, 1]])
In [20]: df[('c',3)] = 5
TypeError: cannot insert an item into a CategoricalIndex that is not already an existing category
```
New
```
In [4]: df[('c',3)] = 5
In [5]: df
Out[5]:
a b c
1 2 3
0 1 3 5
1 2 4 5
In [8]: df.columns.levels[0]
Out[8]: CategoricalIndex([u'a', u'b', u'c'], categories=[u'a', u'b', u'c'], ordered=False, dtype='category')
```
The only issue that was slightly controversial is that `.insert` will retain the `ordered` attribute (and new categories go to the end), while `.append` will always have `ordered=False`. In theory we could do the same, but `.append` is generally used to append _another_ `CategoricalIndex`, so you would have some possibility of interleaving of the 'ordered' categories (IOW, if self has `[1,2,3]` and other has `[3,2,1,4]`, then the result will be `[1,2,3,4]`).
We _could_ raise if we have mixed ordering (e.g. self is `ordered=False`, other is `ordered=True`).
Of course the user is free to reorder and such, but the default should be intuitive.
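For example (a sketch using the standard categorical accessors; exact behavior can vary by version), a user who wants a particular ordering can rearrange the categories after the insert:
```
ci = pd.CategoricalIndex(list('ab'), ordered=True)
ci = ci.add_categories(['c'])                        # new category lands at the end
ci = ci.reorder_categories(['c', 'a', 'b'], ordered=True)
```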
Further note that we can now concat pandas objects with `CategoricalIndexes` (I don't think this was specified before, and it certainly wasn't tested), e.g.
```
In [1]: df = DataFrame({'A' : np.arange(5)},index=pd.CategoricalIndex(list('aabbc')))
In [2]: df2 = DataFrame({'A' : np.arange(5)},index=pd.CategoricalIndex(list('bbcde')))
In [3]: pd.concat([df,df2])
Out[3]:
A
a 0
a 1
b 2
b 3
c 4
b 0
b 1
c 2
d 3
e 4
In [4]: pd.concat([df,df2]).index
Out[4]: CategoricalIndex([u'a', u'a', u'b', u'b', u'c', u'b', u'b', u'c', u'd', u'e'], categories=[u'a', u'b', u'c', u'd', u'e'], ordered=False, dtype='category')
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11371 | 2015-10-19T11:39:15Z | 2015-10-20T17:29:50Z | 2015-10-20T17:29:50Z | 2015-10-20T20:47:02Z |
interpreting index name in min_itemsize specification #11364, #10381 | diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 4e25b546bddf2..c2653089c54f7 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -2989,7 +2989,6 @@ def data_orientation(self):
def queryables(self):
""" return a dict of the kinds allowable columns for this object """
-
# compute the values_axes queryables
return dict(
[(a.cname, a) for a in self.index_axes] +
@@ -3090,6 +3089,13 @@ def validate_min_itemsize(self, min_itemsize):
return
q = self.queryables()
+
+ if ('index' in min_itemsize) and ('index' not in q): # issue #11364
+ for axname in self.index_axes:
+ #print("axname:" , axname.name)
+ min_itemsize[ axname.name ] = min_itemsize['index']
+ del min_itemsize['index']
+
for k, v in min_itemsize.items():
# ok, apply generally
@@ -3099,6 +3105,7 @@ def validate_min_itemsize(self, min_itemsize):
raise ValueError(
"min_itemsize has the key [%s] which is not an axis or "
"data_column" % k)
+ return min_itemsize
@property
def indexables(self):
@@ -3288,7 +3295,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
-
+
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
@@ -3318,15 +3325,17 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
# create axes to index and non_index
index_axes_map = dict()
+
for i, a in enumerate(obj.axes):
if i in axes:
- name = obj._AXIS_NAMES[i]
+ name = getattr(obj, obj._AXIS_NAMES[i]).name # obj._AXIS_NAMES[i]
+ if name is None:
+ name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
a, self.encoding, self.format_type
).set_name(name).set_axis(i)
else:
-
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
@@ -3346,6 +3355,7 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
self.non_index_axes.append((i, append_axis))
+
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info)
@@ -3353,11 +3363,6 @@ def create_axes(self, axes, obj, validate=True, nan_rep=None,
]
j = len(self.index_axes)
- # check for column conflicts
- if validate:
- for a in self.axes:
- a.maybe_set_size(min_itemsize=min_itemsize)
-
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
@@ -3455,10 +3460,7 @@ def get_blk_items(mgr, blocks):
% (b.dtype.name, b_items, str(detail))
)
j += 1
-
- # validate our min_itemsize
- self.validate_min_itemsize(min_itemsize)
-
+
# validate our metadata
self.validate_metadata(existing_table)
@@ -3466,6 +3468,15 @@ def get_blk_items(mgr, blocks):
if validate:
self.validate(existing_table)
+ # validate and correct our min_itemsize # issue #11364
+ min_itemsize = self.validate_min_itemsize(min_itemsize)
+
+ # check for column conflicts
+ if validate:
+ for a in self.axes:
+ a.maybe_set_size(min_itemsize=min_itemsize)
+
+
def process_axes(self, obj, columns=None):
""" process axes filters """
diff --git a/pandas/io/tests/test_hdf5_index_11364.py b/pandas/io/tests/test_hdf5_index_11364.py
new file mode 100644
index 0000000000000..3f2b0e9277d63
--- /dev/null
+++ b/pandas/io/tests/test_hdf5_index_11364.py
@@ -0,0 +1,71 @@
+import nose
+from nose import with_setup
+import pandas as pd
+import numpy as np
+import os, sys
+
+def create_test_file():
+ global xbed, xstore, xgroup
+ xbed = "testtable.tab"
+ xstore = 'tempstore.h5'
+ xgroup = "x"
+
+ col_nums = [0]
+ df = pd.DataFrame({"V1":["a","b","c","d","e", "aaaah!!!"],
+ "W":["c","d","c","d","c","c"],
+ "ZZZ":np.arange(6)})
+ df.set_index(["V1","W"], inplace = True)
+ df.to_csv( xbed, sep = "\t")
+
+
+def clear_files():
+ os.remove(xbed)
+ os.remove(xstore)
+
+def write_hdf5_11364(indexcols):
+ sep = "\t"
+ chunksize=5
+ try:
+ os.remove(xstore)
+ except OSError:
+ pass
+ # create a store
+ with pd.HDFStore(xstore) as store:
+ for nn, chunk in enumerate(pd.read_table(xbed, chunksize=chunksize, sep = sep, index_col= indexcols if not indexcols==["index"] else 0)):
+ #print(chunk.index.names)
+ store.append(xgroup, chunk, format = "table", min_itemsize = \
+ #{"index":32} if len(indexcols)==1 else \
+ dict(zip(chunk.index.names, [32]*len(chunk.index.names))))
+ print("chunk #" , nn, file = sys.stderr)
+
+ print("index columns:", indexcols, file = sys.stderr)
+ assert True
+
+def read_hdf5_11364(indexcols):
+ with pd.HDFStore(xstore) as store:
+ df = store.get(xgroup)
+ print(df.shape)
+ assert (df.shape==(6,3 - len(indexcols))), "wrong shape"
+
+@with_setup(create_test_file, clear_files )
+def test_write_read_hdf5_11364_indexcol():
+ indexcols = ["index"]
+ write_hdf5_11364(indexcols)
+ read_hdf5_11364(indexcols)
+ return
+
+@with_setup(create_test_file, clear_files )
+def test_write_read_hdf5_11364_1col():
+ indexcols =[0]
+ write_hdf5_11364(indexcols)
+ read_hdf5_11364(indexcols)
+ return
+
+@with_setup(create_test_file, clear_files )
+def test_write_read_hdf5_11364_2col():
+ indexcols =[0,1]
+ write_hdf5_11364(indexcols)
+ read_hdf5_11364(indexcols)
+ return
+
+
| closes #10381
The fix [replaces the generic axis names in `Table.index_axes` with the index's own names](https://github.com/pydata/pandas/compare/master...DSLituiev:master#diff-1b15d1477da3a0548d2dd72a5d023d00L3323) and [corrects `min_itemsize` when it contains an 'index' entry that does not match any axis in `Table.index_axes`](https://github.com/pydata/pandas/compare/master...DSLituiev:master#diff-1b15d1477da3a0548d2dd72a5d023d00R3093).
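In terms of usage, the intent is that `min_itemsize` can now be keyed by the index's own name(s) as well as by the generic `'index'` key; a hedged sketch (file, column names and sizes are made up):
``` python
import pandas as pd

with pd.HDFStore('store.h5', mode='w') as store:
    for chunk in pd.read_table('data.tab', sep='\t', chunksize=5,
                               index_col=['V1', 'W']):
        store.append('x', chunk, format='table',
                     min_itemsize={'V1': 32, 'W': 32})
```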
| https://api.github.com/repos/pandas-dev/pandas/pulls/11368 | 2015-10-19T06:23:48Z | 2015-11-18T20:16:52Z | null | 2015-11-18T23:56:16Z |
BUG: GH11235 where pd.eval doesn't handle unary ops in lists | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 989b05003d76f..afb143ac78df1 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -94,7 +94,7 @@ Bug Fixes
-
+- Bug in ``pd.eval`` where unary ops in a list error (:issue:`11235`)
- Bug in ``squeeze()`` with zero length arrays (:issue:`11230`, :issue:`8999`)
diff --git a/pandas/computation/expr.py b/pandas/computation/expr.py
index 2ae6f29f74efc..6da5cf4753a8e 100644
--- a/pandas/computation/expr.py
+++ b/pandas/computation/expr.py
@@ -427,7 +427,7 @@ def visit_Str(self, node, **kwargs):
return self.term_type(name, self.env)
def visit_List(self, node, **kwargs):
- name = self.env.add_tmp([self.visit(e).value for e in node.elts])
+ name = self.env.add_tmp([self.visit(e)(self.env) for e in node.elts])
return self.term_type(name, self.env)
visit_Tuple = visit_List
@@ -655,7 +655,7 @@ def visitor(x, y):
return reduce(visitor, operands)
# ast.Call signature changed on 3.5,
-# conditionally change which methods is named
+# conditionally change which methods is named
# visit_Call depending on Python version, #11097
if compat.PY35:
BaseExprVisitor.visit_Call = BaseExprVisitor.visit_Call_35
diff --git a/pandas/computation/tests/test_eval.py b/pandas/computation/tests/test_eval.py
index 8db0b82f1aa2e..7474c0d118612 100644
--- a/pandas/computation/tests/test_eval.py
+++ b/pandas/computation/tests/test_eval.py
@@ -29,7 +29,7 @@
import pandas.computation.expr as expr
import pandas.util.testing as tm
from pandas.util.testing import (assert_frame_equal, randbool,
- assertRaisesRegexp,
+ assertRaisesRegexp, assert_numpy_array_equal,
assert_produces_warning, assert_series_equal)
from pandas.compat import PY3, u, reduce
@@ -609,6 +609,16 @@ def test_scalar_unary(self):
self.assertEqual(
pd.eval('+False', parser=self.parser, engine=self.engine), +False)
+ def test_unary_in_array(self):
+ # GH 11235
+ assert_numpy_array_equal(
+ pd.eval('[-True, True, ~True, +True,'
+ '-False, False, ~False, +False,'
+ '-37, 37, ~37, +37]'),
+ np.array([-True, True, ~True, +True,
+ -False, False, ~False, +False,
+ -37, 37, ~37, +37]))
+
def test_disallow_scalar_bool_ops(self):
exprs = '1 or 2', '1 and 2'
exprs += 'a and b', 'a or b'
@@ -1256,6 +1266,13 @@ def f():
expected['c'] = expected['a'] + expected['b']
assert_frame_equal(df, expected)
+ def test_column_in(self):
+ # GH 11235
+ df = DataFrame({'a': [11], 'b': [-32]})
+ result = df.eval('a in [11, -32]')
+ expected = Series([True])
+ assert_series_equal(result, expected)
+
def test_basic_period_index_boolean_expression(self):
df = mkdf(2, 2, data_gen_f=f, c_idx_type='p', r_idx_type='i')
| closes #11235
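The change makes expressions like those in the new tests work, e.g.:
``` python
df = pd.DataFrame({'a': [11], 'b': [-32]})
df.eval('a in [11, -32]')       # Series([True]); the negative literal no longer errors
pd.eval('[-37, 37, ~37, +37]')  # unary ops inside a list literal now evaluate
```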
| https://api.github.com/repos/pandas-dev/pandas/pulls/11366 | 2015-10-19T05:40:18Z | 2015-10-23T20:41:12Z | 2015-10-23T20:41:12Z | 2015-10-23T20:41:17Z |
fixed pathlib tests on windows | diff --git a/pandas/io/tests/test_common.py b/pandas/io/tests/test_common.py
index 003068a702246..73cae1130c740 100644
--- a/pandas/io/tests/test_common.py
+++ b/pandas/io/tests/test_common.py
@@ -43,12 +43,12 @@ def test_stringify_path_pathlib(self):
rel_path = common._stringify_path(Path('.'))
self.assertEqual(rel_path, '.')
redundant_path = common._stringify_path(Path('foo//bar'))
- self.assertEqual(redundant_path, 'foo/bar')
+ self.assertEqual(redundant_path, os.path.join('foo', 'bar'))
def test_stringify_path_localpath(self):
tm._skip_if_no_localpath()
- path = 'foo/bar'
+ path = os.path.join('foo', 'bar')
abs_path = os.path.abspath(path)
lpath = LocalPath(path)
self.assertEqual(common._stringify_path(lpath), abs_path)
| As requested here: https://github.com/pydata/pandas/issues/11033#issuecomment-148924778
I can't test on Windows myself, unfortunately.
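For context, the failures come from hard-coded `/` separators; `os.path.join` gives the platform-appropriate separator, which is what the updated assertions rely on:
``` python
import os
os.path.join('foo', 'bar')   # 'foo/bar' on POSIX, 'foo\\bar' on Windows
```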
| https://api.github.com/repos/pandas-dev/pandas/pulls/11362 | 2015-10-18T08:36:40Z | 2015-10-18T16:02:57Z | 2015-10-18T16:02:57Z | 2015-10-18T16:03:53Z |
DOC: added exp weighting clarifications from #8861 | diff --git a/doc/source/computation.rst b/doc/source/computation.rst
index dfb9fab19bf31..cc114a2519d92 100644
--- a/doc/source/computation.rst
+++ b/doc/source/computation.rst
@@ -528,10 +528,18 @@ In general, a weighted moving average is calculated as
y_t = \frac{\sum_{i=0}^t w_i x_{t-i}}{\sum_{i=0}^t w_i},
-where :math:`x_t` is the input at :math:`y_t` is the result.
+where :math:`x_t` is the input and :math:`y_t` is the result.
+
+The EW functions support two variants of exponential weights.
+The default, ``adjust=True``, uses the weights :math:`w_i = (1 - \alpha)^i`
+which gives
+
+.. math::
+
+ y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ...
+ + (1 - \alpha)^t x_{0}}{1 + (1 - \alpha) + (1 - \alpha)^2 + ...
+ + (1 - \alpha)^t}
-The EW functions support two variants of exponential weights:
-The default, ``adjust=True``, uses the weights :math:`w_i = (1 - \alpha)^i`.
When ``adjust=False`` is specified, moving averages are calculated as
.. math::
@@ -556,6 +564,34 @@ which is equivalent to using weights
y_t = \alpha' y_{t-1} + (1 - \alpha') x_t.
+The difference between the above two variants arises because we are
+dealing with series which have finite history. Consider a series of infinite
+history:
+
+.. math::
+
+ y_t = \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ...}
+ {1 + (1 - \alpha) + (1 - \alpha)^2 + ...}
+
+Noting that the denominator is a geometric series with initial term equal to 1
+and a ratio of :math:`1 - \alpha` we have
+
+.. math::
+
+ y_t &= \frac{x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ...}
+ {\frac{1}{1 - (1 - \alpha)}}\\
+ &= [x_t + (1 - \alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ...] \alpha \\
+ &= \alpha x_t + [(1-\alpha)x_{t-1} + (1 - \alpha)^2 x_{t-2} + ...]\alpha \\
+ &= \alpha x_t + (1 - \alpha)[x_{t-1} + (1 - \alpha) x_{t-2} + ...]\alpha\\
+ &= \alpha x_t + (1 - \alpha) y_{t-1}
+
+which shows the equivalence of the above two variants for infinite series.
+When ``adjust=True`` we have :math:`y_0 = x_0` and from the last
+representation above we have :math:`y_t = \alpha x_t + (1 - \alpha) y_{t-1}`,
+therefore there is an assumption that :math:`x_0` is not an ordinary value
+but rather an exponentially weighted moment of the infinite series up to that
+point.
+
One must have :math:`0 < \alpha \leq 1`, but rather than pass :math:`\alpha`
directly, it's easier to think about either the **span**, **center of mass
(com)** or **halflife** of an EW moment:
diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index c4791c43278b9..ab49459ff7668 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -124,6 +124,9 @@
relative positions. For example, the weights of x and y used in calculating
the final weighted average of [x, None, y] are 1-alpha and 1 (if adjust is
True), and 1-alpha and alpha (if adjust is False).
+
+More details can be found at
+http://pandas.pydata.org/pandas-docs/stable/computation.html#exponentially-weighted-moment-functions
"""
_expanding_kw = """min_periods : int, default None
| I integrated the comments from @seth-p in #8861 into the docs for the exponentially weighted moment functions.
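As a quick numerical sanity check of the `adjust=True` formula being documented (written against the 0.17-era `pd.ewma`; later versions spell it `Series.ewm(...).mean()`):
``` python
import numpy as np
import pandas as pd

s = pd.Series([1.0, 2.0, 3.0])
alpha = 0.5                                  # span=3  ->  alpha = 2 / (span + 1)
w = (1 - alpha) ** np.arange(len(s))         # weights (1 - alpha)**i

# y_t = sum_i w_i * x_{t-i} / sum_i w_i
manual = [np.dot(s.values[t::-1], w[:t + 1]) / w[:t + 1].sum()
          for t in range(len(s))]

print(manual)                                    # [1.0, 1.666..., 2.428...]
print(pd.ewma(s, span=3, adjust=True).tolist())  # matches the manual computation
```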
| https://api.github.com/repos/pandas-dev/pandas/pulls/11361 | 2015-10-18T04:23:02Z | 2015-11-01T10:27:53Z | 2015-11-01T10:27:53Z | 2015-11-01T10:28:53Z |
added a note on index creation for pandas dataframe stored in hdf5 file | diff --git a/doc/source/cookbook.rst b/doc/source/cookbook.rst
index 0b05f062f5fce..907f53a54462d 100644
--- a/doc/source/cookbook.rst
+++ b/doc/source/cookbook.rst
@@ -1077,6 +1077,16 @@ Storing Attributes to a group node
store.close()
os.remove('test.h5')
+How to create an index for a pandas ``DataFrame`` stored in an HDF5 file. This is useful after appending many chunks of data to the store with index creation turned off; index creation is deliberately skipped during the appends to avoid the costly computation each time.
+.. ipython:: python
+
+ df = DataFrame(randn(10,2),columns=list('AB')).to_hdf('test.h5','df',data_columns=['B'],mode='w',table=True)
+ store = pd.HDFStore('test.h5')
+
+ # create index
+ store.create_table_index('df',columns=['B'],optlevel=9,kind='full')
+ store.close()
+
.. _cookbook.binary:
| Based on my discussion with Jeff
http://stackoverflow.com/questions/25714549/indexing-and-data-columns-in-pandas-pytables/25715005?noredirect=1#comment53932720_25715005
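For reference, the full pattern the note is about — append chunks with per-append index creation switched off, then build the index once at the end (file name and sizes are arbitrary):
``` python
import numpy as np
import pandas as pd

with pd.HDFStore('test.h5', mode='w') as store:
    for _ in range(3):
        chunk = pd.DataFrame(np.random.randn(10, 2), columns=list('AB'))
        store.append('df', chunk, data_columns=['B'], index=False)  # skip indexing here
    # create the (costly) index once, after all appends
    store.create_table_index('df', columns=['B'], optlevel=9, kind='full')
```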
| https://api.github.com/repos/pandas-dev/pandas/pulls/11358 | 2015-10-17T20:32:06Z | 2015-11-13T16:20:55Z | null | 2015-11-13T16:20:55Z |
WIP: avoid some numpy warnings #8537 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 7c4701b61c18d..ea2b85d983ade 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -41,6 +41,7 @@ API changes
- Regression from 0.16.2 for output formatting of long floats/nan, restored in (:issue:`11302`)
- Prettyprinting sets (e.g. in DataFrame cells) now uses set literal syntax (``{x, y}``) instead of
Legacy Python syntax (``set([x, y])``) (:issue:`11215`)
+- Indexing with a null key will raise a ``TypeError``, instead of a ``ValueError`` (:issue:`11356`)
.. _whatsnew_0171.deprecations:
diff --git a/pandas/core/common.py b/pandas/core/common.py
index c6e774b5077db..c2c50bce04309 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -444,14 +444,24 @@ def mask_missing(arr, values_to_mask):
mask = None
for x in nonna:
if mask is None:
- mask = arr == x
+
+ # numpy elementwise comparison warning
+ if is_numeric_v_string_like(arr, x):
+ mask = False
+ else:
+ mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if np.isscalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
- mask |= arr == x
+
+ # numpy elementwise comparison warning
+ if is_numeric_v_string_like(arr, x):
+ mask |= False
+ else:
+ mask |= arr == x
if na_mask.any():
if mask is None:
@@ -2382,6 +2392,9 @@ def _maybe_make_list(obj):
is_complex = lib.is_complex
+def is_string_like(obj):
+ return isinstance(obj, (compat.text_type, compat.string_types))
+
def is_iterator(obj):
# python 3 generators have __next__ instead of next
return hasattr(obj, 'next') or hasattr(obj, '__next__')
@@ -2525,6 +2538,27 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype):
return issubclass(tipo, (np.datetime64, np.timedelta64))
+def is_numeric_v_string_like(a, b):
+ """
+ numpy doesn't like to compare numeric arrays vs scalar string-likes
+
+ return a boolean result if this is the case for a,b or b,a
+
+ """
+ is_a_array = isinstance(a, np.ndarray)
+ is_b_array = isinstance(b, np.ndarray)
+
+ is_a_numeric_array = is_a_array and is_numeric_dtype(a)
+ is_b_numeric_array = is_b_array and is_numeric_dtype(b)
+
+ is_a_scalar_string_like = not is_a_array and is_string_like(a)
+ is_b_scalar_string_like = not is_b_array and is_string_like(b)
+
+ return (
+ is_a_numeric_array and is_b_scalar_string_like) or (
+ is_b_numeric_array and is_a_scalar_string_like
+ )
+
def is_datetimelike_v_numeric(a, b):
# return if we have an i8 convertible and numeric comparision
if not hasattr(a,'dtype'):
diff --git a/pandas/core/index.py b/pandas/core/index.py
index b4c690fe8973b..ede848c1103ab 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -862,9 +862,10 @@ def to_int():
return self._invalid_indexer('label', key)
if is_float(key):
- if not self.is_floating():
- warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
- type(self).__name__), FutureWarning, stacklevel=3)
+ if isnull(key):
+ return self._invalid_indexer('label', key)
+ warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
+ type(self).__name__), FutureWarning, stacklevel=3)
return to_int()
return key
@@ -3721,9 +3722,23 @@ def astype(self, dtype):
return Index(self._values, name=self.name, dtype=dtype)
def _convert_scalar_indexer(self, key, kind=None):
+ """
+ convert a scalar indexer
+
+ Parameters
+ ----------
+ key : label of the slice bound
+ kind : optional, type of the indexing operation (loc/ix/iloc/None)
+
+ right now we are converting
+ floats -> ints if the index supports it
+ """
+
if kind == 'iloc':
- return super(Float64Index, self)._convert_scalar_indexer(key,
- kind=kind)
+ if is_integer(key):
+ return key
+ return super(Float64Index, self)._convert_scalar_indexer(key, kind=kind)
+
return key
def _convert_slice_indexer(self, key, kind=None):
@@ -4276,7 +4291,7 @@ def _reference_duplicate_name(self, name):
Returns True if the name refered to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
- return np.sum(name == np.asarray(self.names)) > 1
+ return sum(name == n for n in self.names) > 1
def _format_native_types(self, **kwargs):
return self.values
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8b4528ef451ef..5eb25a53d4533 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1285,7 +1285,7 @@ def _has_valid_type(self, key, axis):
def error():
if isnull(key):
- raise ValueError(
+ raise TypeError(
"cannot use label indexing with a null key")
raise KeyError("the label [%s] is not in the [%s]" %
(key, self.obj._get_axis_name(axis)))
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index ed4d6a6ccd73e..d98121520b8b0 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -17,7 +17,7 @@
is_datetime64tz_dtype, is_datetimetz, is_sparse,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric,
- is_internal_type)
+ is_numeric_v_string_like, is_internal_type)
from pandas.core.dtypes import DatetimeTZDtype
from pandas.core.index import Index, MultiIndex, _ensure_index
@@ -1082,8 +1082,16 @@ def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None):
# get the result, may need to transpose the other
def get_result(other):
- # compute
- result = func(values, other)
+ # avoid numpy warning of comparisons again None
+ if other is None:
+ result = not func.__name__ == 'eq'
+
+ # avoid numpy warning of elementwise comparisons to object
+ elif is_numeric_v_string_like(values, other):
+ result = False
+
+ else:
+ result = func(values, other)
# mask if needed
if isinstance(values_mask, np.ndarray) and values_mask.any():
@@ -3214,7 +3222,7 @@ def get(self, item, fastpath=True):
else:
if isnull(item):
- raise ValueError("cannot label index with a null key")
+ raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
@@ -4251,11 +4259,16 @@ def _possibly_compare(a, b, op):
# numpy deprecation warning to have i8 vs integer comparisions
if is_datetimelike_v_numeric(a, b):
- res = False
+ result = False
+
+ # numpy deprecation warning if comparing numeric vs string-like
+ elif is_numeric_v_string_like(a, b):
+ result = False
+
else:
- res = op(a, b)
+ result = op(a, b)
- if np.isscalar(res) and (is_a_array or is_b_array):
+ if lib.isscalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
@@ -4265,7 +4278,7 @@ def _possibly_compare(a, b, op):
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
- return res
+ return result
def _concat_indexes(indexes):
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 310b165101bdf..ac6f14e846bec 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -1024,7 +1024,7 @@ def _validate_expiry(self, expiry):
if expiry in expiry_dates:
return expiry
else:
- index = DatetimeIndex(expiry_dates).order()
+ index = DatetimeIndex(expiry_dates).sort_values()
return index[index.date >= expiry][0].date()
def get_forward_data(self, months, call=True, put=False, near=False,
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index a45f4bf1726f2..dc0e0e2670565 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5839,7 +5839,7 @@ def check(df):
def f():
df.loc[:,np.nan]
- self.assertRaises(ValueError, f)
+ self.assertRaises(TypeError, f)
df = DataFrame([[1,2,3],[4,5,6]], index=[1,np.nan])
| closes #8537
- avoid numpy comparison to `None` warnings
- change null indexing from `ValueError` to `TypeError` (more consistent with other errors)
- elementwise comparisons with index names xref #11162
- elementwise comparisons vs string-likes
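For illustration, these are the kinds of operations involved (a sketch; whether the raw numpy forms warn depends on the numpy version):
``` python
import numpy as np
import pandas as pd

s = pd.Series([1, 2, 3])
s == 'a'             # numeric vs string-like: all-False result, without numpy's elementwise warning
s == None            # comparison against None: handled explicitly rather than handed to numpy

df = pd.DataFrame([[1, 2, 3], [4, 5, 6]], index=[1, np.nan])
df.loc[:, np.nan]    # indexing with a null key now raises TypeError (was ValueError)
```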
| https://api.github.com/repos/pandas-dev/pandas/pulls/11356 | 2015-10-17T16:30:40Z | 2015-10-18T23:52:37Z | 2015-10-18T23:52:37Z | 2015-10-18T23:52:37Z |
PERF: fast inf checking in to_excel | diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index dcea59545aae3..6a739873a032f 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -43,6 +43,7 @@
"numexpr": [],
"pytables": [],
"openpyxl": [],
+ "xlsxwriter": [],
"xlrd": [],
"xlwt": []
},
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index e9614ec4f2290..05c99e38e8511 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -62,6 +62,8 @@ Performance Improvements
- Release the GIL on most datetime field operations (e.g. ``DatetimeIndex.year``, ``Series.dt.year``), normalization, and conversion to and from ``Period``, ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestamp`` (:issue:`11263`)
+- Improved performance to ``to_excel`` (:issue:`11352`)
+
.. _whatsnew_0171.bug_fixes:
Bug Fixes
diff --git a/pandas/core/format.py b/pandas/core/format.py
index e4aa1eac248d5..4a3b1d02c9422 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1708,9 +1708,9 @@ def _format_value(self, val):
if lib.checknull(val):
val = self.na_rep
elif com.is_float(val):
- if np.isposinf(val):
+ if lib.isposinf_scalar(val):
val = self.inf_rep
- elif np.isneginf(val):
+ elif lib.isneginf_scalar(val):
val = '-%s' % self.inf_rep
elif self.float_format is not None:
val = float(self.float_format % val)
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 2b4974155d44c..74842d9a165fe 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -269,6 +269,18 @@ cpdef checknull_old(object val):
else:
return util._checknull(val)
+cpdef isposinf_scalar(object val):
+ if util.is_float_object(val) and val == INF:
+ return True
+ else:
+ return False
+
+cpdef isneginf_scalar(object val):
+ if util.is_float_object(val) and val == NEGINF:
+ return True
+ else:
+ return False
+
def isscalar(object val):
"""
Return True if given value is scalar.
diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py
index cfc98f5c20360..a24f71482c404 100644
--- a/pandas/tests/test_lib.py
+++ b/pandas/tests/test_lib.py
@@ -161,6 +161,19 @@ def test_maybe_indices_to_slice_middle(self):
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+ def test_isinf_scalar(self):
+ #GH 11352
+ self.assertTrue(lib.isposinf_scalar(float('inf')))
+ self.assertTrue(lib.isposinf_scalar(np.inf))
+ self.assertFalse(lib.isposinf_scalar(-np.inf))
+ self.assertFalse(lib.isposinf_scalar(1))
+ self.assertFalse(lib.isposinf_scalar('a'))
+
+ self.assertTrue(lib.isneginf_scalar(float('-inf')))
+ self.assertTrue(lib.isneginf_scalar(-np.inf))
+ self.assertFalse(lib.isneginf_scalar(np.inf))
+ self.assertFalse(lib.isneginf_scalar(1))
+ self.assertFalse(lib.isneginf_scalar('a'))
class Testisscalar(tm.TestCase):
@@ -232,4 +245,4 @@ def test_lisscalar_pandas_containers(self):
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
- exit=False)
\ No newline at end of file
+ exit=False)
| Adds new functions to check for infinity rather than calling `np.isposinf` and `np.isneginf`, which were (surprising to me) a significant drag on `to_excel`.
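The idea, roughly — a pure-Python sketch of what the new cython helpers in `pandas/lib.pyx` do for the scalar case:
``` python
import numpy as np

def isposinf_scalar(val):
    # cheap scalar check; avoids dispatching to np.isposinf for every cell
    return isinstance(val, (float, np.floating)) and val == np.inf

isposinf_scalar(np.inf)    # True
isposinf_scalar(-np.inf)   # False
isposinf_scalar('a')       # False
```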
I also added `xlsxwriter` to the asv build configuration. `openpyxl` is still failing; I'm assuming it's something to do with the specific version on conda?
```
before after ratio
[472e6e0e] [8002555d]
156.36ms 154.16ms 0.99 packers.packers_read_excel.time_packers_read_excel
failed failed n/a packers.packers_write_excel_openpyxl.time_packers_write_excel_openpyxl
469.69ms 357.49ms 0.76 packers.packers_write_excel_xlsxwriter.time_packers_write_excel_xlsxwriter
368.96ms 270.60ms 0.73 packers.packers_write_excel_xlwt.time_packers_write_excel_xlwt
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11352 | 2015-10-17T01:20:27Z | 2015-10-17T15:08:44Z | 2015-10-17T15:08:44Z | 2015-10-21T22:41:46Z |
DEPR: remove some SparsePanel deprecation warnings in testing | diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 08ef82835830c..da0ab7bc59440 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -680,8 +680,8 @@ def _combine(self, other, func, axis=0):
elif np.isscalar(other):
return self._combine_const(other, func)
else:
- raise NotImplementedError(str(type(other)) +
- ' is not supported in combine operation with ' +
+ raise NotImplementedError(str(type(other)) +
+ ' is not supported in combine operation with ' +
str(type(self)))
def _combine_const(self, other, func):
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index a86942718091c..9ce08c550dd0d 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -39,10 +39,6 @@
from pandas.sparse.tests.test_array import assert_sp_array_equal
-import warnings
-warnings.filterwarnings(action='ignore', category=FutureWarning)
-
-
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
@@ -503,15 +499,6 @@ def check(a, b):
result = self.bseries + self.bseries.to_dense()
assert_sp_series_equal(result, self.bseries + self.bseries)
- # @dec.knownfailureif(True, 'Known NumPy failer as of 1.5.1')
- def test_operators_corner2(self):
- raise nose.SkipTest('known failer on numpy 1.5.1')
-
- # NumPy circumvents __r*__ operations
- val = np.float64(3.0)
- result = val - self.zbseries
- assert_sp_series_equal(result, 3 - self.zbseries)
-
def test_binary_operators(self):
# skipping for now #####
@@ -1778,20 +1765,23 @@ def setUp(self):
'ItemC': panel_data3(),
'ItemD': panel_data1(),
}
- self.panel = SparsePanel(self.data_dict)
+ with tm.assert_produces_warning(FutureWarning):
+ self.panel = SparsePanel(self.data_dict)
@staticmethod
def _test_op(panel, op):
# arithmetic tests
- result = op(panel, 1)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = op(panel, 1)
assert_sp_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_constructor(self):
- self.assertRaises(ValueError, SparsePanel, self.data_dict,
- items=['Item0', 'ItemA', 'ItemB'])
- with tm.assertRaisesRegexp(TypeError,
- "input must be a dict, a 'list' was passed"):
- SparsePanel(['a', 'b', 'c'])
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ self.assertRaises(ValueError, SparsePanel, self.data_dict,
+ items=['Item0', 'ItemA', 'ItemB'])
+ with tm.assertRaisesRegexp(TypeError,
+ "input must be a dict, a 'list' was passed"):
+ SparsePanel(['a', 'b', 'c'])
# deprecation GH11157
def test_deprecation(self):
@@ -1800,13 +1790,15 @@ def test_deprecation(self):
# GH 9272
def test_constructor_empty(self):
- sp = SparsePanel()
+ with tm.assert_produces_warning(FutureWarning):
+ sp = SparsePanel()
self.assertEqual(len(sp.items), 0)
self.assertEqual(len(sp.major_axis), 0)
self.assertEqual(len(sp.minor_axis), 0)
def test_from_dict(self):
- fd = SparsePanel.from_dict(self.data_dict)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ fd = SparsePanel.from_dict(self.data_dict)
assert_sp_panel_equal(fd, self.panel)
def test_pickle(self):
@@ -1830,21 +1822,25 @@ def test_to_dense(self):
assert_panel_equal(dwp, dwp2)
def test_to_frame(self):
- def _compare_with_dense(panel):
- slp = panel.to_frame()
- dlp = panel.to_dense().to_frame()
- self.assert_numpy_array_equal(slp.values, dlp.values)
- self.assertTrue(slp.index.equals(dlp.index))
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+
+ def _compare_with_dense(panel):
+ slp = panel.to_frame()
+ dlp = panel.to_dense().to_frame()
- _compare_with_dense(self.panel)
- _compare_with_dense(self.panel.reindex(items=['ItemA']))
+ self.assert_numpy_array_equal(slp.values, dlp.values)
+ self.assertTrue(slp.index.equals(dlp.index))
- zero_panel = SparsePanel(self.data_dict, default_fill_value=0)
- self.assertRaises(Exception, zero_panel.to_frame)
+ _compare_with_dense(self.panel)
+ _compare_with_dense(self.panel.reindex(items=['ItemA']))
- self.assertRaises(Exception, self.panel.to_frame,
- filter_observations=False)
+ with tm.assert_produces_warning(FutureWarning):
+ zero_panel = SparsePanel(self.data_dict, default_fill_value=0)
+ self.assertRaises(Exception, zero_panel.to_frame)
+
+ self.assertRaises(Exception, self.panel.to_frame,
+ filter_observations=False)
def test_long_to_wide_sparse(self):
pass
@@ -1885,47 +1881,53 @@ def test_delitem_pop(self):
self.assertRaises(KeyError, self.panel.__delitem__, 'ItemC')
def test_copy(self):
- cop = self.panel.copy()
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ cop = self.panel.copy()
assert_sp_panel_equal(cop, self.panel)
def test_reindex(self):
- def _compare_with_dense(swp, items, major, minor):
- swp_re = swp.reindex(items=items, major=major,
- minor=minor)
- dwp_re = swp.to_dense().reindex(items=items, major=major,
- minor=minor)
- assert_panel_equal(swp_re.to_dense(), dwp_re)
-
- _compare_with_dense(self.panel, self.panel.items[:2],
- self.panel.major_axis[::2],
- self.panel.minor_axis[::2])
- _compare_with_dense(self.panel, None,
- self.panel.major_axis[::2],
- self.panel.minor_axis[::2])
-
- self.assertRaises(ValueError, self.panel.reindex)
-
- # TODO: do something about this later...
- self.assertRaises(Exception, self.panel.reindex,
- items=['item0', 'ItemA', 'ItemB'])
-
- # test copying
- cp = self.panel.reindex(self.panel.major_axis, copy=True)
- cp['ItemA']['E'] = cp['ItemA']['A']
- self.assertNotIn('E', self.panel['ItemA'])
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+
+ def _compare_with_dense(swp, items, major, minor):
+ swp_re = swp.reindex(items=items, major=major,
+ minor=minor)
+ dwp_re = swp.to_dense().reindex(items=items, major=major,
+ minor=minor)
+ assert_panel_equal(swp_re.to_dense(), dwp_re)
+
+ _compare_with_dense(self.panel, self.panel.items[:2],
+ self.panel.major_axis[::2],
+ self.panel.minor_axis[::2])
+ _compare_with_dense(self.panel, None,
+ self.panel.major_axis[::2],
+ self.panel.minor_axis[::2])
+
+ self.assertRaises(ValueError, self.panel.reindex)
+
+ # TODO: do something about this later...
+ self.assertRaises(Exception, self.panel.reindex,
+ items=['item0', 'ItemA', 'ItemB'])
+
+ # test copying
+ cp = self.panel.reindex(self.panel.major_axis, copy=True)
+ cp['ItemA']['E'] = cp['ItemA']['A']
+ self.assertNotIn('E', self.panel['ItemA'])
def test_operators(self):
def _check_ops(panel):
+
def _dense_comp(op):
- dense = panel.to_dense()
- sparse_result = op(panel)
- dense_result = op(dense)
- assert_panel_equal(sparse_result.to_dense(), dense_result)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ dense = panel.to_dense()
+ sparse_result = op(panel)
+ dense_result = op(dense)
+ assert_panel_equal(sparse_result.to_dense(), dense_result)
def _mixed_comp(op):
- result = op(panel, panel.to_dense())
- expected = op(panel.to_dense(), panel.to_dense())
- assert_panel_equal(result, expected)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = op(panel, panel.to_dense())
+ expected = op(panel.to_dense(), panel.to_dense())
+ assert_panel_equal(result, expected)
op1 = lambda x: x + 2
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 3615cc3dc8ad8..86c8f5298e0ab 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -45,6 +45,9 @@ def setUp(self):
self._create_data()
warnings.simplefilter("ignore", category=FutureWarning)
+ def tearDown(self):
+ warnings.simplefilter("default", category=FutureWarning)
+
def test_centered_axis_validation(self):
# ok
mom.rolling_mean(Series(np.ones(10)),3,center=True ,axis=0)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 13c671e8e4e59..a45f4bf1726f2 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -381,15 +381,11 @@ def test_getitem_boolean(self):
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
- import warnings
- warnings.filterwarnings(action='ignore', category=UserWarning)
-
- indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
-
- subframe_obj = self.tsframe[indexer_obj]
- assert_frame_equal(subframe_obj, subframe)
+ with tm.assert_produces_warning(UserWarning):
+ indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
- warnings.filterwarnings(action='default', category=UserWarning)
+ subframe_obj = self.tsframe[indexer_obj]
+ assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 061382e0e16de..d29673e96ecdd 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -39,8 +39,7 @@ class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
- import warnings
- warnings.filterwarnings(action='ignore', category=FutureWarning)
+ pass
@property
def _ndim(self):
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index df61387734cb3..5b00ea163d85f 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -28,8 +28,6 @@ class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
- import warnings
- warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 0dad55a9133b6..1f8bcf8c9879f 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -5,6 +5,7 @@
from inspect import getargspec
import operator
import nose
+from functools import wraps
import numpy as np
import pandas as pd
@@ -17,6 +18,7 @@
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict
+from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
@@ -31,6 +33,22 @@
import pandas.core.panel as panelm
import pandas.util.testing as tm
+def ignore_sparse_panel_future_warning(func):
+ """
+ decorator to ignore FutureWarning if we have a SparsePanel
+
+ can be removed when SparsePanel is fully removed
+ """
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+
+ if isinstance(self.panel, SparsePanel):
+ with assert_produces_warning(FutureWarning, check_stacklevel=False):
+ return func(self, *args, **kwargs)
+ else:
+ return func(self, *args, **kwargs)
+
+ return wrapper
class PanelTests(object):
panel = None
@@ -56,6 +74,7 @@ class SafeForLongAndSparse(object):
def test_repr(self):
foo = repr(self.panel)
+ @ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
@@ -233,6 +252,7 @@ def test_get_plane_axes(self):
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
+ @ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
@@ -293,6 +313,7 @@ def test_iteritems(self):
self.assertEqual(len(list(compat.iteritems(self.panel))),
len(self.panel.items))
+ @ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
@@ -321,7 +342,7 @@ def check_op(op, name):
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
- from pandas import SparsePanel
+
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
@@ -348,16 +369,18 @@ def check_op(op, name):
com.pprint_thing("Failing operation: %r" % name)
raise
+ @ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
+ @ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
- p = Panel(np.arange(3*4*5).reshape(3,4,5), items=['ItemA','ItemB','ItemC'],
+ p = Panel(np.arange(3*4*5).reshape(3,4,5), items=['ItemA','ItemB','ItemC'],
major_axis=pd.date_range('20130101',periods=4),minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
@@ -365,6 +388,7 @@ def test_raise_when_not_implemented(self):
with self.assertRaises(NotImplementedError):
getattr(p,op)(d, axis=0)
+ @ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
@@ -396,7 +420,9 @@ def test_get_value(self):
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
+ @ignore_sparse_panel_future_warning
def test_abs(self):
+
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
@@ -872,9 +898,6 @@ def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
- import warnings
- warnings.filterwarnings(action='ignore', category=FutureWarning)
-
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
@@ -1534,6 +1557,7 @@ def test_transpose_copy(self):
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
+ @ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
@@ -2313,6 +2337,7 @@ def test_to_string(self):
buf = StringIO()
self.panel.to_string(buf)
+ @ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.index.levels[0]
start, end = dates[1], dates[5]
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index df3f1aaa815fa..d142ffdbad983 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -59,7 +59,6 @@ def reset_testing_mode():
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', DeprecationWarning)
-
set_testing_mode()
class TestCase(unittest.TestCase):
@@ -1975,7 +1974,6 @@ def handle_success(self, exc_type, exc_value, traceback):
raise_with_traceback(e, traceback)
return True
-
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
| https://api.github.com/repos/pandas-dev/pandas/pulls/11347 | 2015-10-16T22:05:26Z | 2015-10-18T16:03:57Z | 2015-10-18T16:03:56Z | 2015-10-18T16:03:57Z |
|
BUG: bug in comparisons vs tuples, #11339 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 6d4b61bb97f22..c5ae364e4a528 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -64,8 +64,8 @@ Bug Fixes
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
-- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issues:`11295`)
-
+- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issue:`11295`)
+- Bug in comparisons of Series vs list-likes (:issue:`11339`)
- Bug in list-like indexing with a mixed-integer Index (:issue:`11320`)
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 5b3d6069f17ec..bf331ff1b781c 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -720,7 +720,7 @@ def wrapper(self, other, axis=None):
res = op(self.values, other)
else:
values = self.get_values()
- if is_list_like(other):
+ if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
res = na_op(values, other)
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 9c86c3f894c67..f8d2c8bfd0dfb 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4269,6 +4269,43 @@ def test_object_comparisons(self):
expected = -(s == 'a')
assert_series_equal(result, expected)
+ def test_comparison_tuples(self):
+ # GH11339
+ # comparisons vs tuple
+ s = Series([(1,1),(1,2)])
+
+ result = s == (1,2)
+ expected = Series([False,True])
+ assert_series_equal(result, expected)
+
+ result = s != (1,2)
+ expected = Series([True, False])
+ assert_series_equal(result, expected)
+
+ result = s == (0,0)
+ expected = Series([False, False])
+ assert_series_equal(result, expected)
+
+ result = s != (0,0)
+ expected = Series([True, True])
+ assert_series_equal(result, expected)
+
+ s = Series([(1,1),(1,1)])
+
+ result = s == (1,1)
+ expected = Series([True, True])
+ assert_series_equal(result, expected)
+
+ result = s != (1,1)
+ expected = Series([False, False])
+ assert_series_equal(result, expected)
+
+ s = Series([frozenset([1]),frozenset([1,2])])
+
+ result = s == frozenset([1])
+ expected = Series([True, False])
+ assert_series_equal(result, expected)
+
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
| closes #11339
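For reference, a minimal sketch of the behaviour the new tests pin down (requires this patch; values taken from the added tests):

```python
import pandas as pd

s = pd.Series([(1, 1), (1, 2)])

s == (1, 2)   # -> Series([False, True]); each element is compared to the tuple
s != (1, 2)   # -> Series([True, False])
```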
| https://api.github.com/repos/pandas-dev/pandas/pulls/11345 | 2015-10-16T16:01:39Z | 2015-10-16T21:51:18Z | 2015-10-16T21:51:18Z | 2015-10-16T21:51:18Z |
ENH: Add Index.fillna | diff --git a/doc/source/indexing.rst b/doc/source/indexing.rst
index 38629ee7baaea..3618c31b65116 100644
--- a/doc/source/indexing.rst
+++ b/doc/source/indexing.rst
@@ -1367,6 +1367,31 @@ with duplicates dropped.
idx1.sym_diff(idx2)
idx1 ^ idx2
+Missing values
+~~~~~~~~~~~~~~
+
+.. _indexing.missing:
+
+.. versionadded:: 0.17.1
+
+.. important::
+
+ Even though ``Index`` can hold missing values (``NaN``), it should be avoided
+ if you do not want any unexpected results. For example, some operations
+ exclude missing values implicitly.
+
+``Index.fillna`` fills missing values with a specified scalar value.
+
+.. ipython:: python
+
+ idx1 = pd.Index([1, np.nan, 3, 4])
+ idx1
+ idx1.fillna(2)
+
+ idx2 = pd.DatetimeIndex([pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')])
+ idx2
+ idx2.fillna(pd.Timestamp('2011-01-02'))
+
Set / Reset Index
-----------------
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 28129287d51af..5d4c670620070 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -26,6 +26,12 @@ Enhancements
- ``DataFrame`` now uses the fields of a ``namedtuple`` as columns, if columns are not supplied (:issue:`11181`)
- Improve the error message displayed in :func:`pandas.io.gbq.to_gbq` when the DataFrame does not match the schema of the destination table (:issue:`11359`)
+- ``Index`` now has a ``fillna`` method (:issue:`10089`)
+
+.. ipython:: python
+
+ pd.Index([1, np.nan, 3]).fillna(2)
+
.. _whatsnew_0171.api:
API changes
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 855e3f013bfd3..1de0f78fa6bb1 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -15,7 +15,8 @@
from pandas.compat import range, zip, lrange, lzip, u, map
from pandas import compat
from pandas.core import algorithms
-from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs, PandasDelegate
+from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, PandasDelegate
+import pandas.core.base as base
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate, deprecate_kwarg)
import pandas.core.common as com
@@ -29,8 +30,6 @@
from pandas.io.common import PerformanceWarning
-
-
# simplify
default_pprint = lambda x, max_seq_items=None: com.pprint_thing(x,
escape_chars=('\t', '\r', '\n'),
@@ -45,6 +44,7 @@
_index_doc_kwargs = dict(klass='Index', inplace='',
duplicated='np.array')
+_index_shared_docs = dict()
def _try_get_item(x):
@@ -108,6 +108,7 @@ class Index(IndexOpsMixin, PandasObject):
_allow_datetime_index_ops = False
_allow_period_index_ops = False
_is_numeric_dtype = False
+ _can_hold_na = True
_engine_type = _index.ObjectEngine
@@ -1236,6 +1237,43 @@ def take(self, indices, axis=0, allow_fill=True, fill_value=None):
taken = self.values.take(indices)
return self._shallow_copy(taken)
+ @cache_readonly
+ def _isnan(self):
+ """ return if each value is nan"""
+ if self._can_hold_na:
+ return isnull(self)
+ else:
+            # shouldn't reach this condition if hasnans is checked beforehand
+ values = np.empty(len(self), dtype=np.bool_)
+ values.fill(False)
+ return values
+
+ @cache_readonly
+ def _nan_idxs(self):
+ if self._can_hold_na:
+ w, = self._isnan.nonzero()
+ return w
+ else:
+ return np.array([], dtype=np.int64)
+
+ @cache_readonly
+ def hasnans(self):
+ """ return if I have any nans; enables various perf speedups """
+ if self._can_hold_na:
+ return self._isnan.any()
+ else:
+ return False
+
+ def _convert_for_op(self, value):
+ """ Convert value to be insertable to ndarray """
+ return value
+
+ def _assert_can_do_op(self, value):
+ """ Check value is valid for scalar op """
+ if not lib.isscalar(value):
+ msg = "'value' must be a scalar, passed: {0}"
+ raise TypeError(msg.format(type(value).__name__))
+
def putmask(self, mask, value):
"""
return a new Index of the values set with the mask
@@ -1245,8 +1283,12 @@ def putmask(self, mask, value):
numpy.ndarray.putmask
"""
values = self.values.copy()
- np.putmask(values, mask, value)
- return self._shallow_copy(values)
+ try:
+ np.putmask(values, mask, self._convert_for_op(value))
+ return self._shallow_copy(values)
+ except (ValueError, TypeError):
+ # coerces to object
+ return self.astype(object).putmask(mask, value)
def format(self, name=False, formatter=None, **kwargs):
"""
@@ -2766,15 +2808,45 @@ def drop(self, labels, errors='raise'):
return self.delete(indexer)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
- @Appender(_shared_docs['drop_duplicates'] % _index_doc_kwargs)
+ @Appender(base._shared_docs['drop_duplicates'] % _index_doc_kwargs)
def drop_duplicates(self, keep='first'):
return super(Index, self).drop_duplicates(keep=keep)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
- @Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
+ @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
return super(Index, self).duplicated(keep=keep)
+ _index_shared_docs['fillna'] = """
+ Fill NA/NaN values with the specified value
+
+ Parameters
+ ----------
+ value : scalar
+ Scalar value to use to fill holes (e.g. 0).
+        This value cannot be a list-like.
+ downcast : dict, default is None
+ a dict of item->dtype of what to downcast if possible,
+ or the string 'infer' which will try to downcast to an appropriate
+ equal type (e.g. float64 to int64 if possible)
+
+ Returns
+ -------
+ filled : Index
+ """
+
+ @Appender(_index_shared_docs['fillna'])
+ def fillna(self, value=None, downcast=None):
+ self._assert_can_do_op(value)
+ if self.hasnans:
+ result = self.putmask(self._isnan, value)
+ if downcast is None:
+                # no need to care about metadata other than name, because
+                # an index that contains NaN cannot carry a freq anyway
+ return Index(result, name=self.name)
+
+ return self._shallow_copy()
+
def _evaluate_with_timedelta_like(self, other, op, opstr):
raise TypeError("can only perform ops with timedelta like values")
@@ -3200,6 +3272,16 @@ def __array__(self, dtype=None):
""" the array interface, return my values """
return np.array(self._data, dtype=dtype)
+ @cache_readonly
+ def _isnan(self):
+ """ return if each value is nan"""
+ return self._data.codes == -1
+
+ @Appender(_index_shared_docs['fillna'])
+ def fillna(self, value, downcast=None):
+ self._assert_can_do_op(value)
+ return CategoricalIndex(self._data.fillna(value), name=self.name)
+
def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
@@ -3214,7 +3296,7 @@ def is_unique(self):
return not self.duplicated().any()
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
- @Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
+ @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.hashtable import duplicated_int64
return duplicated_int64(self.codes.astype('i8'), keep)
@@ -3612,6 +3694,8 @@ class Int64Index(NumericIndex):
_inner_indexer = _algos.inner_join_indexer_int64
_outer_indexer = _algos.outer_join_indexer_int64
+ _can_hold_na = False
+
_engine_type = _index.Int64Engine
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
@@ -3646,11 +3730,6 @@ def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, *
def inferred_type(self):
return 'integer'
- @cache_readonly
- def hasnans(self):
- # by definition
- return False
-
@property
def asi8(self):
# do not cache or you'll create a memory leak
@@ -3872,19 +3951,6 @@ def is_all_dates(self):
"""
return False
- @cache_readonly
- def _nan_idxs(self):
- w, = self._isnan.nonzero()
- return w
-
- @cache_readonly
- def _isnan(self):
- return np.isnan(self.values)
-
- @cache_readonly
- def hasnans(self):
- return self._isnan.any()
-
@cache_readonly
def is_unique(self):
return super(Float64Index, self).is_unique and self._nan_idxs.size < 2
@@ -4409,7 +4475,7 @@ def is_unique(self):
return not self.duplicated().any()
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
- @Appender(_shared_docs['duplicated'] % _index_doc_kwargs)
+ @Appender(base._shared_docs['duplicated'] % _index_doc_kwargs)
def duplicated(self, keep='first'):
from pandas.core.groupby import get_group_index
from pandas.hashtable import duplicated_int64
@@ -4419,6 +4485,11 @@ def duplicated(self, keep='first'):
return duplicated_int64(ids, keep)
+ @Appender(_index_shared_docs['fillna'])
+ def fillna(self, value=None, downcast=None):
+ # isnull is not implemented for MultiIndex
+ raise NotImplementedError('isnull is not defined for MultiIndex')
+
def get_value(self, series, key):
# somewhat broken encapsulation
from pandas.core.indexing import maybe_droplevels
diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx
index cfc50afc8f9f3..43adbbb66b80e 100644
--- a/pandas/src/period.pyx
+++ b/pandas/src/period.pyx
@@ -452,7 +452,7 @@ def extract_ordinals(ndarray[object] values, freq):
p = values[i]
ordinals[i] = p.ordinal
if p.freqstr != freqstr:
- raise ValueError("%s is wrong freq" % p)
+ raise ValueError(_DIFFERENT_FREQ_INDEX.format(freqstr, p.freqstr))
return ordinals
@@ -624,8 +624,8 @@ cdef ndarray[int64_t] localize_dt64arr_to_period(ndarray[int64_t] stamps,
return result
-_DIFFERENT_FREQ_ERROR = "Input has different freq={1} from Period(freq={0})"
-
+_DIFFERENT_FREQ = "Input has different freq={1} from Period(freq={0})"
+_DIFFERENT_FREQ_INDEX = "Input has different freq={1} from PeriodIndex(freq={0})"
cdef class Period(object):
"""
@@ -766,7 +766,7 @@ cdef class Period(object):
if isinstance(other, Period):
from pandas.tseries.frequencies import get_freq_code as _gfc
if other.freq != self.freq:
- msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr)
+ msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise ValueError(msg)
if self.ordinal == tslib.iNaT or other.ordinal == tslib.iNaT:
return _nat_scalar_rules[op]
@@ -807,7 +807,7 @@ cdef class Period(object):
else:
ordinal = self.ordinal + other.n
return Period(ordinal=ordinal, freq=self.freq)
- msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr)
+ msg = _DIFFERENT_FREQ.format(self.freqstr, other.freqstr)
raise ValueError(msg)
else: # pragma no cover
return NotImplemented
diff --git a/pandas/tests/test_base.py b/pandas/tests/test_base.py
index fb255f300ebdd..3a42059a63b0d 100644
--- a/pandas/tests/test_base.py
+++ b/pandas/tests/test_base.py
@@ -182,6 +182,15 @@ def f():
class Ops(tm.TestCase):
+
+ def _allow_na_ops(self, obj):
+ """Whether to skip test cases including NaN"""
+ if (isinstance(obj, Index) and
+ (obj.is_boolean() or not obj._can_hold_na)):
+ # don't test boolean / int64 index
+ return False
+ return True
+
def setUp(self):
self.bool_index = tm.makeBoolIndex(10, name='a')
self.int_index = tm.makeIntIndex(10, name='a')
@@ -452,13 +461,7 @@ def test_value_counts_unique_nunique(self):
klass = type(o)
values = o.values
- if isinstance(o,Index) and o.is_boolean():
- # don't test boolean
- continue
-
- if ((isinstance(o, Int64Index) and not isinstance(o,
- (DatetimeIndex, PeriodIndex)))):
- # skips int64 because it doesn't allow to include nan or None
+ if not self._allow_na_ops(o):
continue
# special assign to the numpy array
@@ -815,6 +818,64 @@ def test_duplicated_drop_duplicates(self):
s.drop_duplicates(inplace=True)
tm.assert_series_equal(s, original)
+ def test_fillna(self):
+        # GH 11343
+ # though Index.fillna and Series.fillna has separate impl,
+ # test here to confirm these works as the same
+ def get_fill_value(obj):
+ if isinstance(obj, pd.tseries.base.DatetimeIndexOpsMixin):
+ return obj.asobject.values[0]
+ else:
+ return obj.values[0]
+
+ for o in self.objs:
+ klass = type(o)
+ values = o.values
+
+ # values will not be changed
+ result = o.fillna(get_fill_value(o))
+ if isinstance(o, Index):
+ self.assert_index_equal(o, result)
+ else:
+ self.assert_series_equal(o, result)
+ # check shallow_copied
+ self.assertFalse(o is result)
+
+ for null_obj in [np.nan, None]:
+ for o in self.objs:
+ klass = type(o)
+ values = o.values.copy()
+
+ if not self._allow_na_ops(o):
+ continue
+
+ # value for filling
+ fill_value = get_fill_value(o)
+
+ # special assign to the numpy array
+ if o.values.dtype == 'datetime64[ns]' or isinstance(o, PeriodIndex):
+ values[0:2] = pd.tslib.iNaT
+ else:
+ values[0:2] = null_obj
+
+ if isinstance(o, PeriodIndex):
+ # freq must be specified because repeat makes freq ambiguous
+ expected = [fill_value.ordinal] * 2 + list(values[2:])
+ expected = klass(ordinal=expected, freq=o.freq)
+ o = klass(ordinal=values, freq=o.freq)
+ else:
+ expected = [fill_value] * 2 + list(values[2:])
+ expected = klass(expected)
+ o = klass(values)
+
+ result = o.fillna(fill_value)
+ if isinstance(o, Index):
+ self.assert_index_equal(result, expected)
+ else:
+ self.assert_series_equal(result, expected)
+ # check shallow_copied
+ self.assertFalse(o is result)
+
class TestFloat64HashTable(tm.TestCase):
def test_lookup_nan(self):
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index f7d93a978a46a..f897ac02b2a8a 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -559,6 +559,81 @@ def test_numpy_ufuncs(self):
with tm.assertRaises(Exception):
func(idx)
+ def test_hasnans_isnans(self):
+ # GH 11343, added tests for hasnans / isnans
+ for name, index in self.indices.items():
+ if isinstance(index, MultiIndex):
+ pass
+ else:
+ idx = index.copy()
+
+                # the cases in self.indices don't include NaN
+ expected = np.array([False] * len(idx), dtype=bool)
+ self.assert_numpy_array_equal(idx._isnan, expected)
+ self.assertFalse(idx.hasnans)
+
+ idx = index.copy()
+ values = idx.values
+
+ if len(index) == 0:
+ continue
+ elif isinstance(index, pd.tseries.base.DatetimeIndexOpsMixin):
+ values[1] = pd.tslib.iNaT
+ elif isinstance(index, Int64Index):
+ continue
+ else:
+ values[1] = np.nan
+
+ if isinstance(index, PeriodIndex):
+ idx = index.__class__(values, freq=index.freq)
+ else:
+ idx = index.__class__(values)
+
+ expected = np.array([False] * len(idx), dtype=bool)
+ expected[1] = True
+ self.assert_numpy_array_equal(idx._isnan, expected)
+ self.assertTrue(idx.hasnans)
+
+ def test_fillna(self):
+ # GH 11343
+ for name, index in self.indices.items():
+ if len(index) == 0:
+ pass
+ elif isinstance(index, MultiIndex):
+ idx = index.copy()
+ msg = "isnull is not defined for MultiIndex"
+ with self.assertRaisesRegexp(NotImplementedError, msg):
+ idx.fillna(idx[0])
+ else:
+ idx = index.copy()
+ result = idx.fillna(idx[0])
+ self.assert_index_equal(result, idx)
+ self.assertFalse(result is idx)
+
+ msg = "'value' must be a scalar, passed: "
+ with self.assertRaisesRegexp(TypeError, msg):
+ idx.fillna([idx[0]])
+
+ idx = index.copy()
+ values = idx.values
+
+ if isinstance(index, pd.tseries.base.DatetimeIndexOpsMixin):
+ values[1] = pd.tslib.iNaT
+ elif isinstance(index, Int64Index):
+ continue
+ else:
+ values[1] = np.nan
+
+ if isinstance(index, PeriodIndex):
+ idx = index.__class__(values, freq=index.freq)
+ else:
+ idx = index.__class__(values)
+
+ expected = np.array([False] * len(idx), dtype=bool)
+ expected[1] = True
+ self.assert_numpy_array_equal(idx._isnan, expected)
+ self.assertTrue(idx.hasnans)
+
class TestIndex(Base, tm.TestCase):
_holder = Index
@@ -2516,6 +2591,17 @@ def test_string_categorical_index_repr(self):
categories=[u'あ', u'い', u'う', u'え', u'お', u'か', u'き', u'く', ...], ordered=False, dtype='category')"""
self.assertEqual(unicode(idx), expected)
+ def test_fillna_categorical(self):
+ # GH 11343
+ idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name='x')
+ # fill by value in categories
+ exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name='x')
+ self.assert_index_equal(idx.fillna(1.0), exp)
+
+ # fill by value not in categories raises ValueError
+ with tm.assertRaisesRegexp(ValueError, 'fill value must be in categories'):
+ idx.fillna(2.0)
+
class Numeric(Base):
@@ -2798,6 +2884,21 @@ def test_astype_from_object(self):
tm.assert_equal(result.dtype, expected.dtype)
tm.assert_index_equal(result, expected)
+ def test_fillna_float64(self):
+ # GH 11343
+ idx = Index([1.0, np.nan, 3.0], dtype=float, name='x')
+ # can't downcast
+ exp = Index([1.0, 0.1, 3.0], name='x')
+ self.assert_index_equal(idx.fillna(0.1), exp)
+
+ # downcast
+ exp = Int64Index([1, 2, 3], name='x')
+ self.assert_index_equal(idx.fillna(2), exp)
+
+ # object
+ exp = Index([1, 'obj', 3], name='x')
+ self.assert_index_equal(idx.fillna('obj'), exp)
+
class TestInt64Index(Numeric, tm.TestCase):
_holder = Int64Index
@@ -3551,6 +3652,39 @@ def test_ufunc_coercions(self):
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, 'D')
+ def test_fillna_datetime64(self):
+ # GH 11343
+ for tz in ['US/Eastern', 'Asia/Tokyo']:
+ idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'])
+
+ exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'])
+ self.assert_index_equal(idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
+
+ # tz mismatch
+ exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), pd.Timestamp('2011-01-01 10:00', tz=tz),
+ pd.Timestamp('2011-01-01 11:00')], dtype=object)
+ self.assert_index_equal(idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
+
+ # object
+ exp = pd.Index([pd.Timestamp('2011-01-01 09:00'), 'x',
+ pd.Timestamp('2011-01-01 11:00')], dtype=object)
+ self.assert_index_equal(idx.fillna('x'), exp)
+
+
+ idx = pd.DatetimeIndex(['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], tz=tz)
+
+ exp = pd.DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], tz=tz)
+ self.assert_index_equal(idx.fillna(pd.Timestamp('2011-01-01 10:00', tz=tz)), exp)
+
+ exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz), pd.Timestamp('2011-01-01 10:00'),
+ pd.Timestamp('2011-01-01 11:00', tz=tz)], dtype=object)
+ self.assert_index_equal(idx.fillna(pd.Timestamp('2011-01-01 10:00')), exp)
+
+ # object
+ exp = pd.Index([pd.Timestamp('2011-01-01 09:00', tz=tz), 'x',
+ pd.Timestamp('2011-01-01 11:00', tz=tz)], dtype=object)
+ self.assert_index_equal(idx.fillna('x'), exp)
+
class TestPeriodIndex(DatetimeLike, tm.TestCase):
_holder = PeriodIndex
@@ -3633,6 +3767,21 @@ def test_period_index_indexer(self):
self.assert_frame_equal(df.iloc[0:5], df.loc[idx[0:5]])
self.assert_frame_equal(df, df.loc[list(idx)])
+ def test_fillna_period(self):
+ # GH 11343
+ idx = pd.PeriodIndex(['2011-01-01 09:00', pd.NaT, '2011-01-01 11:00'], freq='H')
+
+ exp = pd.PeriodIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'], freq='H')
+ self.assert_index_equal(idx.fillna(pd.Period('2011-01-01 10:00', freq='H')), exp)
+
+ exp = pd.Index([pd.Period('2011-01-01 09:00', freq='H'), 'x',
+ pd.Period('2011-01-01 11:00', freq='H')], dtype=object)
+ self.assert_index_equal(idx.fillna('x'), exp)
+
+ with tm.assertRaisesRegexp(ValueError, 'Input has different freq=D from PeriodIndex\\(freq=H\\)'):
+ idx.fillna(pd.Period('2011-01-01', freq='D'))
+
+
class TestTimedeltaIndex(DatetimeLike, tm.TestCase):
_holder = TimedeltaIndex
_multiprocess_can_split_ = True
@@ -3751,6 +3900,19 @@ def test_ufunc_coercions(self):
tm.assert_index_equal(result, exp)
self.assertEqual(result.freq, None)
+ def test_fillna_timedelta(self):
+ # GH 11343
+ idx = pd.TimedeltaIndex(['1 day', pd.NaT, '3 day'])
+
+ exp = pd.TimedeltaIndex(['1 day', '2 day', '3 day'])
+ self.assert_index_equal(idx.fillna(pd.Timedelta('2 day')), exp)
+
+ exp = pd.TimedeltaIndex(['1 day', '3 hour', '3 day'])
+ idx.fillna(pd.Timedelta('3 hour'))
+
+ exp = pd.Index([pd.Timedelta('1 day'), 'x', pd.Timedelta('3 day')], dtype=object)
+ self.assert_index_equal(idx.fillna('x'), exp)
+
class TestMultiIndex(Base, tm.TestCase):
_holder = MultiIndex
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index 50137493e6b01..d5382e8057f4b 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -201,10 +201,15 @@ def get_duplicates(self):
values = Index.get_duplicates(self)
return self._simple_new(values)
+ @cache_readonly
+ def _isnan(self):
+ """ return if each value is nan"""
+ return (self.asi8 == tslib.iNaT)
+
@cache_readonly
def hasnans(self):
""" return if I have any nans; enables various perf speedups """
- return (self.asi8 == tslib.iNaT).any()
+ return self._isnan.any()
@property
def asobject(self):
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 868057c675594..fd26e9834bd5f 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -499,6 +499,12 @@ def _generate(cls, start, end, periods, name, offset,
def _box_func(self):
return lambda x: Timestamp(x, offset=self.offset, tz=self.tz)
+ def _convert_for_op(self, value):
+ """ Convert value to be insertable to ndarray """
+ if self._has_same_tz(value):
+ return _to_m8(value)
+ raise ValueError('Passed item and index have different timezone')
+
def _local_timestamps(self):
utc = _utc()
@@ -548,6 +554,21 @@ def tzinfo(self):
"""
return self.tz
+ @cache_readonly
+ def _timezone(self):
+ """ Comparable timezone both for pytz / dateutil"""
+ return tslib.get_timezone(self.tzinfo)
+
+ def _has_same_tz(self, other):
+ zzone = self._timezone
+
+        # vzone shouldn't be None if value is non-datetime-like
+ if isinstance(other, np.datetime64):
+ # convert to Timestamp as np.datetime64 doesn't have tz attr
+ other = Timestamp(other)
+ vzone = tslib.get_timezone(getattr(other, 'tzinfo', '__no_tz__'))
+ return zzone == vzone
+
@classmethod
def _cached_range(cls, start=None, end=None, periods=None, offset=None,
name=None):
@@ -680,7 +701,7 @@ def _sub_datelike(self, other):
other = Timestamp(other)
# require tz compat
- if tslib.get_timezone(self.tz) != tslib.get_timezone(other.tzinfo):
+ if not self._has_same_tz(other):
raise TypeError("Timestamp subtraction must have the same timezones or no timezones")
i8 = self.asi8
@@ -1552,17 +1573,9 @@ def equals(self, other):
except:
return False
- if self.tz is not None:
- if other.tz is None:
- return False
- same_zone = tslib.get_timezone(
- self.tz) == tslib.get_timezone(other.tz)
- else:
- if other.tz is not None:
- return False
- same_zone = True
-
- return same_zone and np.array_equal(self.asi8, other.asi8)
+ if self._has_same_tz(other):
+ return np.array_equal(self.asi8, other.asi8)
+ return False
def insert(self, loc, item):
"""
@@ -1581,10 +1594,10 @@ def insert(self, loc, item):
"""
freq = None
+
if isinstance(item, (datetime, np.datetime64)):
- zone = tslib.get_timezone(self.tz)
- izone = tslib.get_timezone(getattr(item, 'tzinfo', None))
- if zone != izone:
+ self._assert_can_do_op(item)
+ if not self._has_same_tz(item):
raise ValueError('Passed item and index have different timezone')
# check freq can be preserved on edge cases
if self.size and self.freq is not None:
diff --git a/pandas/tseries/period.py b/pandas/tseries/period.py
index 888c50e86b7b2..578727f515fe4 100644
--- a/pandas/tseries/period.py
+++ b/pandas/tseries/period.py
@@ -57,7 +57,7 @@ def dt64arr_to_periodarr(data, freq, tz):
# --- Period index sketch
-_DIFFERENT_FREQ_ERROR = "Input has different freq={1} from PeriodIndex(freq={0})"
+_DIFFERENT_FREQ_INDEX = period._DIFFERENT_FREQ_INDEX
def _period_index_cmp(opname, nat_result=False):
"""
@@ -68,13 +68,13 @@ def wrapper(self, other):
func = getattr(self.values, opname)
other_base, _ = _gfc(other.freq)
if other.freq != self.freq:
- msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr)
+ msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise ValueError(msg)
result = func(other.ordinal)
elif isinstance(other, PeriodIndex):
if other.freq != self.freq:
- msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr)
+ msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise ValueError(msg)
result = getattr(self.values, opname)(other.values)
@@ -336,6 +336,10 @@ def __array_wrap__(self, result, context=None):
def _box_func(self):
return lambda x: Period._from_ordinal(ordinal=x, freq=self.freq)
+    def _convert_for_op(self, value):
+ """ Convert value to be insertable to ndarray """
+ return self._box_func(value)
+
def _to_embed(self, keep_tz=False):
""" return an array repr of this object, potentially casting to object """
return self.asobject.values
@@ -378,7 +382,7 @@ def astype(self, dtype):
def searchsorted(self, key, side='left'):
if isinstance(key, Period):
if key.freq != self.freq:
- msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, key.freqstr)
+ msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, key.freqstr)
raise ValueError(msg)
key = key.ordinal
elif isinstance(key, compat.string_types):
@@ -764,7 +768,7 @@ def _assert_can_do_setop(self, other):
raise ValueError('can only call with other PeriodIndex-ed objects')
if self.freq != other.freq:
- msg = _DIFFERENT_FREQ_ERROR.format(self.freqstr, other.freqstr)
+ msg = _DIFFERENT_FREQ_INDEX.format(self.freqstr, other.freqstr)
raise ValueError(msg)
def _wrap_union_result(self, other, result):
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index afb15badf433c..0d47c2526df14 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -642,7 +642,7 @@ class NaTType(_NaT):
def __reduce__(self):
return (__nat_unpickle, (None, ))
-
+
def total_seconds(self):
# GH 10939
return np.nan
@@ -1749,7 +1749,8 @@ def dateutil_parse(object timestr, object default, ignoretz=False,
res, _ = res
if res is None:
- raise ValueError("unknown string format")
+ msg = "Unknown datetime string format, unable to parse: {0}"
+ raise ValueError(msg.format(timestr))
for attr in ["year", "month", "day", "hour",
"minute", "second", "microsecond"]:
@@ -1759,7 +1760,8 @@ def dateutil_parse(object timestr, object default, ignoretz=False,
reso = attr
if reso is None:
- raise ValueError("Cannot parse date.")
+ msg = "Unable to parse datetime string: {0}"
+ raise ValueError(msg.format(timestr))
if reso == 'microsecond':
if repl['microsecond'] == 0:
| Closes #10089.
- `value` only accepts a scalar (see the usage sketch below).
- `MultiIndex.fillna` raises `NotImplementedError` because `isnull` is not defined for MI.
- Moved `hasnans` and related properties to the base `Index` class.
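A rough usage sketch of the new API, based on the docs/tests in this diff (exact output dtypes may differ):

```python
import numpy as np
import pandas as pd

idx = pd.Index([1.0, np.nan, 3.0])
idx.fillna(2.0)        # NaN replaced by 2.0; the original index is untouched

# datetime-like indexes are filled with a compatible scalar
dti = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03'])
dti.fillna(pd.Timestamp('2011-01-02'))

# list-like values are rejected
idx.fillna([2.0])      # TypeError: 'value' must be a scalar, passed: list
```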
| https://api.github.com/repos/pandas-dev/pandas/pulls/11343 | 2015-10-16T14:22:08Z | 2015-11-13T16:05:41Z | 2015-11-13T16:05:41Z | 2015-11-13T21:17:44Z |
TST: tests for list skiprows in read_excel | diff --git a/pandas/io/tests/data/testskiprows.xls b/pandas/io/tests/data/testskiprows.xls
new file mode 100644
index 0000000000000..21ccd30ec62da
Binary files /dev/null and b/pandas/io/tests/data/testskiprows.xls differ
diff --git a/pandas/io/tests/data/testskiprows.xlsm b/pandas/io/tests/data/testskiprows.xlsm
new file mode 100644
index 0000000000000..f5889ded4637a
Binary files /dev/null and b/pandas/io/tests/data/testskiprows.xlsm differ
diff --git a/pandas/io/tests/data/testskiprows.xlsx b/pandas/io/tests/data/testskiprows.xlsx
new file mode 100644
index 0000000000000..2d7ce943a7214
Binary files /dev/null and b/pandas/io/tests/data/testskiprows.xlsx differ
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 40cbd97ea539f..e7ed83b5708f9 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -660,6 +660,21 @@ def test_read_excel_chunksize(self):
pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
chunksize=100)
+ def test_read_excel_skiprows_list(self):
+ #GH 4903
+ actual = pd.read_excel(os.path.join(self.dirpath, 'testskiprows' + self.ext),
+ 'skiprows_list', skiprows=[0,2])
+ expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
+ [2, 3.5, pd.Timestamp('2015-01-02'), False],
+ [3, 4.5, pd.Timestamp('2015-01-03'), False],
+ [4, 5.5, pd.Timestamp('2015-01-04'), True]],
+ columns = ['a','b','c','d'])
+ tm.assert_frame_equal(actual, expected)
+
+ actual = pd.read_excel(os.path.join(self.dirpath, 'testskiprows' + self.ext),
+ 'skiprows_list', skiprows=np.array([0,2]))
+ tm.assert_frame_equal(actual, expected)
+
class XlsReaderTests(XlrdTests, tm.TestCase):
ext = '.xls'
engine_name = 'xlrd'
| Closes #4903
This seems to have been fixed already, so this PR just adds tests for it (sketch below).
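For reference, the behaviour the new tests cover (the file and sheet names refer to the test fixtures added in this PR):

```python
import numpy as np
import pandas as pd

# skip the first and third physical rows of the sheet
df = pd.read_excel('testskiprows.xlsx', 'skiprows_list', skiprows=[0, 2])

# an ndarray of row positions behaves the same way
df = pd.read_excel('testskiprows.xlsx', 'skiprows_list', skiprows=np.array([0, 2]))
```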
| https://api.github.com/repos/pandas-dev/pandas/pulls/11340 | 2015-10-16T02:49:53Z | 2015-10-16T07:53:21Z | 2015-10-16T07:53:21Z | 2016-07-21T16:02:53Z |
Use Service Account JSON Key File | diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index e7241036b94c4..77bcbe8af2b70 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -4,6 +4,7 @@
import logging
from time import sleep
import uuid
+import pandas.json
import numpy as np
@@ -109,9 +110,10 @@ class TableCreationError(PandasError, ValueError):
class GbqConnector(object):
- def __init__(self, project_id, reauth=False):
+ def __init__(self, project_id, key_file, reauth=False):
self.test_google_api_imports()
self.project_id = project_id
+ self.key_file = key_file
self.reauth = reauth
self.credentials = self.get_credentials()
self.service = self.get_service(self.credentials)
@@ -129,22 +131,15 @@ def test_google_api_imports(self):
raise ImportError("Missing module required for Google BigQuery support: {0}".format(str(e)))
def get_credentials(self):
- from oauth2client.client import OAuth2WebServerFlow
- from oauth2client.file import Storage
- from oauth2client.tools import run_flow, argparser
-
- _check_google_client_version()
-
- flow = OAuth2WebServerFlow(client_id='495642085510-k0tmvj2m941jhre2nbqka17vqpjfddtd.apps.googleusercontent.com',
- client_secret='kOc9wMptUtxkcIFbtZCcrEAc',
- scope='https://www.googleapis.com/auth/bigquery',
- redirect_uri='urn:ietf:wg:oauth:2.0:oob')
-
- storage = Storage('bigquery_credentials.dat')
- credentials = storage.get()
+ from oauth2client.client import SignedJwtAssertionCredentials
- if credentials is None or credentials.invalid or self.reauth:
- credentials = run_flow(flow, storage, argparser.parse_args([]))
+ scope = 'https://www.googleapis.com/auth/bigquery'
+ with open(self.key_file) as key_file:
+ key = pandas.json.load(key_file)
+ credentials = SignedJwtAssertionCredentials(
+ key['client_email'],
+ key['private_key'],
+ scope)
return credentials
@@ -185,8 +180,7 @@ def process_insert_errors(insert_errors, verbose):
for error in errors:
reason = error['reason']
message = error['message']
- location = error['location']
- error_message = 'Error at Row: {0}, Reason: {1}, Location: {2}, Message: {3}'.format(row, reason, location, message)
+ error_message = 'Error at Row: {0}, Reason: {1}, Message: {2}'.format(row, reason, message)
# Report all error messages if verbose is set
if verbose:
@@ -386,7 +380,7 @@ def _parse_entry(field_value, field_type):
return field_value
-def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=False, verbose=True):
+def read_gbq(query, key_file, project_id=None, index_col=None, col_order=None, reauth=False, verbose=True):
"""Load data from Google BigQuery.
THIS IS AN EXPERIMENTAL LIBRARY
@@ -424,7 +418,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals
if not project_id:
raise TypeError("Missing required parameter: project_id")
- connector = GbqConnector(project_id, reauth=reauth)
+ connector = GbqConnector(project_id, key_file, reauth=reauth)
schema, pages = connector.run_query(query, verbose=verbose)
dataframe_list = []
while len(pages) > 0:
@@ -462,7 +456,7 @@ def read_gbq(query, project_id=None, index_col=None, col_order=None, reauth=Fals
return final_df
-def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
+def to_gbq(dataframe, destination_table, project_id, key_file, chunksize=10000,
verbose=True, reauth=False, if_exists='fail'):
"""Write a DataFrame to a Google BigQuery table.
@@ -495,7 +489,7 @@ def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
if '.' not in destination_table:
raise NotFoundException("Invalid Table Name. Should be of the form 'datasetId.tableId' ")
- connector = GbqConnector(project_id, reauth=reauth)
+ connector = GbqConnector(project_id, key_file, reauth=reauth)
dataset_id, table_id = destination_table.rsplit('.', 1)
table = _Table(project_id, dataset_id, reauth=reauth)
| Google recommends using Service Account credentials to make API calls from a server. See [this issue comment](https://github.com/pydata/pandas/issues/8489#issuecomment-148466759) for details.
Docs and tests are not yet updated. DO NOT MERGE.
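A minimal sketch of how the proposed API would be called (the project id and key-file path are placeholders; the JSON key must contain `client_email` and `private_key`):

```python
import pandas as pd

# read: query, then the service-account key file, then the project
df = pd.io.gbq.read_gbq('SELECT 1 AS x', 'service_account.json',
                        project_id='my-project')

# write: destination table, project, then the key file
pd.io.gbq.to_gbq(df, 'my_dataset.my_table', 'my-project',
                 'service_account.json', if_exists='fail')
```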
| https://api.github.com/repos/pandas-dev/pandas/pulls/11335 | 2015-10-15T18:45:05Z | 2015-11-18T20:15:48Z | null | 2015-11-18T20:15:48Z |
CLN: GH11271 move _get_handle, UTF encoders to io.common | diff --git a/pandas/core/common.py b/pandas/core/common.py
index 724843d379f64..c6e774b5077db 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -5,8 +5,6 @@
import re
import collections
import numbers
-import codecs
-import csv
import types
from datetime import datetime, timedelta
from functools import partial
@@ -19,7 +17,7 @@
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
-from pandas.compat import StringIO, BytesIO, range, long, u, zip, map, string_types, iteritems
+from pandas.compat import BytesIO, range, long, u, zip, map, string_types, iteritems
from pandas.core.dtypes import CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, DatetimeTZDtypeType
from pandas.core.config import get_option
@@ -2808,154 +2806,6 @@ def _all_none(*args):
return True
-class UTF8Recoder:
-
- """
- Iterator that reads an encoded stream and reencodes the input to UTF-8
- """
-
- def __init__(self, f, encoding):
- self.reader = codecs.getreader(encoding)(f)
-
- def __iter__(self):
- return self
-
- def read(self, bytes=-1):
- return self.reader.read(bytes).encode('utf-8')
-
- def readline(self):
- return self.reader.readline().encode('utf-8')
-
- def next(self):
- return next(self.reader).encode("utf-8")
-
- # Python 3 iterator
- __next__ = next
-
-
-def _get_handle(path, mode, encoding=None, compression=None):
- """Gets file handle for given path and mode.
- NOTE: Under Python 3.2, getting a compressed file handle means reading in
- the entire file, decompressing it and decoding it to ``str`` all at once
- and then wrapping it in a StringIO.
- """
- if compression is not None:
- if encoding is not None and not compat.PY3:
- msg = 'encoding + compression not yet supported in Python 2'
- raise ValueError(msg)
-
- if compression == 'gzip':
- import gzip
- f = gzip.GzipFile(path, mode)
- elif compression == 'bz2':
- import bz2
- f = bz2.BZ2File(path, mode)
- else:
- raise ValueError('Unrecognized compression type: %s' %
- compression)
- if compat.PY3:
- from io import TextIOWrapper
- f = TextIOWrapper(f, encoding=encoding)
- return f
- else:
- if compat.PY3:
- if encoding:
- f = open(path, mode, encoding=encoding)
- else:
- f = open(path, mode, errors='replace')
- else:
- f = open(path, mode)
-
- return f
-
-
-if compat.PY3: # pragma: no cover
- def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
- # ignore encoding
- return csv.reader(f, dialect=dialect, **kwds)
-
- def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
- return csv.writer(f, dialect=dialect, **kwds)
-else:
- class UnicodeReader:
-
- """
- A CSV reader which will iterate over lines in the CSV file "f",
- which is encoded in the given encoding.
-
- On Python 3, this is replaced (below) by csv.reader, which handles
- unicode.
- """
-
- def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
- f = UTF8Recoder(f, encoding)
- self.reader = csv.reader(f, dialect=dialect, **kwds)
-
- def next(self):
- row = next(self.reader)
- return [compat.text_type(s, "utf-8") for s in row]
-
- # python 3 iterator
- __next__ = next
-
- def __iter__(self): # pragma: no cover
- return self
-
- class UnicodeWriter:
-
- """
- A CSV writer which will write rows to CSV file "f",
- which is encoded in the given encoding.
- """
-
- def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
- # Redirect output to a queue
- self.queue = StringIO()
- self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
- self.stream = f
- self.encoder = codecs.getincrementalencoder(encoding)()
- self.quoting = kwds.get("quoting", None)
-
- def writerow(self, row):
- def _check_as_is(x):
- return (self.quoting == csv.QUOTE_NONNUMERIC and
- is_number(x)) or isinstance(x, str)
-
- row = [x if _check_as_is(x)
- else pprint_thing(x).encode('utf-8') for x in row]
-
- self.writer.writerow([s for s in row])
- # Fetch UTF-8 output from the queue ...
- data = self.queue.getvalue()
- data = data.decode("utf-8")
- # ... and reencode it into the target encoding
- data = self.encoder.encode(data)
- # write to the target stream
- self.stream.write(data)
- # empty queue
- self.queue.truncate(0)
-
- def writerows(self, rows):
- def _check_as_is(x):
- return (self.quoting == csv.QUOTE_NONNUMERIC and
- is_number(x)) or isinstance(x, str)
-
- for i, row in enumerate(rows):
- rows[i] = [x if _check_as_is(x)
- else pprint_thing(x).encode('utf-8') for x in row]
-
- self.writer.writerows([[s for s in row] for row in rows])
- # Fetch UTF-8 output from the queue ...
- data = self.queue.getvalue()
- data = data.decode("utf-8")
- # ... and reencode it into the target encoding
- data = self.encoder.encode(data)
- # write to the target stream
- self.stream.write(data)
- # empty queue
- self.queue.truncate(0)
-
-
def get_dtype_kinds(l):
"""
Parameters
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 322d97ab6b58f..e4aa1eac248d5 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -13,6 +13,7 @@
OrderedDict)
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
+from pandas.io.common import _get_handle, UnicodeWriter
import pandas.core.common as com
import pandas.lib as lib
from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime
@@ -1475,7 +1476,7 @@ def save(self):
f = self.path_or_buf
close = False
else:
- f = com._get_handle(self.path_or_buf, self.mode,
+ f = _get_handle(self.path_or_buf, self.mode,
encoding=self.encoding,
compression=self.compression)
close = True
@@ -1488,7 +1489,7 @@ def save(self):
quotechar=self.quotechar)
if self.encoding is not None:
writer_kwargs['encoding'] = self.encoding
- self.writer = com.UnicodeWriter(f, **writer_kwargs)
+ self.writer = UnicodeWriter(f, **writer_kwargs)
else:
self.writer = csv.writer(f, **writer_kwargs)
diff --git a/pandas/io/common.py b/pandas/io/common.py
index b9cdd44e52555..ad0145492f9b6 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -2,11 +2,14 @@
import sys
import os
+import csv
+import codecs
import zipfile
from contextlib import contextmanager, closing
from pandas.compat import StringIO, string_types, BytesIO
from pandas import compat
+from pandas.core.common import pprint_thing, is_number
if compat.PY3:
@@ -284,3 +287,148 @@ def ZipFile(*args, **kwargs):
yield zf
else:
ZipFile = zipfile.ZipFile
+
+
+def _get_handle(path, mode, encoding=None, compression=None):
+ """Gets file handle for given path and mode.
+ """
+ if compression is not None:
+ if encoding is not None and not compat.PY3:
+ msg = 'encoding + compression not yet supported in Python 2'
+ raise ValueError(msg)
+
+ if compression == 'gzip':
+ import gzip
+ f = gzip.GzipFile(path, mode)
+ elif compression == 'bz2':
+ import bz2
+ f = bz2.BZ2File(path, mode)
+ else:
+ raise ValueError('Unrecognized compression type: %s' %
+ compression)
+ if compat.PY3:
+ from io import TextIOWrapper
+ f = TextIOWrapper(f, encoding=encoding)
+ return f
+ else:
+ if compat.PY3:
+ if encoding:
+ f = open(path, mode, encoding=encoding)
+ else:
+ f = open(path, mode, errors='replace')
+ else:
+ f = open(path, mode)
+
+ return f
+
+
+class UTF8Recoder:
+
+ """
+ Iterator that reads an encoded stream and reencodes the input to UTF-8
+ """
+
+ def __init__(self, f, encoding):
+ self.reader = codecs.getreader(encoding)(f)
+
+ def __iter__(self):
+ return self
+
+ def read(self, bytes=-1):
+ return self.reader.read(bytes).encode("utf-8")
+
+ def readline(self):
+ return self.reader.readline().encode("utf-8")
+
+ def next(self):
+ return next(self.reader).encode("utf-8")
+
+ # Python 3 iterator
+ __next__ = next
+
+
+if compat.PY3: # pragma: no cover
+ def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
+ # ignore encoding
+ return csv.reader(f, dialect=dialect, **kwds)
+
+ def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
+ return csv.writer(f, dialect=dialect, **kwds)
+else:
+ class UnicodeReader:
+
+ """
+ A CSV reader which will iterate over lines in the CSV file "f",
+ which is encoded in the given encoding.
+
+ On Python 3, this is replaced (below) by csv.reader, which handles
+ unicode.
+ """
+
+ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
+ f = UTF8Recoder(f, encoding)
+ self.reader = csv.reader(f, dialect=dialect, **kwds)
+
+ def next(self):
+ row = next(self.reader)
+ return [compat.text_type(s, "utf-8") for s in row]
+
+ # python 3 iterator
+ __next__ = next
+
+ def __iter__(self): # pragma: no cover
+ return self
+
+ class UnicodeWriter:
+
+ """
+ A CSV writer which will write rows to CSV file "f",
+ which is encoded in the given encoding.
+ """
+
+ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
+ # Redirect output to a queue
+ self.queue = StringIO()
+ self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
+ self.stream = f
+ self.encoder = codecs.getincrementalencoder(encoding)()
+ self.quoting = kwds.get("quoting", None)
+
+ def writerow(self, row):
+ def _check_as_is(x):
+ return (self.quoting == csv.QUOTE_NONNUMERIC and
+ is_number(x)) or isinstance(x, str)
+
+ row = [x if _check_as_is(x)
+ else pprint_thing(x).encode("utf-8") for x in row]
+
+ self.writer.writerow([s for s in row])
+ # Fetch UTF-8 output from the queue ...
+ data = self.queue.getvalue()
+ data = data.decode("utf-8")
+ # ... and reencode it into the target encoding
+ data = self.encoder.encode(data)
+ # write to the target stream
+ self.stream.write(data)
+ # empty queue
+ self.queue.truncate(0)
+
+ def writerows(self, rows):
+ def _check_as_is(x):
+ return (self.quoting == csv.QUOTE_NONNUMERIC and
+ is_number(x)) or isinstance(x, str)
+
+ for i, row in enumerate(rows):
+ rows[i] = [x if _check_as_is(x)
+ else pprint_thing(x).encode("utf-8") for x in row]
+
+ self.writer.writerows([[s for s in row] for row in rows])
+ # Fetch UTF-8 output from the queue ...
+ data = self.queue.getvalue()
+ data = data.decode("utf-8")
+ # ... and reencode it into the target encoding
+ data = self.encoder.encode(data)
+ # write to the target stream
+ self.stream.write(data)
+ # empty queue
+ self.queue.truncate(0)
\ No newline at end of file
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8ac1aed9d9af7..fb58c45170c52 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -17,7 +17,8 @@
from pandas.core.common import AbstractMethodError
from pandas.core.config import get_option
from pandas.io.date_converters import generic_parser
-from pandas.io.common import get_filepath_or_buffer, _validate_header_arg
+from pandas.io.common import (get_filepath_or_buffer, _validate_header_arg,
+ _get_handle, UnicodeReader, UTF8Recoder)
from pandas.tseries import tools
from pandas.util.decorators import Appender
@@ -1084,7 +1085,7 @@ def __init__(self, src, **kwds):
if 'utf-16' in (kwds.get('encoding') or ''):
if isinstance(src, compat.string_types):
src = open(src, 'rb')
- src = com.UTF8Recoder(src, kwds['encoding'])
+ src = UTF8Recoder(src, kwds['encoding'])
kwds['encoding'] = 'utf-8'
# #2442
@@ -1420,7 +1421,7 @@ def __init__(self, f, **kwds):
self._comment_lines = []
if isinstance(f, compat.string_types):
- f = com._get_handle(f, 'r', encoding=self.encoding,
+ f = _get_handle(f, 'r', encoding=self.encoding,
compression=self.compression)
elif self.compression:
f = _wrap_compressed(f, self.compression, self.encoding)
@@ -1540,17 +1541,17 @@ class MyDialect(csv.Dialect):
dia.delimiter = sniffed.delimiter
if self.encoding is not None:
self.buf.extend(list(
- com.UnicodeReader(StringIO(line),
- dialect=dia,
- encoding=self.encoding)))
+ UnicodeReader(StringIO(line),
+ dialect=dia,
+ encoding=self.encoding)))
else:
self.buf.extend(list(csv.reader(StringIO(line),
dialect=dia)))
if self.encoding is not None:
- reader = com.UnicodeReader(f, dialect=dia,
- encoding=self.encoding,
- strict=True)
+ reader = UnicodeReader(f, dialect=dia,
+ encoding=self.encoding,
+ strict=True)
else:
reader = csv.reader(f, dialect=dia,
strict=True)
| closes #11271
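After this change the file-handle and UTF-8 CSV helpers are imported from `pandas.io.common`; a small sketch (the path is hypothetical):

```python
from pandas.io.common import _get_handle, UnicodeReader, UnicodeWriter, UTF8Recoder

# open a gzip-compressed file for reading; note that encoding + compression
# together is still unsupported on Python 2, as in the moved implementation
f = _get_handle('data.csv.gz', 'r', compression='gzip')
f.close()
```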
| https://api.github.com/repos/pandas-dev/pandas/pulls/11330 | 2015-10-15T00:01:39Z | 2015-10-15T22:21:38Z | 2015-10-15T22:21:38Z | 2015-10-15T22:21:45Z |
BUG: Bug in DataFrame.replace with a datetime64[ns, tz] and a non-compat to_replace #11326 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index e9614ec4f2290..512aacad8a837 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -72,9 +72,15 @@ Bug Fixes
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
+
- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issue:`11295`)
- Bug in comparisons of Series vs list-likes (:issue:`11339`)
+
+- Bug in ``DataFrame.replace`` with a ``datetime64[ns, tz]`` and a non-compat to_replace (:issue:`11326`, :issue:`11153`)
+
+
+
- Bug in list-like indexing with a mixed-integer Index (:issue:`11320`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 98f9677fb6784..248203c259aaa 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2999,8 +2999,6 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
'{0!r}').format(type(to_replace).__name__)
raise TypeError(msg) # pragma: no cover
- new_data = new_data.convert(copy=not inplace, numeric=False)
-
if inplace:
self._update_inplace(new_data)
else:
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index c8c834180c9f6..ed4d6a6ccd73e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -137,6 +137,11 @@ def get_values(self, dtype=None):
def to_dense(self):
return self.values.view()
+ def to_object_block(self, mgr):
+ """ return myself as an object block """
+ values = self.get_values(dtype=object)
+ return self.make_block(values,klass=ObjectBlock)
+
@property
def fill_value(self):
return np.nan
@@ -215,7 +220,7 @@ def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
- def reshape_nd(self, labels, shape, ref_items):
+ def reshape_nd(self, labels, shape, ref_items, mgr=None):
"""
Parameters
----------
@@ -312,7 +317,7 @@ def delete(self, loc):
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
- def apply(self, func, **kwargs):
+ def apply(self, func, mgr=None, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
@@ -320,13 +325,17 @@ def apply(self, func, **kwargs):
return result
- def fillna(self, value, limit=None, inplace=False, downcast=None):
+ def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
+ """ fillna on the block with the value. If we fail, then convert to ObjectBlock
+ and try again """
+
if not self._can_hold_na:
if inplace:
- return [self]
+ return self
else:
- return [self.copy()]
+ return self.copy()
+ original_value = value
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
@@ -334,9 +343,24 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
- value = self._try_fill(value)
- blocks = self.putmask(mask, value, inplace=inplace)
- return self._maybe_downcast(blocks, downcast)
+ # fillna, but if we cannot coerce, then try again as an ObjectBlock
+ try:
+ values, _, value, _ = self._try_coerce_args(self.values, value)
+ blocks = self.putmask(mask, value, inplace=inplace)
+ blocks = [ b.make_block(values=self._try_coerce_result(b.values)) for b in blocks ]
+ return self._maybe_downcast(blocks, downcast)
+ except (TypeError, ValueError):
+
+ # we can't process the value, but nothing to do
+ if not mask.any():
+ return self if inplace else self.copy()
+
+ # we cannot coerce the underlying object, so
+ # make an ObjectBlock
+ return self.to_object_block(mgr=mgr).fillna(original_value,
+ limit=limit,
+ inplace=inplace,
+ downcast=False)
def _maybe_downcast(self, blocks, downcast=None):
@@ -347,18 +371,14 @@ def _maybe_downcast(self, blocks, downcast=None):
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
- result_blocks = []
- for b in blocks:
- result_blocks.extend(b.downcast(downcast))
+ return _extend_blocks([ b.downcast(downcast) for b in blocks ])
- return result_blocks
-
- def downcast(self, dtypes=None):
+ def downcast(self, dtypes=None, mgr=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
- return [self]
+ return self
values = self.values
@@ -370,12 +390,12 @@ def downcast(self, dtypes=None):
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
- return [self.make_block(nv,
- fastpath=True)]
+ return self.make_block(nv,
+ fastpath=True)
# ndim > 1
if dtypes is None:
- return [self]
+ return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
@@ -409,7 +429,7 @@ def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
- klass=None, **kwargs):
+ klass=None, mgr=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
@@ -474,7 +494,7 @@ def convert(self, copy=True, **kwargs):
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
- return [self.copy()] if copy else [self]
+ return self.copy() if copy else self
def _can_hold_element(self, value):
raise NotImplementedError()
@@ -520,7 +540,7 @@ def _try_operate(self, values):
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
- return values, other
+ return values, False, other, False
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -551,7 +571,7 @@ def to_native_types(self, slicer=None, na_rep='nan', quoting=None, **kwargs):
return values
# block actions ####
- def copy(self, deep=True):
+ def copy(self, deep=True, mgr=None):
values = self.values
if deep:
values = values.copy()
@@ -560,23 +580,45 @@ def copy(self, deep=True):
fastpath=True)
def replace(self, to_replace, value, inplace=False, filter=None,
- regex=False):
+ regex=False, convert=True, mgr=None):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
- mask = com.mask_missing(self.values, to_replace)
- if filter is not None:
- filtered_out = ~self.mgr_locs.isin(filter)
- mask[filtered_out.nonzero()[0]] = False
- if not mask.any():
- if inplace:
- return [self]
- return [self.copy()]
- return self.putmask(mask, value, inplace=inplace)
+ original_to_replace = to_replace
+
+ # try to replace, if we raise an error, convert to ObjectBlock and retry
+ try:
+ values, _, to_replace, _ = self._try_coerce_args(self.values, to_replace)
+ mask = com.mask_missing(values, to_replace)
+ if filter is not None:
+ filtered_out = ~self.mgr_locs.isin(filter)
+ mask[filtered_out.nonzero()[0]] = False
+
+ blocks = self.putmask(mask, value, inplace=inplace)
+ if convert:
+ blocks = [ b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks ]
+ return blocks
+ except (TypeError, ValueError):
- def setitem(self, indexer, value):
+ # we can't process the value, but nothing to do
+ if not mask.any():
+ return self if inplace else self.copy()
+
+ return self.to_object_block(mgr=mgr).replace(to_replace=original_to_replace,
+ value=value,
+ inplace=inplace,
+ filter=filter,
+ regex=regex,
+ convert=convert)
+
+
+ def _replace_single(self, *args, **kwargs):
+ """ no-op on a non-ObjectBlock """
+ return self if kwargs['inplace'] else self.copy()
+
+ def setitem(self, indexer, value, mgr=None):
""" set the value inplace; return a new block (of a possibly different
dtype)
@@ -590,7 +632,7 @@ def setitem(self, indexer, value):
value = np.nan
# coerce args
- values, value = self._try_coerce_args(self.values, value)
+ values, _, value, _ = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
@@ -682,7 +724,7 @@ def _is_empty_indexer(indexer):
return [self]
def putmask(self, mask, new, align=True, inplace=False,
- axis=0, transpose=False):
+ axis=0, transpose=False, mgr=None):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
@@ -797,7 +839,7 @@ def putmask(self, mask, new, align=True, inplace=False,
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
limit_direction='forward',
- fill_value=None, coerce=False, downcast=None, **kwargs):
+ fill_value=None, coerce=False, downcast=None, mgr=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
@@ -824,7 +866,8 @@ def check_int_bool(self, inplace):
limit=limit,
fill_value=fill_value,
coerce=coerce,
- downcast=downcast)
+ downcast=downcast,
+ mgr=mgr)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
@@ -844,13 +887,14 @@ def check_int_bool(self, inplace):
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
+ mgr=mgr,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
- downcast=None):
+ downcast=None, mgr=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
@@ -862,8 +906,8 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
else:
return [self.copy()]
- fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
+ values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
@@ -881,7 +925,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
limit_direction='forward',
- inplace=False, downcast=None, **kwargs):
+ inplace=False, downcast=None, mgr=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
@@ -957,13 +1001,13 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
else:
return self.make_block_same_class(new_values, new_mgr_locs)
- def diff(self, n, axis=1):
+ def diff(self, n, axis=1, mgr=None):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values,
fastpath=True)]
- def shift(self, periods, axis=0):
+ def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
@@ -993,7 +1037,7 @@ def shift(self, periods, axis=0):
return [self.make_block(new_values,
fastpath=True)]
- def eval(self, func, other, raise_on_error=True, try_cast=False):
+ def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None):
"""
evaluate the block; return result block from the result
@@ -1003,6 +1047,7 @@ def eval(self, func, other, raise_on_error=True, try_cast=False):
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
+ try_cast : try casting the results to the input type
Returns
-------
@@ -1032,11 +1077,26 @@ def eval(self, func, other, raise_on_error=True, try_cast=False):
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
- values, other = self._try_coerce_args(transf(values), other)
+ values, values_mask, other, other_mask = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
- return self._try_coerce_result(func(values, other))
+
+ # compute
+ result = func(values, other)
+
+ # mask if needed
+ if isinstance(values_mask, np.ndarray) and values_mask.any():
+ result = result.astype('float64',copy=False)
+ result[values_mask] = np.nan
+ if other_mask is True:
+ result = result.astype('float64',copy=False)
+ result[:] = np.nan
+ elif isinstance(other_mask, np.ndarray) and other_mask.any():
+ result = result.astype('float64',copy=False)
+ result[other_mask.ravel()] = np.nan
+
+ return self._try_coerce_result(result)
# error handler if we have an issue operating with the function
def handle_error():
@@ -1086,7 +1146,7 @@ def handle_error():
fastpath=True,)]
def where(self, other, cond, align=True, raise_on_error=True,
- try_cast=False, axis=0, transpose=False):
+ try_cast=False, axis=0, transpose=False, mgr=None):
"""
evaluate the block; return result block(s) from the result
@@ -1128,22 +1188,22 @@ def where(self, other, cond, align=True, raise_on_error=True,
other = _maybe_convert_string_to_object(other)
# our where function
- def func(c, v, o):
- if c.ravel().all():
- return v
+ def func(cond, values, other):
+ if cond.ravel().all():
+ return values
- v, o = self._try_coerce_args(v, o)
+ values, values_mask, other, other_mask = self._try_coerce_args(values, other)
try:
return self._try_coerce_result(
- expressions.where(c, v, o, raise_on_error=True)
+ expressions.where(cond, values, other, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
- '[%s]' % (repr(o), str(detail)))
+ '[%s]' % (repr(other), str(detail)))
else:
# return the values
- result = np.empty(v.shape, dtype='float64')
+ result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
@@ -1253,6 +1313,34 @@ def get(self, item):
else:
return self.values
+ def putmask(self, mask, new, align=True, inplace=False,
+ axis=0, transpose=False, mgr=None):
+ """
+ putmask the data to the block; we must be a single block and not generate
+ other blocks
+
+ return the resulting block
+
+ Parameters
+ ----------
+ mask : the condition to respect
+ new : a ndarray/object
+ align : boolean, perform alignment on other/cond, default is True
+ inplace : perform inplace modification, default is False
+
+ Returns
+ -------
+ a new block(s), the result of the putmask
+ """
+ new_values = self.values if inplace else self.values.copy()
+ new_values, _, new, _ = self._try_coerce_args(new_values, new)
+
+ if isinstance(new, np.ndarray) and len(new) == len(mask):
+ new = new[mask]
+ new_values[mask] = new
+ new_values = self._try_coerce_result(new_values)
+ return [self.make_block(values=new_values)]
+
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
@@ -1386,45 +1474,56 @@ class TimeDeltaBlock(IntBlock):
def fill_value(self):
return tslib.iNaT
- def _try_fill(self, value):
- """ if we are a NaT, return the actual fill value """
- if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
- value = tslib.iNaT
- elif isinstance(value, Timedelta):
- value = value.value
- elif isinstance(value, np.timedelta64):
- pass
- elif com.is_integer(value):
- # coerce to seconds of timedelta
- value = np.timedelta64(int(value * 1e9))
- elif isinstance(value, timedelta):
- value = np.timedelta64(value)
+ def fillna(self, value, **kwargs):
- return value
+ # allow filling with integers to be
+ # interpreted as seconds
+ if not isinstance(value, np.timedelta64) and com.is_integer(value):
+ value = Timedelta(value,unit='s')
+ return super(TimeDeltaBlock, self).fillna(value, **kwargs)
def _try_coerce_args(self, values, other):
- """ Coerce values and other to float64, with null values converted to
- NaN. values is always ndarray-like, other may not be """
- def masker(v):
- mask = isnull(v)
- v = v.astype('float64')
- v[mask] = np.nan
- return v
-
- values = masker(values)
-
- if is_null_datelike_scalar(other):
- other = np.nan
- elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
- other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
- if other == tslib.iNaT:
- other = np.nan
- elif lib.isscalar(other):
- other = np.float64(other)
+ """
+ Coerce values and other to int64, with null values converted to
+ iNaT. values is always ndarray-like, other may not be
+
+ Parameters
+ ----------
+ values : ndarray-like
+ other : ndarray-like or scalar
+
+ Returns
+ -------
+ base-type values, values mask, base-type other, other mask
+ """
+
+ values_mask = isnull(values)
+ values = values.view('i8')
+ other_mask = False
+
+ if isinstance(other, bool):
+ raise TypeError
+ elif is_null_datelike_scalar(other):
+ other = tslib.iNaT
+ other_mask = True
+ elif isinstance(other, Timedelta):
+ other_mask = isnull(other)
+ other = other.value
+ elif isinstance(other, np.timedelta64):
+ other_mask = isnull(other)
+ other = other.view('i8')
+ elif isinstance(other, timedelta):
+ other = Timedelta(other).value
+ elif isinstance(other, np.ndarray):
+ other_mask = isnull(other)
+ other = other.astype('i8',copy=False).view('i8')
else:
- other = masker(other)
+ # scalar
+ other = Timedelta(other)
+ other_mask = isnull(other)
+ other = other.value
- return values, other
+ return values, values_mask, other, other_mask
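A minimal standalone sketch (not part of this patch; variable names are illustrative) of the i8-view-plus-mask coercion the rewritten _try_coerce_args performs for timedelta data, using only public pandas/numpy APIs:
import numpy as np
import pandas as pd

vals = pd.to_timedelta(['1 day', pd.NaT, '3 days']).values   # timedelta64[ns] ndarray
values_mask = pd.isnull(vals)          # remember which positions hold NaT
i8_values = vals.view('i8')            # operate on the underlying int64 payload
other = pd.Timedelta('12 hours')
other_mask = pd.isnull(other)          # False for a real scalar
i8_other = other.value                 # Timedelta exposes its int64 payload via .value
# downstream code computes on i8_values / i8_other, then restores NaT wherever values_mask is True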
def _try_operate(self, values):
""" return a version to operate on """
@@ -1496,13 +1595,13 @@ def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
- regex=False):
+ regex=False, mgr=None):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
- regex=regex)
+ regex=regex, mgr=mgr)
class ObjectBlock(Block):
@@ -1609,10 +1708,7 @@ def _maybe_downcast(self, blocks, downcast=None):
return blocks
# split and convert the blocks
- result_blocks = []
- for blk in blocks:
- result_blocks.extend(blk.convert(datetime=True, numeric=False))
- return result_blocks
+ return _extend_blocks([ b.convert(datetime=True, numeric=False) for b in blocks ])
def _can_hold_element(self, element):
return True
@@ -1626,38 +1722,53 @@ def should_store(self, value):
np.datetime64, np.bool_)) or is_internal_type(value))
def replace(self, to_replace, value, inplace=False, filter=None,
- regex=False):
- blk = [self]
+ regex=False, convert=True, mgr=None):
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
+ result_blocks = []
+ blocks = [self]
+
if not either_list and com.is_re(to_replace):
- blk[0], = blk[0]._replace_single(to_replace, value,
- inplace=inplace, filter=filter,
- regex=True)
+ return self._replace_single(to_replace, value,
+ inplace=inplace, filter=filter,
+ regex=True, convert=convert, mgr=mgr)
elif not (either_list or regex):
- blk = super(ObjectBlock, self).replace(to_replace, value,
- inplace=inplace,
- filter=filter, regex=regex)
+ return super(ObjectBlock, self).replace(to_replace, value,
+ inplace=inplace,
+ filter=filter, regex=regex,
+ convert=convert, mgr=mgr)
elif both_lists:
for to_rep, v in zip(to_replace, value):
- blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
- filter=filter, regex=regex)
+ result_blocks = []
+ for b in blocks:
+ result = b._replace_single(to_rep, v, inplace=inplace,
+ filter=filter, regex=regex,
+ convert=convert, mgr=mgr)
+ result_blocks = _extend_blocks(result, result_blocks)
+ blocks = result_blocks
+ return result_blocks
+
elif to_rep_is_list and regex:
for to_rep in to_replace:
- blk[0], = blk[0]._replace_single(to_rep, value,
- inplace=inplace,
- filter=filter, regex=regex)
- else:
- blk[0], = blk[0]._replace_single(to_replace, value,
- inplace=inplace, filter=filter,
- regex=regex)
- return blk
+ result_blocks = []
+ for b in blocks:
+ result = b._replace_single(to_rep, value,
+ inplace=inplace,
+ filter=filter, regex=regex,
+ convert=convert, mgr=mgr)
+ result_blocks = _extend_blocks(result, result_blocks)
+ blocks = result_blocks
+ return result_blocks
+
+ return self._replace_single(to_replace, value,
+ inplace=inplace, filter=filter,
+ convert=convert, regex=regex, mgr=mgr)
def _replace_single(self, to_replace, value, inplace=False, filter=None,
- regex=False):
+ regex=False, convert=True, mgr=None):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
@@ -1689,13 +1800,11 @@ def _replace_single(self, to_replace, value, inplace=False, filter=None,
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
- result = super(ObjectBlock, self).replace(to_replace, value,
- inplace=inplace,
- filter=filter,
- regex=regex)
- if not isinstance(result, list):
- result = [result]
- return result
+ return super(ObjectBlock, self).replace(to_replace, value,
+ inplace=inplace,
+ filter=filter,
+ regex=regex,
+ mgr=mgr)
new_values = self.values if inplace else self.values.copy()
@@ -1725,9 +1834,12 @@ def re_replacer(s):
new_values[filt] = f(new_values[filt])
- return [self if inplace else
- self.make_block(new_values,
- fastpath=True)]
+ # convert
+ block = self.make_block(new_values)
+ if convert:
+ block = block.convert(by_item=True,numeric=False)
+
+ return block
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
@@ -1753,7 +1865,7 @@ def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
- return [self.copy() if copy else self]
+ return self.copy() if copy else self
@property
def array_dtype(self):
@@ -1767,16 +1879,16 @@ def _slice(self, slicer):
# return same dims as we currently have
return self.values._slice(slicer)
- def fillna(self, value, limit=None, inplace=False, downcast=None):
+ def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
- return [self.make_block_same_class(values=values.fillna(value=value,
- limit=limit),
- placement=self.mgr_locs)]
+ values = self._try_coerce_result(values.fillna(value=value,
+ limit=limit))
+ return [self.make_block(values=values)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
@@ -1787,7 +1899,7 @@ def interpolate(self, method='pad', axis=0, inplace=False,
limit=limit),
placement=self.mgr_locs)
- def shift(self, periods, axis=0):
+ def shift(self, periods, axis=0, mgr=None):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
@@ -1815,30 +1927,8 @@ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
return self.make_block_same_class(new_values, new_mgr_locs)
- def putmask(self, mask, new, align=True, inplace=False,
- axis=0, transpose=False):
- """ putmask the data to the block; it is possible that we may create a
- new dtype of block
-
- return the resulting block(s)
-
- Parameters
- ----------
- mask : the condition to respect
- new : a ndarray/object
- align : boolean, perform alignment on other/cond, default is True
- inplace : perform inplace modification, default is False
-
- Returns
- -------
- a new block(s), the result of the putmask
- """
- new_values = self.values if inplace else self.values.copy()
- new_values[mask] = new
- return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
-
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
- klass=None):
+ klass=None, mgr=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
@@ -1882,7 +1972,7 @@ def __init__(self, values, placement,
fastpath=True, placement=placement,
**kwargs)
- def _astype(self, dtype, **kwargs):
+ def _astype(self, dtype, mgr=None, **kwargs):
"""
these automatically copy, so copy=True has no effect
raise on an except if raise == True
@@ -1921,22 +2011,52 @@ def _try_operate(self, values):
return values.view('i8')
def _try_coerce_args(self, values, other):
- """ Coerce values and other to dtype 'i8'. NaN and NaT convert to
- the smallest i8, and will correctly round-trip to NaT if converted
- back in _try_coerce_result. values is always ndarray-like, other
- may not be """
+ """
+ Coerce values and other to dtype 'i8'. NaN and NaT convert to
+ the smallest i8, and will correctly round-trip to NaT if converted
+ back in _try_coerce_result. values is always ndarray-like, other
+ may not be
+
+ Parameters
+ ----------
+ values : ndarray-like
+ other : ndarray-like or scalar
+
+ Returns
+ -------
+ base-type values, values mask, base-type other, other mask
+ """
+
+ values_mask = isnull(values)
values = values.view('i8')
+ other_mask = False
- if is_null_datelike_scalar(other):
+ if isinstance(other, bool):
+ raise TypeError
+ elif is_null_datelike_scalar(other):
other = tslib.iNaT
+ other_mask = True
elif isinstance(other, (datetime, np.datetime64, date)):
- other = lib.Timestamp(other).asm8.view('i8')
+ other = lib.Timestamp(other)
+ if getattr(other,'tz') is not None:
+ raise TypeError("cannot coerce a Timestamp with a tz on a naive Block")
+ other_mask = isnull(other)
+ other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
- other = np.array(other, dtype='i8')
+ try:
+ other = np.asarray(other)
+ other_mask = isnull(other)
+
+ other = other.astype('i8',copy=False).view('i8')
+ except ValueError:
- return values, other
+ # coercion issues
+ # let higher levels handle
+ raise TypeError
+
+ return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -1951,52 +2071,6 @@ def _try_coerce_result(self, result):
def fill_value(self):
return tslib.iNaT
- def _try_fill(self, value):
- """ if we are a NaT, return the actual fill value """
- if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
- value = tslib.iNaT
- return value
-
- def fillna(self, value, limit=None,
- inplace=False, downcast=None):
-
- mask = isnull(self.values)
- value = self._try_fill(value)
-
- if limit is not None:
- if self.ndim > 2:
- raise NotImplementedError("number of dimensions for 'fillna' "
- "is currently limited to 2")
- mask[mask.cumsum(self.ndim-1)>limit]=False
-
- if mask.any():
- try:
- return self._fillna_mask(mask, value, inplace=inplace)
- except TypeError:
- pass
- # _fillna_mask raises TypeError when it fails
- # cannot perform inplace op because of object coercion
- values = self.get_values(dtype=object)
- np.putmask(values, mask, value)
- return [self.make_block(values, fastpath=True)]
- else:
- return [self if inplace else self.copy()]
-
- def _fillna_mask(self, mask, value, inplace=False):
- if getattr(value, 'tzinfo', None) is None:
- # Series comes to this path
- values = self.values
- if not inplace:
- values = values.copy()
- try:
- np.putmask(values, mask, value)
- return [self if inplace else
- self.make_block(values, fastpath=True)]
- except (ValueError, TypeError):
- # scalar causes ValueError, and array causes TypeError
- pass
- raise TypeError
-
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
@@ -2068,28 +2142,25 @@ def get_values(self, dtype=None):
.reshape(self.values.shape)
return self.values
- def _fillna_mask(self, mask, value, inplace=False):
- # cannot perform inplace op for internal DatetimeIndex
- my_tz = tslib.get_timezone(self.values.tz)
- value_tz = tslib.get_timezone(getattr(value, 'tzinfo', None))
-
- if (my_tz == value_tz or self.dtype == getattr(value, 'dtype', None)):
- if my_tz == value_tz:
- # hack for PY2.6 / numpy 1.7.1.
- # Other versions can directly use self.values.putmask
- # --------------------------------------
- try:
- value = value.asm8
- except AttributeError:
- value = tslib.Timestamp(value).asm8
- ### ------------------------------------
+ def to_object_block(self, mgr):
+ """
+ return myself as an object block
- try:
- values = self.values.putmask(mask, value)
- return [self.make_block(values, fastpath=True)]
- except ValueError:
- pass
- raise TypeError
+ Since we keep the DTI as a 1-d object, what we return
+ depends on the BlockManager's ndim
+ """
+ values = self.get_values(dtype=object)
+ kwargs = {}
+ if mgr.ndim > 1:
+ values = _block_shape(values,ndim=mgr.ndim)
+ kwargs['ndim'] = mgr.ndim
+ kwargs['placement']=[0]
+ return self.make_block(values, klass=ObjectBlock, **kwargs)
+
+ def replace(self, *args, **kwargs):
+ # if we are forced to ObjectBlock, then don't coerce (to UTC)
+ kwargs['convert'] = False
+ return super(DatetimeTZBlock, self).replace(*args, **kwargs)
def _slice(self, slicer):
""" return a slice of my values """
@@ -2101,22 +2172,46 @@ def _slice(self, slicer):
return self.values[slicer]
def _try_coerce_args(self, values, other):
- """ localize and return i8 for the values """
- values = values.tz_localize(None).asi8
+ """
+ localize and return i8 for the values
+
+ Parameters
+ ----------
+ values : ndarray-like
+ other : ndarray-like or scalar
- if is_null_datelike_scalar(other):
+ Returns
+ -------
+ base-type values, values mask, base-type other, other mask
+ """
+ values_mask = isnull(values)
+ values = values.tz_localize(None).asi8
+ other_mask = False
+
+ if isinstance(other, ABCSeries):
+ other = self._holder(other)
+ other_mask = isnull(other)
+ if isinstance(other, bool):
+ raise TypeError
+ elif is_null_datelike_scalar(other):
other = tslib.iNaT
+ other_mask = True
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.tz_localize(None).asi8
- else:
+ other_mask = isnull(other)
+ elif isinstance(other, (np.datetime64, datetime, date)):
other = lib.Timestamp(other)
- if not getattr(other, 'tz', None):
+ tz = getattr(other, 'tz', None)
+
+ # test we can have an equal time zone
+ if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
- other = other.value
+ other_mask = isnull(other)
+ other = other.tz_localize(None).value
- return values, other
+ return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -2128,7 +2223,7 @@ def _try_coerce_result(self, result):
result = lib.Timestamp(result, tz=self.values.tz)
return result
- def shift(self, periods, axis=0):
+ def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
### think about moving this to the DatetimeIndex. This is a non-freq (number of periods) shift ###
@@ -2210,7 +2305,7 @@ def __len__(self):
except:
return 0
- def copy(self, deep=True):
+ def copy(self, deep=True, mgr=None):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
@@ -2259,7 +2354,7 @@ def interpolate(self, method='pad', axis=0, inplace=False,
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
- def fillna(self, value, limit=None, inplace=False, downcast=None):
+ def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
@@ -2271,7 +2366,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
fill_value=value,
placement=self.mgr_locs)]
- def shift(self, periods, axis=0):
+ def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
@@ -2715,12 +2810,9 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, consolidate
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
+ kwargs['mgr'] = self
applied = getattr(b, f)(**kwargs)
-
- if isinstance(applied, list):
- result_blocks.extend(applied)
- else:
- result_blocks.append(applied)
+ result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
@@ -2768,9 +2860,12 @@ def convert(self, **kwargs):
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
- def replace_list(self, src_list, dest_list, inplace=False, regex=False):
+ def replace_list(self, src_list, dest_list, inplace=False, regex=False, mgr=None):
""" do a list replace """
+ if mgr is None:
+ mgr = self
+
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
@@ -2792,11 +2887,8 @@ def comp(s):
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace,
- regex=regex)
- if isinstance(result, list):
- new_rb.extend(result)
- else:
- new_rb.append(result)
+ regex=regex, mgr=mgr)
+ new_rb = _extend_blocks(result, new_rb)
else:
# get our mask for this element, sized to this
# particular block
@@ -2930,7 +3022,7 @@ def __contains__(self, item):
def nblocks(self):
return len(self.blocks)
- def copy(self, deep=True):
+ def copy(self, deep=True, mgr=None):
"""
Make deep or shallow copy of BlockManager
@@ -4084,15 +4176,12 @@ def _consolidate(blocks):
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
- if isinstance(merged_blocks, list):
- new_blocks.extend(merged_blocks)
- else:
- new_blocks.append(merged_blocks)
-
+ new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
+
if len(blocks) == 1:
return blocks[0]
@@ -4119,6 +4208,22 @@ def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
return blocks
+def _extend_blocks(result, blocks=None):
+ """ return a new extended blocks, givin the result """
+ if blocks is None:
+ blocks = []
+ if isinstance(result, list):
+ for r in result:
+ if isinstance(r, list):
+ blocks.extend(r)
+ else:
+ blocks.append(r)
+ elif isinstance(result, BlockManager):
+ blocks.extend(result.blocks)
+ else:
+ blocks.append(result)
+ return blocks
+
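The helper above only flattens the mixed scalar / list / nested-list results that block methods can now return; a tiny standalone sketch of the same flattening, with plain integers standing in for Block objects:
def extend_blocks_sketch(result, blocks=None):
    # mirrors _extend_blocks: accept a scalar, a list, or a list of lists
    if blocks is None:
        blocks = []
    if isinstance(result, list):
        for r in result:
            if isinstance(r, list):
                blocks.extend(r)
            else:
                blocks.append(r)
    else:
        blocks.append(result)
    return blocks

print(extend_blocks_sketch([[1], 2, [3, 4]]))   # -> [1, 2, 3, 4]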
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 4de641bb67926..4e25b546bddf2 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1839,7 +1839,9 @@ def set_atom_string(self, block, block_items, existing_col, min_itemsize,
nan_rep, encoding):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
- block = block.fillna(nan_rep, downcast=False)[0]
+ block = block.fillna(nan_rep, downcast=False)
+ if isinstance(block, list):
+ block = block[0]
data = block.values
# see if we have a valid string type
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 0972b2e17c48a..13c671e8e4e59 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7221,6 +7221,7 @@ def test_to_csv_chunking(self):
rs = read_csv(filename,index_col=0)
assert_frame_equal(rs, aa)
+ @slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
@@ -9458,18 +9459,20 @@ def test_regex_replace_dict_nested(self):
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})
expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})
- assert_frame_equal(df.replace({'Type': {'Q':0,'T':1}}), expected)
+ result = df.replace({'Type': {'Q':0,'T':1}})
+ assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
+ expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
+ 'c': [nan, nan, nan, 'd']})
+
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
- expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
- 'c': [nan, nan, nan, 'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
@@ -9523,8 +9526,8 @@ def test_regex_replace_series_of_regexes(self):
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
- res = df.replace(0, 'a')
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
+ res = df.replace(0, 'a')
assert_frame_equal(res, expec)
self.assertEqual(res.a.dtype, np.object_)
@@ -9953,6 +9956,56 @@ def test_replace_datetime(self):
result = df.replace(d)
tm.assert_frame_equal(result, expected)
+ def test_replace_datetimetz(self):
+
+ # GH 11326
+ # behaving poorly when presented with a datetime64[ns, tz]
+ df = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'B' : [0, np.nan, 2]})
+ result = df.replace(np.nan,1)
+ expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'B' : Series([0, 1, 2],dtype='float64')})
+ assert_frame_equal(result, expected)
+
+ result = df.fillna(1)
+ assert_frame_equal(result, expected)
+
+ result = df.replace(0,np.nan)
+ expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'B' : [np.nan, np.nan, 2]})
+ assert_frame_equal(result, expected)
+
+ result = df.replace(Timestamp('20130102',tz='US/Eastern'),Timestamp('20130104',tz='US/Eastern'))
+ expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
+ Timestamp('20130104',tz='US/Eastern'),
+ Timestamp('20130103',tz='US/Eastern')],
+ 'B' : [0, np.nan, 2]})
+ assert_frame_equal(result, expected)
+
+ result = df.copy()
+ result.iloc[1,0] = np.nan
+ result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Eastern'))
+ assert_frame_equal(result, expected)
+
+ # coerce to object
+ result = df.copy()
+ result.iloc[1,0] = np.nan
+ result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Pacific'))
+ expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
+ Timestamp('20130104',tz='US/Pacific'),
+ Timestamp('20130103',tz='US/Eastern')],
+ 'B' : [0, np.nan, 2]})
+ assert_frame_equal(result, expected)
+
+ result = df.copy()
+ result.iloc[1,0] = np.nan
+ result = result.replace({'A' : np.nan }, Timestamp('20130104'))
+ expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
+ Timestamp('20130104'),
+ Timestamp('20130103',tz='US/Eastern')],
+ 'B' : [0, np.nan, 2]})
+ assert_frame_equal(result, expected)
+
def test_combine_multiple_frames_dtypes(self):
# GH 2759
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 00553102e172f..fbab0d2a92203 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -306,7 +306,7 @@ def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
- none_coerced = block._try_coerce_args(block.values, None)[1]
+ none_coerced = block._try_coerce_args(block.values, None)[2]
self.assertTrue(pd.Timestamp(none_coerced) is pd.NaT)
# coerce different types of date bojects
@@ -314,7 +314,7 @@ def test_try_coerce_arg(self):
datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
- coerced = block._try_coerce_args(block.values, val)[1]
+ coerced = block._try_coerce_args(block.values, val)[2]
self.assertEqual(np.int64, type(coerced))
self.assertEqual(pd.Timestamp('2010-10-10'), pd.Timestamp(coerced))
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index f8d2c8bfd0dfb..33f8ea080e21c 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4032,6 +4032,21 @@ def test_datetime64_tz_fillna(self):
Timestamp('2011-01-04 10:00', tz=tz)])
self.assert_series_equal(expected, result)
+ # filling with a naive/other zone, coerce to object
+ result = s.fillna(Timestamp('20130101'))
+ expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
+ Timestamp('2013-01-01'),
+ Timestamp('2011-01-03 10:00', tz=tz),
+ Timestamp('2013-01-01')])
+ self.assert_series_equal(expected, result)
+
+ result = s.fillna(Timestamp('20130101',tz='US/Pacific'))
+ expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
+ Timestamp('2013-01-01',tz='US/Pacific'),
+ Timestamp('2011-01-03 10:00', tz=tz),
+ Timestamp('2013-01-01',tz='US/Pacific')])
+ self.assert_series_equal(expected, result)
+
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
| closes #11326
xref #11153
| https://api.github.com/repos/pandas-dev/pandas/pulls/11329 | 2015-10-14T19:26:29Z | 2015-10-17T15:04:49Z | 2015-10-17T15:04:49Z | 2015-10-17T15:04:49Z |
Fix for BUG: multi-index excel header fails if all numeric | diff --git a/asv_bench/asv.conf.json b/asv_bench/asv.conf.json
index dcea59545aae3..6a739873a032f 100644
--- a/asv_bench/asv.conf.json
+++ b/asv_bench/asv.conf.json
@@ -43,6 +43,7 @@
"numexpr": [],
"pytables": [],
"openpyxl": [],
+ "xlsxwriter": [],
"xlrd": [],
"xlwt": []
},
diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 9bece56e15c90..a04a9d0814a30 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -930,6 +930,16 @@ def time_frame_xs_row(self):
self.df.xs(50000)
+class frame_sort_index(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.df = DataFrame(randn(1000000, 2), columns=list('AB'))
+
+ def time_frame_sort_index(self):
+ self.df.sort_index()
+
+
class series_string_vector_slice(object):
goal_time = 0.2
diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index 4b82781fc39d9..eeca2d54381b2 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -320,3 +320,49 @@ def time_nogil_kth_smallest(self):
def run(arr):
algos.kth_smallest(arr, self.k)
run()
+
+class nogil_datetime_fields(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000000
+ self.dti = pd.date_range('1900-01-01', periods=self.N, freq='D')
+ self.period = self.dti.to_period('D')
+ if (not have_real_test_parallel):
+ raise NotImplementedError
+
+ def time_datetime_field_year(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.year
+ run(self.dti)
+
+ def time_datetime_field_day(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.day
+ run(self.dti)
+
+ def time_datetime_field_daysinmonth(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.days_in_month
+ run(self.dti)
+
+ def time_datetime_field_normalize(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.normalize()
+ run(self.dti)
+
+ def time_datetime_to_period(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.to_period('S')
+ run(self.dti)
+
+ def time_period_to_datetime(self):
+ @test_parallel(num_threads=2)
+ def run(period):
+ period.to_timestamp()
+ run(self.period)
diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index 37969a6949157..a40ed3f1d6482 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -71,3 +71,23 @@ def setup(self):
def time_series_nsmallest2(self):
self.s2.nsmallest(3, take_last=True)
self.s2.nsmallest(3, take_last=False)
+
+
+class series_dropna_int64(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.s = Series(np.random.randint(1, 10, 1000000))
+
+ def time_series_dropna_int64(self):
+ self.s.dropna()
+
+class series_dropna_datetime(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.s = Series(pd.date_range('2000-01-01', freq='S', periods=1000000))
+ self.s[np.random.randint(1, 1000000, 100)] = pd.NaT
+
+ def time_series_dropna_datetime(self):
+ self.s.dropna()
diff --git a/ci/install_conda.sh b/ci/install_conda.sh
index 8d99034a86109..6873a1656a8a4 100755
--- a/ci/install_conda.sh
+++ b/ci/install_conda.sh
@@ -73,7 +73,7 @@ bash miniconda.sh -b -p $HOME/miniconda || exit 1
conda config --set always_yes yes --set changeps1 no || exit 1
conda update -q conda || exit 1
conda config --add channels conda-forge || exit 1
-conda config --add channels http://conda.binstar.org/pandas || exit 1
+conda config --add channels http://conda.anaconda.org/pandas || exit 1
conda config --set ssl_verify false || exit 1
# Useful for debugging any issues with conda
diff --git a/ci/requirements-2.7.pip b/ci/requirements-2.7.pip
index 644457d69b37f..9bc533110cea3 100644
--- a/ci/requirements-2.7.pip
+++ b/ci/requirements-2.7.pip
@@ -2,3 +2,5 @@ blosc
httplib2
google-api-python-client == 1.2
python-gflags == 2.0
+pathlib
+py
diff --git a/ci/requirements-2.7_SLOW.pip b/ci/requirements-2.7_SLOW.pip
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/ci/requirements-3.4.build b/ci/requirements-3.4.build
index 6fdffd41bd4c4..8e2a952b840f7 100644
--- a/ci/requirements-3.4.build
+++ b/ci/requirements-3.4.build
@@ -2,3 +2,4 @@ python-dateutil
pytz
numpy=1.8.1
cython
+libgfortran
diff --git a/doc/source/conf.py b/doc/source/conf.py
index f2a033eb82d9c..23095b7f4d24b 100644
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -299,8 +299,9 @@
intersphinx_mapping = {
'statsmodels': ('http://statsmodels.sourceforge.net/devel/', None),
'matplotlib': ('http://matplotlib.org/', None),
- 'python': ('http://docs.python.org/', None),
- 'numpy': ('http://docs.scipy.org/doc/numpy', None)
+ 'python': ('http://docs.python.org/3', None),
+ 'numpy': ('http://docs.scipy.org/doc/numpy', None),
+ 'py': ('http://pylib.readthedocs.org/en/latest/', None)
}
import glob
autosummary_generate = glob.glob("*.rst")
diff --git a/doc/source/io.rst b/doc/source/io.rst
index 014daa3f68dbb..a7c0d31189a75 100644
--- a/doc/source/io.rst
+++ b/doc/source/io.rst
@@ -79,9 +79,10 @@ for some advanced strategies
They can take a number of arguments:
- - ``filepath_or_buffer``: Either a string path to a file, URL
+ - ``filepath_or_buffer``: Either a path to a file (a :class:`python:str`,
+ :class:`python:pathlib.Path`, or :class:`py:py._path.local.LocalPath`), URL
(including http, ftp, and S3 locations), or any object with a ``read``
- method (such as an open file or ``StringIO``).
+ method (such as an open file or :class:`~python:io.StringIO`).
- ``sep`` or ``delimiter``: A delimiter / separator to split fields
on. With ``sep=None``, ``read_csv`` will try to infer the delimiter
automatically in some cases by "sniffing".
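A short, hedged illustration of the newly accepted inputs described above (the file name is hypothetical):
from pathlib import Path
import pandas as pd

p = Path('data.csv')                 # hypothetical file on disk
df = pd.read_csv(p)                  # pathlib.Path objects are now accepted directly
df2 = pd.read_csv(open('data.csv'))  # any object with a read() method keeps working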
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 1eff7d01d9d91..6171e409652f3 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -17,6 +17,7 @@ Highlights include:
Enhancements
~~~~~~~~~~~~
+- ``DatetimeIndex`` now supports conversion to strings with ``astype(str)`` (:issue:`10442`)
- Support for ``compression`` (gzip/bz2) in :method:`DataFrame.to_csv` (:issue:`7615`)
@@ -27,6 +28,10 @@ Enhancements
Other Enhancements
^^^^^^^^^^^^^^^^^^
+- ``pd.read_*`` functions can now also accept :class:`python:pathlib.Path`, or :class:`py:py._path.local.LocalPath`
+ objects for the ``filepath_or_buffer`` argument. (:issue:`11033`)
+- Improve the error message displayed in :func:`pandas.io.gbq.to_gbq` when the DataFrame does not match the schema of the destination table (:issue:`11359`)
+
.. _whatsnew_0171.api:
API changes
@@ -37,17 +42,31 @@ API changes
- Regression from 0.16.2 for output formatting of long floats/nan, restored in (:issue:`11302`)
- Prettyprinting sets (e.g. in DataFrame cells) now uses set literal syntax (``{x, y}``) instead of
Legacy Python syntax (``set([x, y])``) (:issue:`11215`)
+- Indexing with a null key will raise a ``TypeError``, instead of a ``ValueError`` (:issue:`11356`)
.. _whatsnew_0171.deprecations:
Deprecations
^^^^^^^^^^^^
+- The ``pandas.io.ga`` module which implements ``google-analytics`` support is deprecated and will be removed in a future version (:issue:`11308`)
+- Deprecate the ``engine`` keyword from ``.to_csv()``, which will be removed in a future version (:issue:`11274`)
+
+
.. _whatsnew_0171.performance:
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
+- Checking monotonic-ness before sorting on an index (:issue:`11080`)
+- ``Series.dropna`` performance improvement when its dtype can't contain ``NaN`` (:issue:`11159`)
+
+
+- Release the GIL on most datetime field operations (e.g. ``DatetimeIndex.year``, ``Series.dt.year``), normalization, and conversion to and from ``Period``, ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestamp`` (:issue:`11263`)
+
+
+- Improved the performance of ``to_excel`` (:issue:`11352`)
+
.. _whatsnew_0171.bug_fixes:
Bug Fixes
@@ -58,13 +77,19 @@ Bug Fixes
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
-- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issues:`11295`)
+
+- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issue:`11295`)
+- Bug in comparisons of Series vs list-likes (:issue:`11339`)
+- Bug in ``DataFrame.replace`` with a ``datetime64[ns, tz]`` and a non-compat to_replace (:issue:`11326`, :issue:`11153`)
+- Bug in list-like indexing with a mixed-integer Index (:issue:`11320`)
+- Bug in ``pivot_table`` with ``margins=True`` when indexes are of ``Categorical`` dtype (:issue:`10993`)
+- Bug in ``DataFrame.plot`` cannot use hex strings colors (:issue:`10299`)
@@ -88,5 +113,12 @@ Bug Fixes
- Bugs in ``to_excel`` with duplicate columns (:issue:`11007`, :issue:`10982`, :issue:`10970`)
+
- Fixed a bug that prevented the construction of an empty series of dtype
``datetime64[ns, tz]`` (:issue:`11245`).
+
+- Bug in ``read_excel`` with multi-index containing integers (:issue:`11317`)
+
+- Bug in ``to_excel`` with openpyxl 2.2+ and merging (:issue:`11408`)
+
+- Bug in ``DataFrame.to_dict()`` producing a ``np.datetime64`` object instead of ``Timestamp`` when only datetime is present in data (:issue:`11327`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 724843d379f64..c2c50bce04309 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -5,8 +5,6 @@
import re
import collections
import numbers
-import codecs
-import csv
import types
from datetime import datetime, timedelta
from functools import partial
@@ -19,7 +17,7 @@
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
-from pandas.compat import StringIO, BytesIO, range, long, u, zip, map, string_types, iteritems
+from pandas.compat import BytesIO, range, long, u, zip, map, string_types, iteritems
from pandas.core.dtypes import CategoricalDtype, CategoricalDtypeType, DatetimeTZDtype, DatetimeTZDtypeType
from pandas.core.config import get_option
@@ -446,14 +444,24 @@ def mask_missing(arr, values_to_mask):
mask = None
for x in nonna:
if mask is None:
- mask = arr == x
+
+ # numpy elementwise comparison warning
+ if is_numeric_v_string_like(arr, x):
+ mask = False
+ else:
+ mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if np.isscalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
- mask |= arr == x
+
+ # numpy elementwise comparison warning
+ if is_numeric_v_string_like(arr, x):
+ mask |= False
+ else:
+ mask |= arr == x
if na_mask.any():
if mask is None:
@@ -2384,6 +2392,9 @@ def _maybe_make_list(obj):
is_complex = lib.is_complex
+def is_string_like(obj):
+ return isinstance(obj, (compat.text_type, compat.string_types))
+
def is_iterator(obj):
# python 3 generators have __next__ instead of next
return hasattr(obj, 'next') or hasattr(obj, '__next__')
@@ -2527,6 +2538,27 @@ def is_datetime_or_timedelta_dtype(arr_or_dtype):
return issubclass(tipo, (np.datetime64, np.timedelta64))
+def is_numeric_v_string_like(a, b):
+ """
+ numpy doesn't like to compare numeric arrays vs scalar string-likes
+
+ return a boolean result if this is the case for a,b or b,a
+
+ """
+ is_a_array = isinstance(a, np.ndarray)
+ is_b_array = isinstance(b, np.ndarray)
+
+ is_a_numeric_array = is_a_array and is_numeric_dtype(a)
+ is_b_numeric_array = is_b_array and is_numeric_dtype(b)
+
+ is_a_scalar_string_like = not is_a_array and is_string_like(a)
+ is_b_scalar_string_like = not is_b_array and is_string_like(b)
+
+ return (
+ is_a_numeric_array and is_b_scalar_string_like) or (
+ is_b_numeric_array and is_a_scalar_string_like
+ )
+
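The helper above guards against numpy's awkward handling of numeric-array-vs-string comparisons; a quick sketch of the case it detects (exact behaviour varies slightly by numpy version):
import numpy as np

arr = np.array([1, 2, 3])
# depending on the numpy version this either emits a FutureWarning and returns a
# scalar False, or produces an all-False array -- never a meaningful elementwise match
res = arr == 'a'
# mask_missing can therefore skip the comparison entirely when
# is_numeric_v_string_like(arr, 'a') is True and simply use False as the mask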
def is_datetimelike_v_numeric(a, b):
# return if we have an i8 convertible and numeric comparision
if not hasattr(a,'dtype'):
@@ -2808,154 +2840,6 @@ def _all_none(*args):
return True
-class UTF8Recoder:
-
- """
- Iterator that reads an encoded stream and reencodes the input to UTF-8
- """
-
- def __init__(self, f, encoding):
- self.reader = codecs.getreader(encoding)(f)
-
- def __iter__(self):
- return self
-
- def read(self, bytes=-1):
- return self.reader.read(bytes).encode('utf-8')
-
- def readline(self):
- return self.reader.readline().encode('utf-8')
-
- def next(self):
- return next(self.reader).encode("utf-8")
-
- # Python 3 iterator
- __next__ = next
-
-
-def _get_handle(path, mode, encoding=None, compression=None):
- """Gets file handle for given path and mode.
- NOTE: Under Python 3.2, getting a compressed file handle means reading in
- the entire file, decompressing it and decoding it to ``str`` all at once
- and then wrapping it in a StringIO.
- """
- if compression is not None:
- if encoding is not None and not compat.PY3:
- msg = 'encoding + compression not yet supported in Python 2'
- raise ValueError(msg)
-
- if compression == 'gzip':
- import gzip
- f = gzip.GzipFile(path, mode)
- elif compression == 'bz2':
- import bz2
- f = bz2.BZ2File(path, mode)
- else:
- raise ValueError('Unrecognized compression type: %s' %
- compression)
- if compat.PY3:
- from io import TextIOWrapper
- f = TextIOWrapper(f, encoding=encoding)
- return f
- else:
- if compat.PY3:
- if encoding:
- f = open(path, mode, encoding=encoding)
- else:
- f = open(path, mode, errors='replace')
- else:
- f = open(path, mode)
-
- return f
-
-
-if compat.PY3: # pragma: no cover
- def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
- # ignore encoding
- return csv.reader(f, dialect=dialect, **kwds)
-
- def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
- return csv.writer(f, dialect=dialect, **kwds)
-else:
- class UnicodeReader:
-
- """
- A CSV reader which will iterate over lines in the CSV file "f",
- which is encoded in the given encoding.
-
- On Python 3, this is replaced (below) by csv.reader, which handles
- unicode.
- """
-
- def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
- f = UTF8Recoder(f, encoding)
- self.reader = csv.reader(f, dialect=dialect, **kwds)
-
- def next(self):
- row = next(self.reader)
- return [compat.text_type(s, "utf-8") for s in row]
-
- # python 3 iterator
- __next__ = next
-
- def __iter__(self): # pragma: no cover
- return self
-
- class UnicodeWriter:
-
- """
- A CSV writer which will write rows to CSV file "f",
- which is encoded in the given encoding.
- """
-
- def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
- # Redirect output to a queue
- self.queue = StringIO()
- self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
- self.stream = f
- self.encoder = codecs.getincrementalencoder(encoding)()
- self.quoting = kwds.get("quoting", None)
-
- def writerow(self, row):
- def _check_as_is(x):
- return (self.quoting == csv.QUOTE_NONNUMERIC and
- is_number(x)) or isinstance(x, str)
-
- row = [x if _check_as_is(x)
- else pprint_thing(x).encode('utf-8') for x in row]
-
- self.writer.writerow([s for s in row])
- # Fetch UTF-8 output from the queue ...
- data = self.queue.getvalue()
- data = data.decode("utf-8")
- # ... and reencode it into the target encoding
- data = self.encoder.encode(data)
- # write to the target stream
- self.stream.write(data)
- # empty queue
- self.queue.truncate(0)
-
- def writerows(self, rows):
- def _check_as_is(x):
- return (self.quoting == csv.QUOTE_NONNUMERIC and
- is_number(x)) or isinstance(x, str)
-
- for i, row in enumerate(rows):
- rows[i] = [x if _check_as_is(x)
- else pprint_thing(x).encode('utf-8') for x in row]
-
- self.writer.writerows([[s for s in row] for row in rows])
- # Fetch UTF-8 output from the queue ...
- data = self.queue.getvalue()
- data = data.decode("utf-8")
- # ... and reencode it into the target encoding
- data = self.encoder.encode(data)
- # write to the target stream
- self.stream.write(data)
- # empty queue
- self.queue.truncate(0)
-
-
def get_dtype_kinds(l):
"""
Parameters
diff --git a/pandas/core/format.py b/pandas/core/format.py
index bf9b3bc8040de..efa4b182f1133 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -13,6 +13,7 @@
OrderedDict)
from pandas.util.terminal import get_terminal_size
from pandas.core.config import get_option, set_option
+from pandas.io.common import _get_handle, UnicodeWriter
import pandas.core.common as com
import pandas.lib as lib
from pandas.tslib import iNaT, Timestamp, Timedelta, format_array_from_datetime
@@ -23,6 +24,7 @@
import itertools
import csv
+import warnings
common_docstring = """
Parameters
@@ -1264,7 +1266,11 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None,
tupleize_cols=False, quotechar='"', date_format=None,
doublequote=True, escapechar=None, decimal='.'):
- self.engine = engine # remove for 0.13
+ if engine is not None:
+ warnings.warn("'engine' keyword is deprecated and "
+ "will be removed in a future version",
+ FutureWarning, stacklevel=3)
+ self.engine = engine # remove for 0.18
self.obj = obj
if path_or_buf is None:
@@ -1470,8 +1476,8 @@ def save(self):
f = self.path_or_buf
close = False
else:
- f = com._get_handle(self.path_or_buf, self.mode,
- encoding=self.encoding,
+ f = _get_handle(self.path_or_buf, self.mode,
+ encoding=self.encoding,
compression=self.compression)
close = True
@@ -1483,7 +1489,7 @@ def save(self):
quotechar=self.quotechar)
if self.encoding is not None:
writer_kwargs['encoding'] = self.encoding
- self.writer = com.UnicodeWriter(f, **writer_kwargs)
+ self.writer = UnicodeWriter(f, **writer_kwargs)
else:
self.writer = csv.writer(f, **writer_kwargs)
@@ -1702,9 +1708,9 @@ def _format_value(self, val):
if lib.checknull(val):
val = self.na_rep
elif com.is_float(val):
- if np.isposinf(val):
+ if lib.isposinf_scalar(val):
val = self.inf_rep
- elif np.isneginf(val):
+ elif lib.isneginf_scalar(val):
val = '-%s' % self.inf_rep
elif self.float_format is not None:
val = float(self.float_format % val)
@@ -1723,7 +1729,7 @@ def _format_header_mi(self):
return
columns = self.columns
- level_strs = columns.format(sparsify=True, adjoin=False, names=False)
+ level_strs = columns.format(sparsify=self.merge_cells, adjoin=False, names=False)
level_lengths = _get_level_lengths(level_strs)
coloffset = 0
lnum = 0
@@ -1867,8 +1873,9 @@ def _format_hierarchical_rows(self):
# MultiIndex columns require an extra row
# with index names (blank if None) for
- # unambigous round-trip
- if isinstance(self.columns, MultiIndex):
+ # unambiguous round-trip, unless not merging,
+ # in which case the names all go on one row (GH #11328)
+ if isinstance(self.columns, MultiIndex) and self.merge_cells:
self.rowcounter += 1
# if index labels are not empty go ahead and dump
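A hedged sketch of the case the merge_cells change above is aimed at (requires an Excel writer engine to be installed; the output path is hypothetical):
import pandas as pd

cols = pd.MultiIndex.from_product([[2013, 2014], [1, 2]])   # all-numeric header levels
df = pd.DataFrame([[1, 2, 3, 4]], columns=cols)
# with merge_cells=False the header is written without sparsified/merged cells and
# without the extra index-name row that the merged layout needs for a clean round-trip
df.to_excel('multi_header.xlsx', merge_cells=False)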
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 920d9ad96c5b6..827373c9a330b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -802,11 +802,12 @@ def to_dict(self, orient='dict'):
elif orient.lower().startswith('sp'):
return {'index': self.index.tolist(),
'columns': self.columns.tolist(),
- 'data': self.values.tolist()}
+ 'data': lib.map_infer(self.values.ravel(), _maybe_box_datetimelike)
+ .reshape(self.values.shape).tolist()}
elif orient.lower().startswith('s'):
- return dict((k, v) for k, v in compat.iteritems(self))
+ return dict((k, _maybe_box_datetimelike(v)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
- return [dict((k, v) for k, v in zip(self.columns, row))
+ return [dict((k, _maybe_box_datetimelike(v)) for k, v in zip(self.columns, row))
for row in self.values]
elif orient.lower().startswith('i'):
return dict((k, v.to_dict()) for k, v in self.iterrows())
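A brief illustration of the to_dict change (GH 11327): datetime values are now boxed as Timestamp instead of leaking out as numpy.datetime64 — a hedged sketch:
import pandas as pd

df = pd.DataFrame({'ts': pd.date_range('2015-01-01', periods=2)})
rec = df.to_dict('records')[0]['ts']
print(type(rec))    # pandas Timestamp after this change, previously numpy.datetime64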
@@ -3157,6 +3158,15 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
else:
from pandas.core.groupby import _nargsort
+ # GH11080 - Check monotonic-ness before sorting an index
+ # if monotonic (already sorted), return None or copy() according to 'inplace'
+ if (ascending and labels.is_monotonic_increasing) or \
+ (not ascending and labels.is_monotonic_decreasing):
+ if inplace:
+ return
+ else:
+ return self.copy()
+
indexer = _nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
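A hedged sketch of the short-circuit added above (GH 11080): when the index is already monotonic in the requested direction, sort_index can return without computing a sort indexer:
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(5, 2), index=range(5))
print(df.index.is_monotonic_increasing)    # True, so no argsort is needed
same = df.sort_index()                     # returns a copy without re-sorting
rev = df.sort_index(ascending=False)       # index is not monotonic decreasing, so this still sorts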
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 98f9677fb6784..248203c259aaa 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2999,8 +2999,6 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
'{0!r}').format(type(to_replace).__name__)
raise TypeError(msg) # pragma: no cover
- new_data = new_data.convert(copy=not inplace, numeric=False)
-
if inplace:
self._update_inplace(new_data)
else:
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 256ece6539b6f..7049ac33feac6 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -627,6 +627,10 @@ def astype(self, dtype):
return Index(self.values.astype(dtype), name=self.name,
dtype=dtype)
+ def _to_safe_for_reshape(self):
+ """ convert to object if we are a categorical """
+ return self
+
def to_datetime(self, dayfirst=False):
"""
For an Index containing strings or datetime.datetime objects, attempt
@@ -862,9 +866,10 @@ def to_int():
return self._invalid_indexer('label', key)
if is_float(key):
- if not self.is_floating():
- warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
- type(self).__name__), FutureWarning, stacklevel=3)
+ if isnull(key):
+ return self._invalid_indexer('label', key)
+ warnings.warn("scalar indexers for index type {0} should be integers and not floating point".format(
+ type(self).__name__), FutureWarning, stacklevel=3)
return to_int()
return key
@@ -982,10 +987,6 @@ def _convert_list_indexer(self, keyarr, kind=None):
if kind in [None, 'iloc', 'ix'] and is_integer_dtype(keyarr) \
and not self.is_floating() and not isinstance(keyarr, ABCPeriodIndex):
- if self.inferred_type != 'integer':
- keyarr = np.where(keyarr < 0,
- len(self) + keyarr, keyarr)
-
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
@@ -998,6 +999,8 @@ def _convert_list_indexer(self, keyarr, kind=None):
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
+ keyarr = np.where(keyarr < 0,
+ len(self) + keyarr, keyarr)
return keyarr
return None
@@ -3191,6 +3194,10 @@ def duplicated(self, keep='first'):
from pandas.hashtable import duplicated_int64
return duplicated_int64(self.codes.astype('i8'), keep)
+ def _to_safe_for_reshape(self):
+ """ convert to object if we are a categorical """
+ return self.astype('object')
+
def get_loc(self, key, method=None):
"""
Get integer location for requested label
@@ -3723,9 +3730,23 @@ def astype(self, dtype):
return Index(self._values, name=self.name, dtype=dtype)
def _convert_scalar_indexer(self, key, kind=None):
+ """
+ convert a scalar indexer
+
+ Parameters
+ ----------
+ key : label of the slice bound
+ kind : optional, type of the indexing operation (loc/ix/iloc/None)
+
+ right now we are converting
+ floats -> ints if the index supports it
+ """
+
if kind == 'iloc':
- return super(Float64Index, self)._convert_scalar_indexer(key,
- kind=kind)
+ if is_integer(key):
+ return key
+ return super(Float64Index, self)._convert_scalar_indexer(key, kind=kind)
+
return key
def _convert_slice_indexer(self, key, kind=None):
@@ -4278,7 +4299,7 @@ def _reference_duplicate_name(self, name):
Returns True if the name refered to in self.names is duplicated.
"""
# count the times name equals an element in self.names.
- return np.sum(name == np.asarray(self.names)) > 1
+ return sum(name == n for n in self.names) > 1
def _format_native_types(self, **kwargs):
return self.values
@@ -4516,6 +4537,10 @@ def format(self, space=2, sparsify=None, adjoin=True, names=False,
else:
return result_levels
+ def _to_safe_for_reshape(self):
+ """ convert to object if we are a categorical """
+ return self.set_levels([ i._to_safe_for_reshape() for i in self.levels ])
+
def to_hierarchical(self, n_repeat, n_shuffle=1):
"""
Return a MultiIndex reshaped to conform to the
diff --git a/pandas/core/indexing.py b/pandas/core/indexing.py
index 8b4528ef451ef..5eb25a53d4533 100644
--- a/pandas/core/indexing.py
+++ b/pandas/core/indexing.py
@@ -1285,7 +1285,7 @@ def _has_valid_type(self, key, axis):
def error():
if isnull(key):
- raise ValueError(
+ raise TypeError(
"cannot use label indexing with a null key")
raise KeyError("the label [%s] is not in the [%s]" %
(key, self.obj._get_axis_name(axis)))
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index c8c834180c9f6..f1d82ec1f3b2e 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -17,7 +17,7 @@
is_datetime64tz_dtype, is_datetimetz, is_sparse,
array_equivalent, _maybe_convert_string_to_object,
is_categorical, needs_i8_conversion, is_datetimelike_v_numeric,
- is_internal_type)
+ is_numeric_v_string_like, is_internal_type)
from pandas.core.dtypes import DatetimeTZDtype
from pandas.core.index import Index, MultiIndex, _ensure_index
@@ -137,6 +137,11 @@ def get_values(self, dtype=None):
def to_dense(self):
return self.values.view()
+ def to_object_block(self, mgr):
+ """ return myself as an object block """
+ values = self.get_values(dtype=object)
+ return self.make_block(values,klass=ObjectBlock)
+
@property
def fill_value(self):
return np.nan
@@ -215,7 +220,7 @@ def _slice(self, slicer):
""" return a slice of my values """
return self.values[slicer]
- def reshape_nd(self, labels, shape, ref_items):
+ def reshape_nd(self, labels, shape, ref_items, mgr=None):
"""
Parameters
----------
@@ -312,7 +317,7 @@ def delete(self, loc):
self.values = np.delete(self.values, loc, 0)
self.mgr_locs = self.mgr_locs.delete(loc)
- def apply(self, func, **kwargs):
+ def apply(self, func, mgr=None, **kwargs):
""" apply the function to my values; return a block if we are not one """
result = func(self.values, **kwargs)
if not isinstance(result, Block):
@@ -320,13 +325,17 @@ def apply(self, func, **kwargs):
return result
- def fillna(self, value, limit=None, inplace=False, downcast=None):
+ def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
+ """ fillna on the block with the value. If we fail, then convert to ObjectBlock
+ and try again """
+
if not self._can_hold_na:
if inplace:
- return [self]
+ return self
else:
- return [self.copy()]
+ return self.copy()
+ original_value = value
mask = isnull(self.values)
if limit is not None:
if self.ndim > 2:
@@ -334,9 +343,24 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
"is currently limited to 2")
mask[mask.cumsum(self.ndim-1) > limit] = False
- value = self._try_fill(value)
- blocks = self.putmask(mask, value, inplace=inplace)
- return self._maybe_downcast(blocks, downcast)
+ # fillna, but if we cannot coerce, then try again as an ObjectBlock
+ try:
+ values, _, value, _ = self._try_coerce_args(self.values, value)
+ blocks = self.putmask(mask, value, inplace=inplace)
+ blocks = [ b.make_block(values=self._try_coerce_result(b.values)) for b in blocks ]
+ return self._maybe_downcast(blocks, downcast)
+ except (TypeError, ValueError):
+
+ # we can't process the value, but nothing to do
+ if not mask.any():
+ return self if inplace else self.copy()
+
+ # we cannot coerce the underlying object, so
+ # make an ObjectBlock
+ return self.to_object_block(mgr=mgr).fillna(original_value,
+ limit=limit,
+ inplace=inplace,
+ downcast=False)
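A hedged sketch, using only public APIs, of the fallback described in the comments above: when the fill value cannot be coerced into the block's dtype, the block is converted to object and filled there (this is the tz-aware case exercised by the test changes elsewhere in these diffs; behaviour as of this patch):
import pandas as pd

s = pd.Series(pd.date_range('2011-01-01 10:00', periods=3, tz='US/Eastern'))
s[1] = pd.NaT
filled = s.fillna(pd.Timestamp('2013-01-01'))   # naive fill value cannot live in the tz-aware block
print(filled.dtype)                             # object -- filled via the ObjectBlock fallback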
def _maybe_downcast(self, blocks, downcast=None):
@@ -347,18 +371,14 @@ def _maybe_downcast(self, blocks, downcast=None):
elif downcast is None and (self.is_timedelta or self.is_datetime):
return blocks
- result_blocks = []
- for b in blocks:
- result_blocks.extend(b.downcast(downcast))
+ return _extend_blocks([ b.downcast(downcast) for b in blocks ])
- return result_blocks
-
- def downcast(self, dtypes=None):
+ def downcast(self, dtypes=None, mgr=None):
""" try to downcast each item to the dict of dtypes if present """
# turn it off completely
if dtypes is False:
- return [self]
+ return self
values = self.values
@@ -370,12 +390,12 @@ def downcast(self, dtypes=None):
dtypes = 'infer'
nv = _possibly_downcast_to_dtype(values, dtypes)
- return [self.make_block(nv,
- fastpath=True)]
+ return self.make_block(nv,
+ fastpath=True)
# ndim > 1
if dtypes is None:
- return [self]
+ return self
if not (dtypes == 'infer' or isinstance(dtypes, dict)):
raise ValueError("downcast must have a dictionary or 'infer' as "
@@ -409,7 +429,7 @@ def astype(self, dtype, copy=False, raise_on_error=True, values=None, **kwargs):
values=values, **kwargs)
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
- klass=None, **kwargs):
+ klass=None, mgr=None, **kwargs):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
@@ -474,7 +494,7 @@ def convert(self, copy=True, **kwargs):
return a copy of the block (if copy = True)
by definition we are not an ObjectBlock here! """
- return [self.copy()] if copy else [self]
+ return self.copy() if copy else self
def _can_hold_element(self, value):
raise NotImplementedError()
@@ -520,7 +540,7 @@ def _try_operate(self, values):
def _try_coerce_args(self, values, other):
""" provide coercion to our input arguments """
- return values, other
+ return values, False, other, False
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -551,7 +571,7 @@ def to_native_types(self, slicer=None, na_rep='nan', quoting=None, **kwargs):
return values
# block actions ####
- def copy(self, deep=True):
+ def copy(self, deep=True, mgr=None):
values = self.values
if deep:
values = values.copy()
@@ -560,23 +580,45 @@ def copy(self, deep=True):
fastpath=True)
def replace(self, to_replace, value, inplace=False, filter=None,
- regex=False):
+ regex=False, convert=True, mgr=None):
""" replace the to_replace value with value, possible to create new
blocks here this is just a call to putmask. regex is not used here.
It is used in ObjectBlocks. It is here for API
compatibility."""
- mask = com.mask_missing(self.values, to_replace)
- if filter is not None:
- filtered_out = ~self.mgr_locs.isin(filter)
- mask[filtered_out.nonzero()[0]] = False
- if not mask.any():
- if inplace:
- return [self]
- return [self.copy()]
- return self.putmask(mask, value, inplace=inplace)
+ original_to_replace = to_replace
+
+ # try to replace, if we raise an error, convert to ObjectBlock and retry
+ try:
+ values, _, to_replace, _ = self._try_coerce_args(self.values, to_replace)
+ mask = com.mask_missing(values, to_replace)
+ if filter is not None:
+ filtered_out = ~self.mgr_locs.isin(filter)
+ mask[filtered_out.nonzero()[0]] = False
+
+ blocks = self.putmask(mask, value, inplace=inplace)
+ if convert:
+ blocks = [ b.convert(by_item=True, numeric=False, copy=not inplace) for b in blocks ]
+ return blocks
+ except (TypeError, ValueError):
+
+            # we could not coerce the value, but there is nothing to replace anyway
+ if not mask.any():
+ return self if inplace else self.copy()
- def setitem(self, indexer, value):
+ return self.to_object_block(mgr=mgr).replace(to_replace=original_to_replace,
+ value=value,
+ inplace=inplace,
+ filter=filter,
+ regex=regex,
+ convert=convert)
+
+
+ def _replace_single(self, *args, **kwargs):
+ """ no-op on a non-ObjectBlock """
+ return self if kwargs['inplace'] else self.copy()
+
+ def setitem(self, indexer, value, mgr=None):
""" set the value inplace; return a new block (of a possibly different
dtype)
@@ -590,7 +632,7 @@ def setitem(self, indexer, value):
value = np.nan
# coerce args
- values, value = self._try_coerce_args(self.values, value)
+ values, _, value, _ = self._try_coerce_args(self.values, value)
arr_value = np.array(value)
# cast the values to a type that can hold nan (if necessary)
@@ -682,7 +724,7 @@ def _is_empty_indexer(indexer):
return [self]
def putmask(self, mask, new, align=True, inplace=False,
- axis=0, transpose=False):
+ axis=0, transpose=False, mgr=None):
""" putmask the data to the block; it is possible that we may create a
new dtype of block
@@ -797,7 +839,7 @@ def putmask(self, mask, new, align=True, inplace=False,
def interpolate(self, method='pad', axis=0, index=None,
values=None, inplace=False, limit=None,
limit_direction='forward',
- fill_value=None, coerce=False, downcast=None, **kwargs):
+ fill_value=None, coerce=False, downcast=None, mgr=None, **kwargs):
def check_int_bool(self, inplace):
# Only FloatBlocks will contain NaNs.
@@ -824,7 +866,8 @@ def check_int_bool(self, inplace):
limit=limit,
fill_value=fill_value,
coerce=coerce,
- downcast=downcast)
+ downcast=downcast,
+ mgr=mgr)
# try an interp method
try:
m = com._clean_interp_method(method, **kwargs)
@@ -844,13 +887,14 @@ def check_int_bool(self, inplace):
fill_value=fill_value,
inplace=inplace,
downcast=downcast,
+ mgr=mgr,
**kwargs)
raise ValueError("invalid method '{0}' to interpolate.".format(method))
def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, coerce=False,
- downcast=None):
+ downcast=None, mgr=None):
""" fillna but using the interpolate machinery """
# if we are coercing, then don't force the conversion
@@ -862,8 +906,8 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
else:
return [self.copy()]
- fill_value = self._try_fill(fill_value)
values = self.values if inplace else self.values.copy()
+ values, _, fill_value, _ = self._try_coerce_args(values, fill_value)
values = self._try_operate(values)
values = com.interpolate_2d(values,
method=method,
@@ -881,7 +925,7 @@ def _interpolate_with_fill(self, method='pad', axis=0, inplace=False,
def _interpolate(self, method=None, index=None, values=None,
fill_value=None, axis=0, limit=None,
limit_direction='forward',
- inplace=False, downcast=None, **kwargs):
+ inplace=False, downcast=None, mgr=None, **kwargs):
""" interpolate using scipy wrappers """
data = self.values if inplace else self.values.copy()
@@ -957,13 +1001,13 @@ def take_nd(self, indexer, axis, new_mgr_locs=None, fill_tuple=None):
else:
return self.make_block_same_class(new_values, new_mgr_locs)
- def diff(self, n, axis=1):
+ def diff(self, n, axis=1, mgr=None):
""" return block for the diff of the values """
new_values = com.diff(self.values, n, axis=axis)
return [self.make_block(values=new_values,
fastpath=True)]
- def shift(self, periods, axis=0):
+ def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods, possibly upcast """
# convert integer to float if necessary. need to do a lot more than
@@ -993,7 +1037,7 @@ def shift(self, periods, axis=0):
return [self.make_block(new_values,
fastpath=True)]
- def eval(self, func, other, raise_on_error=True, try_cast=False):
+ def eval(self, func, other, raise_on_error=True, try_cast=False, mgr=None):
"""
evaluate the block; return result block from the result
@@ -1003,6 +1047,7 @@ def eval(self, func, other, raise_on_error=True, try_cast=False):
other : a ndarray/object
raise_on_error : if True, raise when I can't perform the function,
False by default (and just return the data that we had coming in)
+ try_cast : try casting the results to the input type
Returns
-------
@@ -1032,11 +1077,34 @@ def eval(self, func, other, raise_on_error=True, try_cast=False):
transf = (lambda x: x.T) if is_transposed else (lambda x: x)
# coerce/transpose the args if needed
- values, other = self._try_coerce_args(transf(values), other)
+ values, values_mask, other, other_mask = self._try_coerce_args(transf(values), other)
# get the result, may need to transpose the other
def get_result(other):
- return self._try_coerce_result(func(values, other))
+
+            # avoid numpy warning of comparisons against None
+ if other is None:
+ result = not func.__name__ == 'eq'
+
+ # avoid numpy warning of elementwise comparisons to object
+ elif is_numeric_v_string_like(values, other):
+ result = False
+
+ else:
+ result = func(values, other)
+
+ # mask if needed
+ if isinstance(values_mask, np.ndarray) and values_mask.any():
+ result = result.astype('float64',copy=False)
+ result[values_mask] = np.nan
+ if other_mask is True:
+ result = result.astype('float64',copy=False)
+ result[:] = np.nan
+ elif isinstance(other_mask, np.ndarray) and other_mask.any():
+ result = result.astype('float64',copy=False)
+ result[other_mask.ravel()] = np.nan
+
+ return self._try_coerce_result(result)
# error handler if we have an issue operating with the function
def handle_error():
@@ -1086,7 +1154,7 @@ def handle_error():
fastpath=True,)]
def where(self, other, cond, align=True, raise_on_error=True,
- try_cast=False, axis=0, transpose=False):
+ try_cast=False, axis=0, transpose=False, mgr=None):
"""
evaluate the block; return result block(s) from the result
@@ -1128,22 +1196,22 @@ def where(self, other, cond, align=True, raise_on_error=True,
other = _maybe_convert_string_to_object(other)
# our where function
- def func(c, v, o):
- if c.ravel().all():
- return v
+ def func(cond, values, other):
+ if cond.ravel().all():
+ return values
- v, o = self._try_coerce_args(v, o)
+ values, values_mask, other, other_mask = self._try_coerce_args(values, other)
try:
return self._try_coerce_result(
- expressions.where(c, v, o, raise_on_error=True)
+ expressions.where(cond, values, other, raise_on_error=True)
)
except Exception as detail:
if raise_on_error:
raise TypeError('Could not operate [%s] with block values '
- '[%s]' % (repr(o), str(detail)))
+ '[%s]' % (repr(other), str(detail)))
else:
# return the values
- result = np.empty(v.shape, dtype='float64')
+ result = np.empty(values.shape, dtype='float64')
result.fill(np.nan)
return result
@@ -1253,6 +1321,34 @@ def get(self, item):
else:
return self.values
+ def putmask(self, mask, new, align=True, inplace=False,
+ axis=0, transpose=False, mgr=None):
+ """
+ putmask the data to the block; we must be a single block and not generate
+ other blocks
+
+ return the resulting block
+
+ Parameters
+ ----------
+ mask : the condition to respect
+ new : a ndarray/object
+ align : boolean, perform alignment on other/cond, default is True
+ inplace : perform inplace modification, default is False
+
+ Returns
+ -------
+ a new block(s), the result of the putmask
+ """
+ new_values = self.values if inplace else self.values.copy()
+ new_values, _, new, _ = self._try_coerce_args(new_values, new)
+
+ if isinstance(new, np.ndarray) and len(new) == len(mask):
+ new = new[mask]
+ new_values[mask] = new
+ new_values = self._try_coerce_result(new_values)
+ return [self.make_block(values=new_values)]
+
def _slice(self, slicer):
""" return a slice of my values (but densify first) """
return self.get_values()[slicer]
@@ -1386,45 +1482,56 @@ class TimeDeltaBlock(IntBlock):
def fill_value(self):
return tslib.iNaT
- def _try_fill(self, value):
- """ if we are a NaT, return the actual fill value """
- if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
- value = tslib.iNaT
- elif isinstance(value, Timedelta):
- value = value.value
- elif isinstance(value, np.timedelta64):
- pass
- elif com.is_integer(value):
- # coerce to seconds of timedelta
- value = np.timedelta64(int(value * 1e9))
- elif isinstance(value, timedelta):
- value = np.timedelta64(value)
+ def fillna(self, value, **kwargs):
- return value
+ # allow filling with integers to be
+ # interpreted as seconds
+ if not isinstance(value, np.timedelta64) and com.is_integer(value):
+ value = Timedelta(value,unit='s')
+ return super(TimeDeltaBlock, self).fillna(value, **kwargs)
def _try_coerce_args(self, values, other):
- """ Coerce values and other to float64, with null values converted to
- NaN. values is always ndarray-like, other may not be """
- def masker(v):
- mask = isnull(v)
- v = v.astype('float64')
- v[mask] = np.nan
- return v
-
- values = masker(values)
-
- if is_null_datelike_scalar(other):
- other = np.nan
- elif isinstance(other, (np.timedelta64, Timedelta, timedelta)):
- other = _coerce_scalar_to_timedelta_type(other, unit='s', box=False).item()
- if other == tslib.iNaT:
- other = np.nan
- elif lib.isscalar(other):
- other = np.float64(other)
+ """
+ Coerce values and other to int64, with null values converted to
+ iNaT. values is always ndarray-like, other may not be
+
+ Parameters
+ ----------
+ values : ndarray-like
+ other : ndarray-like or scalar
+
+ Returns
+ -------
+ base-type values, values mask, base-type other, other mask
+ """
+
+ values_mask = isnull(values)
+ values = values.view('i8')
+ other_mask = False
+
+ if isinstance(other, bool):
+ raise TypeError
+ elif is_null_datelike_scalar(other):
+ other = tslib.iNaT
+ other_mask = True
+ elif isinstance(other, Timedelta):
+ other_mask = isnull(other)
+ other = other.value
+ elif isinstance(other, np.timedelta64):
+ other_mask = isnull(other)
+ other = other.view('i8')
+ elif isinstance(other, timedelta):
+ other = Timedelta(other).value
+ elif isinstance(other, np.ndarray):
+ other_mask = isnull(other)
+ other = other.astype('i8',copy=False).view('i8')
else:
- other = masker(other)
+ # scalar
+ other = Timedelta(other)
+ other_mask = isnull(other)
+ other = other.value
- return values, other
+ return values, values_mask, other, other_mask
def _try_operate(self, values):
""" return a version to operate on """
@@ -1496,13 +1603,13 @@ def should_store(self, value):
return issubclass(value.dtype.type, np.bool_)
def replace(self, to_replace, value, inplace=False, filter=None,
- regex=False):
+ regex=False, mgr=None):
to_replace_values = np.atleast_1d(to_replace)
if not np.can_cast(to_replace_values, bool):
return self
return super(BoolBlock, self).replace(to_replace, value,
inplace=inplace, filter=filter,
- regex=regex)
+ regex=regex, mgr=mgr)
class ObjectBlock(Block):
@@ -1609,10 +1716,7 @@ def _maybe_downcast(self, blocks, downcast=None):
return blocks
# split and convert the blocks
- result_blocks = []
- for blk in blocks:
- result_blocks.extend(blk.convert(datetime=True, numeric=False))
- return result_blocks
+ return _extend_blocks([ b.convert(datetime=True, numeric=False) for b in blocks ])
def _can_hold_element(self, element):
return True
@@ -1626,38 +1730,53 @@ def should_store(self, value):
np.datetime64, np.bool_)) or is_internal_type(value))
def replace(self, to_replace, value, inplace=False, filter=None,
- regex=False):
- blk = [self]
+ regex=False, convert=True, mgr=None):
to_rep_is_list = com.is_list_like(to_replace)
value_is_list = com.is_list_like(value)
both_lists = to_rep_is_list and value_is_list
either_list = to_rep_is_list or value_is_list
+ result_blocks = []
+ blocks = [self]
+
if not either_list and com.is_re(to_replace):
- blk[0], = blk[0]._replace_single(to_replace, value,
- inplace=inplace, filter=filter,
- regex=True)
+ return self._replace_single(to_replace, value,
+ inplace=inplace, filter=filter,
+ regex=True, convert=convert, mgr=mgr)
elif not (either_list or regex):
- blk = super(ObjectBlock, self).replace(to_replace, value,
- inplace=inplace,
- filter=filter, regex=regex)
+ return super(ObjectBlock, self).replace(to_replace, value,
+ inplace=inplace,
+ filter=filter, regex=regex,
+ convert=convert, mgr=mgr)
elif both_lists:
for to_rep, v in zip(to_replace, value):
- blk[0], = blk[0]._replace_single(to_rep, v, inplace=inplace,
- filter=filter, regex=regex)
+ result_blocks = []
+ for b in blocks:
+ result = b._replace_single(to_rep, v, inplace=inplace,
+ filter=filter, regex=regex,
+ convert=convert, mgr=mgr)
+ result_blocks = _extend_blocks(result, result_blocks)
+ blocks = result_blocks
+ return result_blocks
+
elif to_rep_is_list and regex:
for to_rep in to_replace:
- blk[0], = blk[0]._replace_single(to_rep, value,
- inplace=inplace,
- filter=filter, regex=regex)
- else:
- blk[0], = blk[0]._replace_single(to_replace, value,
- inplace=inplace, filter=filter,
- regex=regex)
- return blk
+ result_blocks = []
+ for b in blocks:
+ result = b._replace_single(to_rep, value,
+ inplace=inplace,
+ filter=filter, regex=regex,
+ convert=convert, mgr=mgr)
+ result_blocks = _extend_blocks(result, result_blocks)
+ blocks = result_blocks
+ return result_blocks
+
+ return self._replace_single(to_replace, value,
+ inplace=inplace, filter=filter,
+ convert=convert, regex=regex, mgr=mgr)
def _replace_single(self, to_replace, value, inplace=False, filter=None,
- regex=False):
+ regex=False, convert=True, mgr=None):
# to_replace is regex compilable
to_rep_re = regex and com.is_re_compilable(to_replace)
@@ -1689,13 +1808,11 @@ def _replace_single(self, to_replace, value, inplace=False, filter=None,
else:
# if the thing to replace is not a string or compiled regex call
# the superclass method -> to_replace is some kind of object
- result = super(ObjectBlock, self).replace(to_replace, value,
- inplace=inplace,
- filter=filter,
- regex=regex)
- if not isinstance(result, list):
- result = [result]
- return result
+ return super(ObjectBlock, self).replace(to_replace, value,
+ inplace=inplace,
+ filter=filter,
+ regex=regex,
+ mgr=mgr)
new_values = self.values if inplace else self.values.copy()
@@ -1725,9 +1842,12 @@ def re_replacer(s):
new_values[filt] = f(new_values[filt])
- return [self if inplace else
- self.make_block(new_values,
- fastpath=True)]
+ # convert
+ block = self.make_block(new_values)
+ if convert:
+ block = block.convert(by_item=True,numeric=False)
+
+ return block
class CategoricalBlock(NonConsolidatableMixIn, ObjectBlock):
__slots__ = ()
@@ -1753,7 +1873,7 @@ def to_dense(self):
return self.values.to_dense().view()
def convert(self, copy=True, **kwargs):
- return [self.copy() if copy else self]
+ return self.copy() if copy else self
@property
def array_dtype(self):
@@ -1767,16 +1887,16 @@ def _slice(self, slicer):
# return same dims as we currently have
return self.values._slice(slicer)
- def fillna(self, value, limit=None, inplace=False, downcast=None):
+ def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
"not been implemented yet")
values = self.values if inplace else self.values.copy()
- return [self.make_block_same_class(values=values.fillna(value=value,
- limit=limit),
- placement=self.mgr_locs)]
+ values = self._try_coerce_result(values.fillna(value=value,
+ limit=limit))
+ return [self.make_block(values=values)]
def interpolate(self, method='pad', axis=0, inplace=False,
limit=None, fill_value=None, **kwargs):
@@ -1787,7 +1907,7 @@ def interpolate(self, method='pad', axis=0, inplace=False,
limit=limit),
placement=self.mgr_locs)
- def shift(self, periods, axis=0):
+ def shift(self, periods, axis=0, mgr=None):
return self.make_block_same_class(values=self.values.shift(periods),
placement=self.mgr_locs)
@@ -1815,30 +1935,8 @@ def take_nd(self, indexer, axis=0, new_mgr_locs=None, fill_tuple=None):
return self.make_block_same_class(new_values, new_mgr_locs)
- def putmask(self, mask, new, align=True, inplace=False,
- axis=0, transpose=False):
- """ putmask the data to the block; it is possible that we may create a
- new dtype of block
-
- return the resulting block(s)
-
- Parameters
- ----------
- mask : the condition to respect
- new : a ndarray/object
- align : boolean, perform alignment on other/cond, default is True
- inplace : perform inplace modification, default is False
-
- Returns
- -------
- a new block(s), the result of the putmask
- """
- new_values = self.values if inplace else self.values.copy()
- new_values[mask] = new
- return [self.make_block_same_class(values=new_values, placement=self.mgr_locs)]
-
def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
- klass=None):
+ klass=None, mgr=None):
"""
Coerce to the new type (if copy=True, return a new copy)
raise on an except if raise == True
@@ -1882,7 +1980,7 @@ def __init__(self, values, placement,
fastpath=True, placement=placement,
**kwargs)
- def _astype(self, dtype, **kwargs):
+ def _astype(self, dtype, mgr=None, **kwargs):
"""
these automatically copy, so copy=True has no effect
raise on an except if raise == True
@@ -1921,22 +2019,52 @@ def _try_operate(self, values):
return values.view('i8')
def _try_coerce_args(self, values, other):
- """ Coerce values and other to dtype 'i8'. NaN and NaT convert to
- the smallest i8, and will correctly round-trip to NaT if converted
- back in _try_coerce_result. values is always ndarray-like, other
- may not be """
+ """
+ Coerce values and other to dtype 'i8'. NaN and NaT convert to
+ the smallest i8, and will correctly round-trip to NaT if converted
+ back in _try_coerce_result. values is always ndarray-like, other
+ may not be
+
+ Parameters
+ ----------
+ values : ndarray-like
+ other : ndarray-like or scalar
+
+ Returns
+ -------
+ base-type values, values mask, base-type other, other mask
+ """
+
+ values_mask = isnull(values)
values = values.view('i8')
+ other_mask = False
- if is_null_datelike_scalar(other):
+ if isinstance(other, bool):
+ raise TypeError
+ elif is_null_datelike_scalar(other):
other = tslib.iNaT
+ other_mask = True
elif isinstance(other, (datetime, np.datetime64, date)):
- other = lib.Timestamp(other).asm8.view('i8')
+ other = lib.Timestamp(other)
+ if getattr(other,'tz') is not None:
+ raise TypeError("cannot coerce a Timestamp with a tz on a naive Block")
+ other_mask = isnull(other)
+ other = other.asm8.view('i8')
elif hasattr(other, 'dtype') and com.is_integer_dtype(other):
other = other.view('i8')
else:
- other = np.array(other, dtype='i8')
+ try:
+ other = np.asarray(other)
+ other_mask = isnull(other)
- return values, other
+ other = other.astype('i8',copy=False).view('i8')
+ except ValueError:
+
+ # coercion issues
+ # let higher levels handle
+ raise TypeError
+
+ return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -1951,52 +2079,6 @@ def _try_coerce_result(self, result):
def fill_value(self):
return tslib.iNaT
- def _try_fill(self, value):
- """ if we are a NaT, return the actual fill value """
- if isinstance(value, type(tslib.NaT)) or np.array(isnull(value)).all():
- value = tslib.iNaT
- return value
-
- def fillna(self, value, limit=None,
- inplace=False, downcast=None):
-
- mask = isnull(self.values)
- value = self._try_fill(value)
-
- if limit is not None:
- if self.ndim > 2:
- raise NotImplementedError("number of dimensions for 'fillna' "
- "is currently limited to 2")
- mask[mask.cumsum(self.ndim-1)>limit]=False
-
- if mask.any():
- try:
- return self._fillna_mask(mask, value, inplace=inplace)
- except TypeError:
- pass
- # _fillna_mask raises TypeError when it fails
- # cannot perform inplace op because of object coercion
- values = self.get_values(dtype=object)
- np.putmask(values, mask, value)
- return [self.make_block(values, fastpath=True)]
- else:
- return [self if inplace else self.copy()]
-
- def _fillna_mask(self, mask, value, inplace=False):
- if getattr(value, 'tzinfo', None) is None:
- # Series comes to this path
- values = self.values
- if not inplace:
- values = values.copy()
- try:
- np.putmask(values, mask, value)
- return [self if inplace else
- self.make_block(values, fastpath=True)]
- except (ValueError, TypeError):
- # scalar causes ValueError, and array causes TypeError
- pass
- raise TypeError
-
def to_native_types(self, slicer=None, na_rep=None, date_format=None,
quoting=None, **kwargs):
""" convert to our native types format, slicing if desired """
@@ -2068,28 +2150,25 @@ def get_values(self, dtype=None):
.reshape(self.values.shape)
return self.values
- def _fillna_mask(self, mask, value, inplace=False):
- # cannot perform inplace op for internal DatetimeIndex
- my_tz = tslib.get_timezone(self.values.tz)
- value_tz = tslib.get_timezone(getattr(value, 'tzinfo', None))
-
- if (my_tz == value_tz or self.dtype == getattr(value, 'dtype', None)):
- if my_tz == value_tz:
- # hack for PY2.6 / numpy 1.7.1.
- # Other versions can directly use self.values.putmask
- # --------------------------------------
- try:
- value = value.asm8
- except AttributeError:
- value = tslib.Timestamp(value).asm8
- ### ------------------------------------
+ def to_object_block(self, mgr):
+ """
+ return myself as an object block
- try:
- values = self.values.putmask(mask, value)
- return [self.make_block(values, fastpath=True)]
- except ValueError:
- pass
- raise TypeError
+        Since we keep the DTI as a 1-d object, this differs
+        depending on the BlockManager's ndim
+ """
+ values = self.get_values(dtype=object)
+ kwargs = {}
+ if mgr.ndim > 1:
+ values = _block_shape(values,ndim=mgr.ndim)
+ kwargs['ndim'] = mgr.ndim
+ kwargs['placement']=[0]
+ return self.make_block(values, klass=ObjectBlock, **kwargs)
+
+ def replace(self, *args, **kwargs):
+ # if we are forced to ObjectBlock, then don't coerce (to UTC)
+ kwargs['convert'] = False
+ return super(DatetimeTZBlock, self).replace(*args, **kwargs)
def _slice(self, slicer):
""" return a slice of my values """
@@ -2101,22 +2180,46 @@ def _slice(self, slicer):
return self.values[slicer]
def _try_coerce_args(self, values, other):
- """ localize and return i8 for the values """
- values = values.tz_localize(None).asi8
+ """
+ localize and return i8 for the values
+
+ Parameters
+ ----------
+ values : ndarray-like
+ other : ndarray-like or scalar
- if is_null_datelike_scalar(other):
+ Returns
+ -------
+ base-type values, values mask, base-type other, other mask
+ """
+ values_mask = isnull(values)
+ values = values.tz_localize(None).asi8
+ other_mask = False
+
+ if isinstance(other, ABCSeries):
+ other = self._holder(other)
+ other_mask = isnull(other)
+ if isinstance(other, bool):
+ raise TypeError
+ elif is_null_datelike_scalar(other):
other = tslib.iNaT
+ other_mask = True
elif isinstance(other, self._holder):
if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.tz_localize(None).asi8
- else:
+ other_mask = isnull(other)
+ elif isinstance(other, (np.datetime64, datetime, date)):
other = lib.Timestamp(other)
- if not getattr(other, 'tz', None):
+ tz = getattr(other, 'tz', None)
+
+            # require that the time zones match
+ if tz is None or str(tz) != str(self.values.tz):
raise ValueError("incompatible or non tz-aware value")
- other = other.value
+ other_mask = isnull(other)
+ other = other.tz_localize(None).value
- return values, other
+ return values, values_mask, other, other_mask
def _try_coerce_result(self, result):
""" reverse of try_coerce_args """
@@ -2128,7 +2231,7 @@ def _try_coerce_result(self, result):
result = lib.Timestamp(result, tz=self.values.tz)
return result
- def shift(self, periods, axis=0):
+ def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
### think about moving this to the DatetimeIndex. This is a non-freq (number of periods) shift ###
@@ -2210,7 +2313,7 @@ def __len__(self):
except:
return 0
- def copy(self, deep=True):
+ def copy(self, deep=True, mgr=None):
return self.make_block_same_class(values=self.values,
sparse_index=self.sp_index,
kind=self.kind, copy=deep,
@@ -2259,7 +2362,7 @@ def interpolate(self, method='pad', axis=0, inplace=False,
return self.make_block_same_class(values=values,
placement=self.mgr_locs)
- def fillna(self, value, limit=None, inplace=False, downcast=None):
+ def fillna(self, value, limit=None, inplace=False, downcast=None, mgr=None):
# we may need to upcast our fill to match our dtype
if limit is not None:
raise NotImplementedError("specifying a limit for 'fillna' has "
@@ -2271,7 +2374,7 @@ def fillna(self, value, limit=None, inplace=False, downcast=None):
fill_value=value,
placement=self.mgr_locs)]
- def shift(self, periods, axis=0):
+ def shift(self, periods, axis=0, mgr=None):
""" shift the block by periods """
N = len(self.values.T)
indexer = np.zeros(N, dtype=int)
@@ -2715,12 +2818,9 @@ def apply(self, f, axes=None, filter=None, do_integrity_check=False, consolidate
kwargs[k] = obj.reindex_axis(b_items, axis=axis,
copy=align_copy)
+ kwargs['mgr'] = self
applied = getattr(b, f)(**kwargs)
-
- if isinstance(applied, list):
- result_blocks.extend(applied)
- else:
- result_blocks.append(applied)
+ result_blocks = _extend_blocks(applied, result_blocks)
if len(result_blocks) == 0:
return self.make_empty(axes or self.axes)
@@ -2768,9 +2868,12 @@ def convert(self, **kwargs):
def replace(self, **kwargs):
return self.apply('replace', **kwargs)
- def replace_list(self, src_list, dest_list, inplace=False, regex=False):
+ def replace_list(self, src_list, dest_list, inplace=False, regex=False, mgr=None):
""" do a list replace """
+ if mgr is None:
+ mgr = self
+
# figure out our mask a-priori to avoid repeated replacements
values = self.as_matrix()
@@ -2792,11 +2895,8 @@ def comp(s):
for b in rb:
if b.dtype == np.object_:
result = b.replace(s, d, inplace=inplace,
- regex=regex)
- if isinstance(result, list):
- new_rb.extend(result)
- else:
- new_rb.append(result)
+ regex=regex, mgr=mgr)
+ new_rb = _extend_blocks(result, new_rb)
else:
# get our mask for this element, sized to this
# particular block
@@ -2930,7 +3030,7 @@ def __contains__(self, item):
def nblocks(self):
return len(self.blocks)
- def copy(self, deep=True):
+ def copy(self, deep=True, mgr=None):
"""
Make deep or shallow copy of BlockManager
@@ -3122,7 +3222,7 @@ def get(self, item, fastpath=True):
else:
if isnull(item):
- raise ValueError("cannot label index with a null key")
+ raise TypeError("cannot label index with a null key")
indexer = self.items.get_indexer_for([item])
return self.reindex_indexer(new_axis=self.items[indexer],
@@ -3327,6 +3427,9 @@ def insert(self, loc, item, value, allow_duplicates=False):
if not isinstance(loc, int):
raise TypeError("loc must be int")
+ # insert to the axis; this could possibly raise a TypeError
+ new_axis = self.items.insert(loc, item)
+
block = make_block(values=value,
ndim=self.ndim,
placement=slice(loc, loc+1))
@@ -3349,8 +3452,7 @@ def insert(self, loc, item, value, allow_duplicates=False):
self._blklocs = np.insert(self._blklocs, loc, 0)
self._blknos = np.insert(self._blknos, loc, len(self.blocks))
- self.axes[0] = self.items.insert(loc, item)
-
+ self.axes[0] = new_axis
self.blocks += (block,)
self._shape = None
@@ -4084,15 +4186,12 @@ def _consolidate(blocks):
for (_can_consolidate, dtype), group_blocks in grouper:
merged_blocks = _merge_blocks(list(group_blocks), dtype=dtype,
_can_consolidate=_can_consolidate)
- if isinstance(merged_blocks, list):
- new_blocks.extend(merged_blocks)
- else:
- new_blocks.append(merged_blocks)
-
+ new_blocks = _extend_blocks(merged_blocks, new_blocks)
return new_blocks
def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
+
if len(blocks) == 1:
return blocks[0]
@@ -4119,6 +4218,22 @@ def _merge_blocks(blocks, dtype=None, _can_consolidate=True):
return blocks
+def _extend_blocks(result, blocks=None):
+    """ return a new list of extended blocks, given the result """
+ if blocks is None:
+ blocks = []
+ if isinstance(result, list):
+ for r in result:
+ if isinstance(r, list):
+ blocks.extend(r)
+ else:
+ blocks.append(r)
+ elif isinstance(result, BlockManager):
+ blocks.extend(result.blocks)
+ else:
+ blocks.append(result)
+ return blocks
+
def _block_shape(values, ndim=1, shape=None):
""" guarantee the shape of the values to be at least 1 d """
if values.ndim <= ndim:
@@ -4146,11 +4261,16 @@ def _possibly_compare(a, b, op):
    # numpy deprecation warning to have i8 vs integer comparisons
if is_datetimelike_v_numeric(a, b):
- res = False
+ result = False
+
+ # numpy deprecation warning if comparing numeric vs string-like
+ elif is_numeric_v_string_like(a, b):
+ result = False
+
else:
- res = op(a, b)
+ result = op(a, b)
- if np.isscalar(res) and (is_a_array or is_b_array):
+ if lib.isscalar(result) and (is_a_array or is_b_array):
type_names = [type(a).__name__, type(b).__name__]
if is_a_array:
@@ -4160,7 +4280,7 @@ def _possibly_compare(a, b, op):
type_names[1] = 'ndarray(dtype=%s)' % b.dtype
raise TypeError("Cannot compare types %r and %r" % tuple(type_names))
- return res
+ return result
def _concat_indexes(indexes):
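
Note on the pandas/core/internals.py changes above: block methods such as fillna, downcast, convert and replace may now return either a single Block or a (possibly nested) list of Blocks, and callers flatten whatever comes back with the new _extend_blocks helper instead of the old isinstance(applied, list) branching. A minimal runnable sketch of that contract, using plain strings as stand-ins for Block objects and omitting the BlockManager branch, looks like this:

    def _extend_blocks_sketch(result, blocks=None):
        # mirrors the helper added above, minus the BlockManager case
        if blocks is None:
            blocks = []
        if isinstance(result, list):
            for r in result:
                if isinstance(r, list):
                    blocks.extend(r)
                else:
                    blocks.append(r)
        else:
            blocks.append(result)
        return blocks

    result_blocks = []
    for applied in ("blk_a", ["blk_b", "blk_c"]):   # Block or [Block, ...]
        result_blocks = _extend_blocks_sketch(applied, result_blocks)
    assert result_blocks == ["blk_a", "blk_b", "blk_c"]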
diff --git a/pandas/core/ops.py b/pandas/core/ops.py
index 5b3d6069f17ec..bf331ff1b781c 100644
--- a/pandas/core/ops.py
+++ b/pandas/core/ops.py
@@ -720,7 +720,7 @@ def wrapper(self, other, axis=None):
res = op(self.values, other)
else:
values = self.get_values()
- if is_list_like(other):
+ if isinstance(other, (list, np.ndarray)):
other = np.asarray(other)
res = na_op(values, other)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index 08ef82835830c..da0ab7bc59440 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -680,8 +680,8 @@ def _combine(self, other, func, axis=0):
elif np.isscalar(other):
return self._combine_const(other, func)
else:
- raise NotImplementedError(str(type(other)) +
- ' is not supported in combine operation with ' +
+ raise NotImplementedError(str(type(other)) +
+ ' is not supported in combine operation with ' +
str(type(self)))
def _combine_const(self, other, func):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index f4e3374626011..2fc90ef8596f1 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -2501,11 +2501,19 @@ def dropna(self, axis=0, inplace=False, **kwargs):
'argument "{0}"'.format(list(kwargs.keys())[0]))
axis = self._get_axis_number(axis or 0)
- result = remove_na(self)
- if inplace:
- self._update_inplace(result)
+
+ if self._can_hold_na:
+ result = remove_na(self)
+ if inplace:
+ self._update_inplace(result)
+ else:
+ return result
else:
- return result
+ if inplace:
+ # do nothing
+ pass
+ else:
+ return self.copy()
valid = lambda self, inplace=False, **kwargs: self.dropna(inplace=inplace,
**kwargs)
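
A quick illustration of the Series.dropna change above (hypothetical data): a dtype that cannot hold NaN now skips remove_na entirely, returning a copy (or doing nothing when inplace=True).

    import pandas as pd

    s = pd.Series([1, 2, 3])        # int64, so _can_hold_na is False
    assert s.dropna().equals(s)     # just a copy, no filtering pass
    s.dropna(inplace=True)          # no-op instead of an unnecessary rewrite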
diff --git a/pandas/io/common.py b/pandas/io/common.py
index b9cdd44e52555..e46f609077810 100644
--- a/pandas/io/common.py
+++ b/pandas/io/common.py
@@ -2,11 +2,28 @@
import sys
import os
+import csv
+import codecs
import zipfile
from contextlib import contextmanager, closing
-from pandas.compat import StringIO, string_types, BytesIO
+from pandas.compat import StringIO, BytesIO, string_types, text_type
from pandas import compat
+from pandas.core.common import pprint_thing, is_number
+
+
+try:
+ import pathlib
+ _PATHLIB_INSTALLED = True
+except ImportError:
+ _PATHLIB_INSTALLED = False
+
+
+try:
+ from py.path import local as LocalPath
+ _PY_PATH_INSTALLED = True
+except:
+ _PY_PATH_INSTALLED = False
if compat.PY3:
@@ -201,6 +218,25 @@ def _validate_header_arg(header):
"header=int or list-like of ints to specify "
"the row(s) making up the column names")
+def _stringify_path(filepath_or_buffer):
+ """Return the argument coerced to a string if it was a pathlib.Path
+ or a py.path.local
+
+ Parameters
+ ----------
+ filepath_or_buffer : object to be converted
+
+ Returns
+ -------
+    str_filepath_or_buffer : the string version of the input path
+ """
+ if _PATHLIB_INSTALLED and isinstance(filepath_or_buffer, pathlib.Path):
+ return text_type(filepath_or_buffer)
+ if _PY_PATH_INSTALLED and isinstance(filepath_or_buffer, LocalPath):
+ return filepath_or_buffer.strpath
+ return filepath_or_buffer
+
+
def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
compression=None):
"""
@@ -209,7 +245,8 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
Parameters
----------
- filepath_or_buffer : a url, filepath, or buffer
+ filepath_or_buffer : a url, filepath (str, py.path.local or pathlib.Path),
+ or buffer
encoding : the encoding to use to decode py3 bytes, default is 'utf-8'
Returns
@@ -257,6 +294,8 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None,
filepath_or_buffer = k
return filepath_or_buffer, None, compression
+ # It is a pathlib.Path/py.path.local or string
+ filepath_or_buffer = _stringify_path(filepath_or_buffer)
return _expand_user(filepath_or_buffer), None, compression
@@ -284,3 +323,148 @@ def ZipFile(*args, **kwargs):
yield zf
else:
ZipFile = zipfile.ZipFile
+
+
+def _get_handle(path, mode, encoding=None, compression=None):
+ """Gets file handle for given path and mode.
+ """
+ if compression is not None:
+ if encoding is not None and not compat.PY3:
+ msg = 'encoding + compression not yet supported in Python 2'
+ raise ValueError(msg)
+
+ if compression == 'gzip':
+ import gzip
+ f = gzip.GzipFile(path, mode)
+ elif compression == 'bz2':
+ import bz2
+ f = bz2.BZ2File(path, mode)
+ else:
+ raise ValueError('Unrecognized compression type: %s' %
+ compression)
+ if compat.PY3:
+ from io import TextIOWrapper
+ f = TextIOWrapper(f, encoding=encoding)
+ return f
+ else:
+ if compat.PY3:
+ if encoding:
+ f = open(path, mode, encoding=encoding)
+ else:
+ f = open(path, mode, errors='replace')
+ else:
+ f = open(path, mode)
+
+ return f
+
+
+class UTF8Recoder:
+
+ """
+ Iterator that reads an encoded stream and reencodes the input to UTF-8
+ """
+
+ def __init__(self, f, encoding):
+ self.reader = codecs.getreader(encoding)(f)
+
+ def __iter__(self):
+ return self
+
+ def read(self, bytes=-1):
+ return self.reader.read(bytes).encode("utf-8")
+
+ def readline(self):
+ return self.reader.readline().encode("utf-8")
+
+ def next(self):
+ return next(self.reader).encode("utf-8")
+
+ # Python 3 iterator
+ __next__ = next
+
+
+if compat.PY3: # pragma: no cover
+ def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
+ # ignore encoding
+ return csv.reader(f, dialect=dialect, **kwds)
+
+ def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
+ return csv.writer(f, dialect=dialect, **kwds)
+else:
+ class UnicodeReader:
+
+ """
+ A CSV reader which will iterate over lines in the CSV file "f",
+ which is encoded in the given encoding.
+
+ On Python 3, this is replaced (below) by csv.reader, which handles
+ unicode.
+ """
+
+ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
+ f = UTF8Recoder(f, encoding)
+ self.reader = csv.reader(f, dialect=dialect, **kwds)
+
+ def next(self):
+ row = next(self.reader)
+ return [compat.text_type(s, "utf-8") for s in row]
+
+ # python 3 iterator
+ __next__ = next
+
+ def __iter__(self): # pragma: no cover
+ return self
+
+ class UnicodeWriter:
+
+ """
+ A CSV writer which will write rows to CSV file "f",
+ which is encoded in the given encoding.
+ """
+
+ def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
+ # Redirect output to a queue
+ self.queue = StringIO()
+ self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
+ self.stream = f
+ self.encoder = codecs.getincrementalencoder(encoding)()
+ self.quoting = kwds.get("quoting", None)
+
+ def writerow(self, row):
+ def _check_as_is(x):
+ return (self.quoting == csv.QUOTE_NONNUMERIC and
+ is_number(x)) or isinstance(x, str)
+
+ row = [x if _check_as_is(x)
+ else pprint_thing(x).encode("utf-8") for x in row]
+
+ self.writer.writerow([s for s in row])
+ # Fetch UTF-8 output from the queue ...
+ data = self.queue.getvalue()
+ data = data.decode("utf-8")
+ # ... and reencode it into the target encoding
+ data = self.encoder.encode(data)
+ # write to the target stream
+ self.stream.write(data)
+ # empty queue
+ self.queue.truncate(0)
+
+ def writerows(self, rows):
+ def _check_as_is(x):
+ return (self.quoting == csv.QUOTE_NONNUMERIC and
+ is_number(x)) or isinstance(x, str)
+
+ for i, row in enumerate(rows):
+ rows[i] = [x if _check_as_is(x)
+ else pprint_thing(x).encode("utf-8") for x in row]
+
+ self.writer.writerows([[s for s in row] for row in rows])
+ # Fetch UTF-8 output from the queue ...
+ data = self.queue.getvalue()
+ data = data.decode("utf-8")
+ # ... and reencode it into the target encoding
+ data = self.encoder.encode(data)
+ # write to the target stream
+ self.stream.write(data)
+ # empty queue
+ self.queue.truncate(0)
\ No newline at end of file
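
Sketch of the new pandas/io/common.py helpers (a minimal example, assuming pathlib is importable): _stringify_path coerces pathlib.Path / py.path.local arguments to plain strings before the usual filepath handling, so the IO readers accept path objects transparently.

    import os
    from pathlib import Path
    from pandas.io import common

    p = Path('foo') / 'bar.csv'
    assert common._stringify_path(p) == os.path.join('foo', 'bar.csv')
    assert common._stringify_path('baz.csv') == 'baz.csv'   # strings pass through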
diff --git a/pandas/io/data.py b/pandas/io/data.py
index 310b165101bdf..ac6f14e846bec 100644
--- a/pandas/io/data.py
+++ b/pandas/io/data.py
@@ -1024,7 +1024,7 @@ def _validate_expiry(self, expiry):
if expiry in expiry_dates:
return expiry
else:
- index = DatetimeIndex(expiry_dates).order()
+ index = DatetimeIndex(expiry_dates).sort_values()
return index[index.date >= expiry][0].date()
def get_forward_data(self, months, call=True, put=False, near=False,
diff --git a/pandas/io/excel.py b/pandas/io/excel.py
index a7a844cdfcb40..ffd2768c78824 100644
--- a/pandas/io/excel.py
+++ b/pandas/io/excel.py
@@ -708,7 +708,12 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
for cell in cells:
colletter = get_column_letter(startcol + cell.col + 1)
xcell = wks.cell("%s%s" % (colletter, startrow + cell.row + 1))
- xcell.value = _conv_value(cell.val)
+ if (isinstance(cell.val, compat.string_types)
+ and xcell.data_type_for_value(cell.val)
+ != xcell.TYPE_STRING):
+ xcell.set_value_explicit(cell.val)
+ else:
+ xcell.value = _conv_value(cell.val)
style = None
if cell.style:
style = self._convert_to_style(cell.style)
@@ -1240,7 +1245,7 @@ def write_cells(self, cells, sheet_name=None, startrow=0, startcol=0):
start_row=startrow + cell.row + 1,
start_column=startcol + cell.col + 1,
end_column=startcol + cell.mergeend + 1,
- end_row=startrow + cell.mergeend + 1
+ end_row=startrow + cell.mergestart + 1
)
# When cells are merged only the top-left cell is preserved
diff --git a/pandas/io/ga.py b/pandas/io/ga.py
index b6b4081e3650f..a6f9c9ed9467f 100644
--- a/pandas/io/ga.py
+++ b/pandas/io/ga.py
@@ -20,6 +20,12 @@
from oauth2client.client import AccessTokenRefreshError
from pandas.compat import zip, u
+# GH11038
+import warnings
+warnings.warn("The pandas.io.ga module is deprecated and will be "
+ "removed in a future version.",
+ FutureWarning, stacklevel=2)
+
TYPE_MAP = {u('INTEGER'): int, u('FLOAT'): float, u('TIME'): int}
NO_CALLBACK = auth.OOB_CALLBACK_URN
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index e7241036b94c4..fff36a82529e3 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -511,7 +511,8 @@ def to_gbq(dataframe, destination_table, project_id, chunksize=10000,
connector.delete_and_recreate_table(dataset_id, table_id, table_schema, verbose)
elif if_exists == 'append':
if not connector.verify_schema(dataset_id, table_id, table_schema):
- raise InvalidSchema("The schema of the destination table does not match")
+ raise InvalidSchema("Please verify that the column order, structure and data types in the DataFrame "
+ "match the schema of the destination table.")
else:
table.create(table_id, table_schema)
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8ac1aed9d9af7..a9c7c1587ff43 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -17,7 +17,8 @@
from pandas.core.common import AbstractMethodError
from pandas.core.config import get_option
from pandas.io.date_converters import generic_parser
-from pandas.io.common import get_filepath_or_buffer, _validate_header_arg
+from pandas.io.common import (get_filepath_or_buffer, _validate_header_arg,
+ _get_handle, UnicodeReader, UTF8Recoder)
from pandas.tseries import tools
from pandas.util.decorators import Appender
@@ -865,17 +866,20 @@ def _extract_multi_indexer_columns(self, header, index_names, col_names,
# extract the columns
field_count = len(header[0])
-
+
def extract(r):
return tuple([r[i] for i in range(field_count) if i not in sic])
columns = lzip(*[extract(r) for r in header])
names = ic + columns
+ def tostr(x):
+ return str(x) if not isinstance(x, compat.string_types) else x
+
# if we find 'Unnamed' all of a single level, then our header was too
# long
for n in range(len(columns[0])):
- if all(['Unnamed' in c[n] for c in columns]):
+ if all(['Unnamed' in tostr(c[n]) for c in columns]):
raise _parser.CParserError(
"Passed header=[%s] are too many rows for this "
"multi_index of columns"
@@ -1084,7 +1088,7 @@ def __init__(self, src, **kwds):
if 'utf-16' in (kwds.get('encoding') or ''):
if isinstance(src, compat.string_types):
src = open(src, 'rb')
- src = com.UTF8Recoder(src, kwds['encoding'])
+ src = UTF8Recoder(src, kwds['encoding'])
kwds['encoding'] = 'utf-8'
# #2442
@@ -1420,7 +1424,7 @@ def __init__(self, f, **kwds):
self._comment_lines = []
if isinstance(f, compat.string_types):
- f = com._get_handle(f, 'r', encoding=self.encoding,
+ f = _get_handle(f, 'r', encoding=self.encoding,
compression=self.compression)
elif self.compression:
f = _wrap_compressed(f, self.compression, self.encoding)
@@ -1540,17 +1544,17 @@ class MyDialect(csv.Dialect):
dia.delimiter = sniffed.delimiter
if self.encoding is not None:
self.buf.extend(list(
- com.UnicodeReader(StringIO(line),
- dialect=dia,
- encoding=self.encoding)))
+ UnicodeReader(StringIO(line),
+ dialect=dia,
+ encoding=self.encoding)))
else:
self.buf.extend(list(csv.reader(StringIO(line),
dialect=dia)))
if self.encoding is not None:
- reader = com.UnicodeReader(f, dialect=dia,
- encoding=self.encoding,
- strict=True)
+ reader = UnicodeReader(f, dialect=dia,
+ encoding=self.encoding,
+ strict=True)
else:
reader = csv.reader(f, dialect=dia,
strict=True)
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 4de641bb67926..4e25b546bddf2 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1839,7 +1839,9 @@ def set_atom_string(self, block, block_items, existing_col, min_itemsize,
nan_rep, encoding):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
- block = block.fillna(nan_rep, downcast=False)[0]
+ block = block.fillna(nan_rep, downcast=False)
+ if isinstance(block, list):
+ block = block[0]
data = block.values
# see if we have a valid string type
diff --git a/pandas/io/tests/data/testmultiindex.xls b/pandas/io/tests/data/testmultiindex.xls
index 3664c5c8dedcc..51ef0f6c04cba 100644
Binary files a/pandas/io/tests/data/testmultiindex.xls and b/pandas/io/tests/data/testmultiindex.xls differ
diff --git a/pandas/io/tests/data/testmultiindex.xlsm b/pandas/io/tests/data/testmultiindex.xlsm
index 8f359782b57bb..28c92a5f0be38 100644
Binary files a/pandas/io/tests/data/testmultiindex.xlsm and b/pandas/io/tests/data/testmultiindex.xlsm differ
diff --git a/pandas/io/tests/data/testmultiindex.xlsx b/pandas/io/tests/data/testmultiindex.xlsx
index a70110caf1ec7..815f3b07342ca 100644
Binary files a/pandas/io/tests/data/testmultiindex.xlsx and b/pandas/io/tests/data/testmultiindex.xlsx differ
diff --git a/pandas/io/tests/data/testskiprows.xls b/pandas/io/tests/data/testskiprows.xls
new file mode 100644
index 0000000000000..21ccd30ec62da
Binary files /dev/null and b/pandas/io/tests/data/testskiprows.xls differ
diff --git a/pandas/io/tests/data/testskiprows.xlsm b/pandas/io/tests/data/testskiprows.xlsm
new file mode 100644
index 0000000000000..f5889ded4637a
Binary files /dev/null and b/pandas/io/tests/data/testskiprows.xlsm differ
diff --git a/pandas/io/tests/data/testskiprows.xlsx b/pandas/io/tests/data/testskiprows.xlsx
new file mode 100644
index 0000000000000..2d7ce943a7214
Binary files /dev/null and b/pandas/io/tests/data/testskiprows.xlsx differ
diff --git a/pandas/io/tests/test_common.py b/pandas/io/tests/test_common.py
index 03d1e4fb1f365..73cae1130c740 100644
--- a/pandas/io/tests/test_common.py
+++ b/pandas/io/tests/test_common.py
@@ -5,10 +5,20 @@
import os
from os.path import isabs
+import nose
import pandas.util.testing as tm
from pandas.io import common
+try:
+ from pathlib import Path
+except ImportError:
+ pass
+
+try:
+ from py.path import local as LocalPath
+except ImportError:
+ pass
class TestCommonIOCapabilities(tm.TestCase):
@@ -27,6 +37,22 @@ def test_expand_user_normal_path(self):
self.assertEqual(expanded_name, filename)
self.assertEqual(os.path.expanduser(filename), expanded_name)
+ def test_stringify_path_pathlib(self):
+ tm._skip_if_no_pathlib()
+
+ rel_path = common._stringify_path(Path('.'))
+ self.assertEqual(rel_path, '.')
+ redundant_path = common._stringify_path(Path('foo//bar'))
+ self.assertEqual(redundant_path, os.path.join('foo', 'bar'))
+
+ def test_stringify_path_localpath(self):
+ tm._skip_if_no_localpath()
+
+ path = os.path.join('foo', 'bar')
+ abs_path = os.path.abspath(path)
+ lpath = LocalPath(path)
+ self.assertEqual(common._stringify_path(lpath), abs_path)
+
def test_get_filepath_or_buffer_with_path(self):
filename = '~/sometest'
filepath_or_buffer, _, _ = common.get_filepath_or_buffer(filename)
diff --git a/pandas/io/tests/test_data.py b/pandas/io/tests/test_data.py
index ad0e05f91d184..afc61dc42f569 100644
--- a/pandas/io/tests/test_data.py
+++ b/pandas/io/tests/test_data.py
@@ -9,12 +9,15 @@
import numpy as np
import pandas as pd
from pandas import DataFrame, Timestamp
-from pandas.io import data as web
-from pandas.io.data import DataReader, SymbolWarning, RemoteDataError, _yahoo_codes
from pandas.util.testing import (assert_series_equal, assert_produces_warning,
network, assert_frame_equal)
import pandas.util.testing as tm
+with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ from pandas.io import data as web
+
+from pandas.io.data import DataReader, SymbolWarning, RemoteDataError, _yahoo_codes
+
if compat.PY3:
from urllib.error import HTTPError
else:
@@ -103,13 +106,15 @@ def test_get_multi1(self):
@network
def test_get_multi_invalid(self):
sl = ['AAPL', 'AMZN', 'INVALID']
- pan = web.get_data_google(sl, '2012')
- self.assertIn('INVALID', pan.minor_axis)
+ with tm.assert_produces_warning(SymbolWarning):
+ pan = web.get_data_google(sl, '2012')
+ self.assertIn('INVALID', pan.minor_axis)
@network
def test_get_multi_all_invalid(self):
sl = ['INVALID', 'INVALID2', 'INVALID3']
- self.assertRaises(RemoteDataError, web.get_data_google, sl, '2012')
+ with tm.assert_produces_warning(SymbolWarning):
+ self.assertRaises(RemoteDataError, web.get_data_google, sl, '2012')
@network
def test_get_multi2(self):
@@ -291,6 +296,7 @@ def test_get_date_ret_index(self):
class TestYahooOptions(tm.TestCase):
+
@classmethod
def setUpClass(cls):
super(TestYahooOptions, cls).setUpClass()
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 40cbd97ea539f..4cb62edf71b1c 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -6,6 +6,7 @@
import os
from distutils.version import LooseVersion
+import warnings
import operator
import functools
import nose
@@ -557,6 +558,12 @@ def test_read_excel_multiindex(self):
actual = read_excel(mi_file, 'mi_column_name', header=[0,1], index_col=0)
tm.assert_frame_equal(actual, expected)
+ # Issue #11317
+ expected.columns = mi.set_levels([1,2],level=1).set_names(['c1', 'c2'])
+ actual = read_excel(mi_file, 'name_with_int', index_col=0, header=[0,1])
+ tm.assert_frame_equal(actual, expected)
+
+ expected.columns = mi.set_names(['c1', 'c2'])
expected.index = mi.set_names(['ilvl1', 'ilvl2'])
actual = read_excel(mi_file, 'both_name', index_col=[0,1], header=[0,1])
tm.assert_frame_equal(actual, expected)
@@ -660,6 +667,21 @@ def test_read_excel_chunksize(self):
pd.read_excel(os.path.join(self.dirpath, 'test1' + self.ext),
chunksize=100)
+ def test_read_excel_skiprows_list(self):
+ #GH 4903
+ actual = pd.read_excel(os.path.join(self.dirpath, 'testskiprows' + self.ext),
+ 'skiprows_list', skiprows=[0,2])
+ expected = DataFrame([[1, 2.5, pd.Timestamp('2015-01-01'), True],
+ [2, 3.5, pd.Timestamp('2015-01-02'), False],
+ [3, 4.5, pd.Timestamp('2015-01-03'), False],
+ [4, 5.5, pd.Timestamp('2015-01-04'), True]],
+ columns = ['a','b','c','d'])
+ tm.assert_frame_equal(actual, expected)
+
+ actual = pd.read_excel(os.path.join(self.dirpath, 'testskiprows' + self.ext),
+ 'skiprows_list', skiprows=np.array([0,2]))
+ tm.assert_frame_equal(actual, expected)
+
class XlsReaderTests(XlrdTests, tm.TestCase):
ext = '.xls'
engine_name = 'xlrd'
@@ -1067,7 +1089,38 @@ def test_to_excel_multiindex(self):
df = read_excel(reader, 'test1', index_col=[0, 1],
parse_dates=False)
tm.assert_frame_equal(frame, df)
- self.assertEqual(frame.index.names, df.index.names)
+
+ # Test for Issue 11328. If column indices are integers, make
+ # sure they are handled correctly for either setting of
+ # merge_cells
+ def test_to_excel_multiindex_cols(self):
+ _skip_if_no_xlrd()
+
+ frame = self.frame
+ arrays = np.arange(len(frame.index) * 2).reshape(2, -1)
+ new_index = MultiIndex.from_arrays(arrays,
+ names=['first', 'second'])
+ frame.index = new_index
+
+ new_cols_index = MultiIndex.from_tuples([(40, 1), (40, 2),
+ (50, 1), (50, 2)])
+ frame.columns = new_cols_index
+ header = [0, 1]
+ if not self.merge_cells:
+ header = 0
+
+ with ensure_clean(self.ext) as path:
+ # round trip
+ frame.to_excel(path, 'test1', merge_cells=self.merge_cells)
+ reader = ExcelFile(path)
+ df = read_excel(reader, 'test1', header=header,
+ index_col=[0, 1],
+ parse_dates=False)
+ if not self.merge_cells:
+ fm = frame.columns.format(sparsify=False,
+ adjoin=False, names=False)
+ frame.columns = [ ".".join(map(str, q)) for q in zip(*fm) ]
+ tm.assert_frame_equal(frame, df)
def test_to_excel_multiindex_dates(self):
_skip_if_no_xlrd()
@@ -1814,7 +1867,6 @@ def test_column_format(self):
# Applicable to xlsxwriter only.
_skip_if_no_xlsxwriter()
- import warnings
with warnings.catch_warnings():
# Ignore the openpyxl lxml warning.
warnings.simplefilter("ignore")
diff --git a/pandas/io/tests/test_ga.py b/pandas/io/tests/test_ga.py
index 13d31b43ac39a..965b3441d7405 100644
--- a/pandas/io/tests/test_ga.py
+++ b/pandas/io/tests/test_ga.py
@@ -1,6 +1,7 @@
import os
from datetime import datetime
+import warnings
import nose
import pandas as pd
from pandas import compat
@@ -13,7 +14,12 @@
try:
import httplib2
- import pandas.io.ga as ga
+ import apiclient
+
+ # deprecated
+ with warnings.catch_warnings(record=True):
+ import pandas.io.ga as ga
+
from pandas.io.ga import GAnalytics, read_ga
from pandas.io.auth import AuthenticationConfigError, reset_default_token_store
from pandas.io import auth
diff --git a/pandas/io/tests/test_packers.py b/pandas/io/tests/test_packers.py
index 894b699281c80..3434afc4129c4 100644
--- a/pandas/io/tests/test_packers.py
+++ b/pandas/io/tests/test_packers.py
@@ -461,20 +461,21 @@ def test_sparse_frame(self):
def test_sparse_panel(self):
- items = ['x', 'y', 'z']
- p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
- sp = p.to_sparse()
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ items = ['x', 'y', 'z']
+ p = Panel(dict((i, tm.makeDataFrame().ix[:2, :2]) for i in items))
+ sp = p.to_sparse()
- self._check_roundtrip(sp, tm.assert_panel_equal,
- check_panel_type=True)
+ self._check_roundtrip(sp, tm.assert_panel_equal,
+ check_panel_type=True)
- sp2 = p.to_sparse(kind='integer')
- self._check_roundtrip(sp2, tm.assert_panel_equal,
- check_panel_type=True)
+ sp2 = p.to_sparse(kind='integer')
+ self._check_roundtrip(sp2, tm.assert_panel_equal,
+ check_panel_type=True)
- sp3 = p.to_sparse(fill_value=0)
- self._check_roundtrip(sp3, tm.assert_panel_equal,
- check_panel_type=True)
+ sp3 = p.to_sparse(fill_value=0)
+ self._check_roundtrip(sp3, tm.assert_panel_equal,
+ check_panel_type=True)
class TestCompression(TestPackers):
diff --git a/pandas/io/tests/test_wb.py b/pandas/io/tests/test_wb.py
index 51d6ac02f0f20..ef72ad4964ff2 100644
--- a/pandas/io/tests/test_wb.py
+++ b/pandas/io/tests/test_wb.py
@@ -5,9 +5,11 @@
from pandas.util.testing import network
from pandas.util.testing import assert_frame_equal
from numpy.testing.decorators import slow
-from pandas.io.wb import search, download, get_countries
import pandas.util.testing as tm
+# deprecated
+with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ from pandas.io.wb import search, download, get_countries
class TestWB(tm.TestCase):
diff --git a/pandas/lib.pyx b/pandas/lib.pyx
index 2b4974155d44c..74842d9a165fe 100644
--- a/pandas/lib.pyx
+++ b/pandas/lib.pyx
@@ -269,6 +269,18 @@ cpdef checknull_old(object val):
else:
return util._checknull(val)
+cpdef isposinf_scalar(object val):
+ if util.is_float_object(val) and val == INF:
+ return True
+ else:
+ return False
+
+cpdef isneginf_scalar(object val):
+ if util.is_float_object(val) and val == NEGINF:
+ return True
+ else:
+ return False
+
def isscalar(object val):
"""
Return True if given value is scalar.
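
Illustrative usage of the new lib.pyx scalar helpers (hypothetical session): they answer whether a single scalar is positive or negative infinity without numpy's array-oriented np.isposinf / np.isneginf.

    import numpy as np
    import pandas.lib as lib

    assert lib.isposinf_scalar(np.inf)
    assert lib.isneginf_scalar(-np.inf)
    assert not lib.isposinf_scalar(1.0)
    assert not lib.isposinf_scalar('inf')   # non-float objects never count as infinite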
diff --git a/pandas/rpy/tests/test_common.py b/pandas/rpy/tests/test_common.py
index a2e6d08d07b58..4b579e9263742 100644
--- a/pandas/rpy/tests/test_common.py
+++ b/pandas/rpy/tests/test_common.py
@@ -6,6 +6,7 @@
import numpy as np
import unittest
import nose
+import warnings
import pandas.util.testing as tm
try:
diff --git a/pandas/sparse/array.py b/pandas/sparse/array.py
index b765fdb8d67be..f275a34ca90db 100644
--- a/pandas/sparse/array.py
+++ b/pandas/sparse/array.py
@@ -283,7 +283,15 @@ def __getitem__(self, key):
if com.is_integer(key):
return self._get_val_at(key)
else:
- data_slice = self.values[key]
+ if isinstance(key, SparseArray):
+ key = np.asarray(key)
+ if hasattr(key,'__len__') and len(self) != len(key):
+ indices = self.sp_index
+ if hasattr(indices,'to_int_index'):
+ indices = indices.to_int_index()
+ data_slice = self.values.take(indices.indices)[key]
+ else:
+ data_slice = self.values[key]
return self._constructor(data_slice)
def __getslice__(self, i, j):
@@ -513,7 +521,12 @@ def make_sparse(arr, kind='block', fill_value=nan):
else:
mask = arr != fill_value
- indices = np.arange(length, dtype=np.int32)[mask]
+ length = len(arr)
+ if length != mask.size:
+ # the arr is a SparseArray
+ indices = mask.sp_index.indices
+ else:
+ indices = np.arange(length, dtype=np.int32)[mask]
if kind == 'block':
locs, lens = splib.get_blocks(indices)
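
The new branch aligns a short mask (one entry per stored value) with the dense values before slicing. A rough pure-Python restatement of that logic, with the parameter names below standing in for the SparseArray attributes used above:

    import numpy as np

    def sparse_mask_getitem(dense_values, sp_indices, key):
        # dense_values: full-length ndarray (SparseArray.values)
        # sp_indices:   integer positions of the stored (non-fill) entries
        # key:          mask/indexer; when shorter than the array it is taken
        #               to refer to the stored entries only
        key = np.asarray(key)
        if hasattr(key, '__len__') and len(key) != len(dense_values):
            # gather the stored entries first, then apply the mask to them
            return dense_values.take(sp_indices)[key]
        return dense_values[key]
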
diff --git a/pandas/sparse/tests/test_sparse.py b/pandas/sparse/tests/test_sparse.py
index a86942718091c..9ce08c550dd0d 100644
--- a/pandas/sparse/tests/test_sparse.py
+++ b/pandas/sparse/tests/test_sparse.py
@@ -39,10 +39,6 @@
from pandas.sparse.tests.test_array import assert_sp_array_equal
-import warnings
-warnings.filterwarnings(action='ignore', category=FutureWarning)
-
-
def _test_data1():
# nan-based
arr = np.arange(20, dtype=float)
@@ -503,15 +499,6 @@ def check(a, b):
result = self.bseries + self.bseries.to_dense()
assert_sp_series_equal(result, self.bseries + self.bseries)
- # @dec.knownfailureif(True, 'Known NumPy failer as of 1.5.1')
- def test_operators_corner2(self):
- raise nose.SkipTest('known failer on numpy 1.5.1')
-
- # NumPy circumvents __r*__ operations
- val = np.float64(3.0)
- result = val - self.zbseries
- assert_sp_series_equal(result, 3 - self.zbseries)
-
def test_binary_operators(self):
# skipping for now #####
@@ -1778,20 +1765,23 @@ def setUp(self):
'ItemC': panel_data3(),
'ItemD': panel_data1(),
}
- self.panel = SparsePanel(self.data_dict)
+ with tm.assert_produces_warning(FutureWarning):
+ self.panel = SparsePanel(self.data_dict)
@staticmethod
def _test_op(panel, op):
# arithmetic tests
- result = op(panel, 1)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = op(panel, 1)
assert_sp_frame_equal(result['ItemA'], op(panel['ItemA'], 1))
def test_constructor(self):
- self.assertRaises(ValueError, SparsePanel, self.data_dict,
- items=['Item0', 'ItemA', 'ItemB'])
- with tm.assertRaisesRegexp(TypeError,
- "input must be a dict, a 'list' was passed"):
- SparsePanel(['a', 'b', 'c'])
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ self.assertRaises(ValueError, SparsePanel, self.data_dict,
+ items=['Item0', 'ItemA', 'ItemB'])
+ with tm.assertRaisesRegexp(TypeError,
+ "input must be a dict, a 'list' was passed"):
+ SparsePanel(['a', 'b', 'c'])
# deprecation GH11157
def test_deprecation(self):
@@ -1800,13 +1790,15 @@ def test_deprecation(self):
# GH 9272
def test_constructor_empty(self):
- sp = SparsePanel()
+ with tm.assert_produces_warning(FutureWarning):
+ sp = SparsePanel()
self.assertEqual(len(sp.items), 0)
self.assertEqual(len(sp.major_axis), 0)
self.assertEqual(len(sp.minor_axis), 0)
def test_from_dict(self):
- fd = SparsePanel.from_dict(self.data_dict)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ fd = SparsePanel.from_dict(self.data_dict)
assert_sp_panel_equal(fd, self.panel)
def test_pickle(self):
@@ -1830,21 +1822,25 @@ def test_to_dense(self):
assert_panel_equal(dwp, dwp2)
def test_to_frame(self):
- def _compare_with_dense(panel):
- slp = panel.to_frame()
- dlp = panel.to_dense().to_frame()
- self.assert_numpy_array_equal(slp.values, dlp.values)
- self.assertTrue(slp.index.equals(dlp.index))
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+
+ def _compare_with_dense(panel):
+ slp = panel.to_frame()
+ dlp = panel.to_dense().to_frame()
- _compare_with_dense(self.panel)
- _compare_with_dense(self.panel.reindex(items=['ItemA']))
+ self.assert_numpy_array_equal(slp.values, dlp.values)
+ self.assertTrue(slp.index.equals(dlp.index))
- zero_panel = SparsePanel(self.data_dict, default_fill_value=0)
- self.assertRaises(Exception, zero_panel.to_frame)
+ _compare_with_dense(self.panel)
+ _compare_with_dense(self.panel.reindex(items=['ItemA']))
- self.assertRaises(Exception, self.panel.to_frame,
- filter_observations=False)
+ with tm.assert_produces_warning(FutureWarning):
+ zero_panel = SparsePanel(self.data_dict, default_fill_value=0)
+ self.assertRaises(Exception, zero_panel.to_frame)
+
+ self.assertRaises(Exception, self.panel.to_frame,
+ filter_observations=False)
def test_long_to_wide_sparse(self):
pass
@@ -1885,47 +1881,53 @@ def test_delitem_pop(self):
self.assertRaises(KeyError, self.panel.__delitem__, 'ItemC')
def test_copy(self):
- cop = self.panel.copy()
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ cop = self.panel.copy()
assert_sp_panel_equal(cop, self.panel)
def test_reindex(self):
- def _compare_with_dense(swp, items, major, minor):
- swp_re = swp.reindex(items=items, major=major,
- minor=minor)
- dwp_re = swp.to_dense().reindex(items=items, major=major,
- minor=minor)
- assert_panel_equal(swp_re.to_dense(), dwp_re)
-
- _compare_with_dense(self.panel, self.panel.items[:2],
- self.panel.major_axis[::2],
- self.panel.minor_axis[::2])
- _compare_with_dense(self.panel, None,
- self.panel.major_axis[::2],
- self.panel.minor_axis[::2])
-
- self.assertRaises(ValueError, self.panel.reindex)
-
- # TODO: do something about this later...
- self.assertRaises(Exception, self.panel.reindex,
- items=['item0', 'ItemA', 'ItemB'])
-
- # test copying
- cp = self.panel.reindex(self.panel.major_axis, copy=True)
- cp['ItemA']['E'] = cp['ItemA']['A']
- self.assertNotIn('E', self.panel['ItemA'])
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+
+ def _compare_with_dense(swp, items, major, minor):
+ swp_re = swp.reindex(items=items, major=major,
+ minor=minor)
+ dwp_re = swp.to_dense().reindex(items=items, major=major,
+ minor=minor)
+ assert_panel_equal(swp_re.to_dense(), dwp_re)
+
+ _compare_with_dense(self.panel, self.panel.items[:2],
+ self.panel.major_axis[::2],
+ self.panel.minor_axis[::2])
+ _compare_with_dense(self.panel, None,
+ self.panel.major_axis[::2],
+ self.panel.minor_axis[::2])
+
+ self.assertRaises(ValueError, self.panel.reindex)
+
+ # TODO: do something about this later...
+ self.assertRaises(Exception, self.panel.reindex,
+ items=['item0', 'ItemA', 'ItemB'])
+
+ # test copying
+ cp = self.panel.reindex(self.panel.major_axis, copy=True)
+ cp['ItemA']['E'] = cp['ItemA']['A']
+ self.assertNotIn('E', self.panel['ItemA'])
def test_operators(self):
def _check_ops(panel):
+
def _dense_comp(op):
- dense = panel.to_dense()
- sparse_result = op(panel)
- dense_result = op(dense)
- assert_panel_equal(sparse_result.to_dense(), dense_result)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ dense = panel.to_dense()
+ sparse_result = op(panel)
+ dense_result = op(dense)
+ assert_panel_equal(sparse_result.to_dense(), dense_result)
def _mixed_comp(op):
- result = op(panel, panel.to_dense())
- expected = op(panel.to_dense(), panel.to_dense())
- assert_panel_equal(result, expected)
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ result = op(panel, panel.to_dense())
+ expected = op(panel.to_dense(), panel.to_dense())
+ assert_panel_equal(result, expected)
op1 = lambda x: x + 2
diff --git a/pandas/src/datetime.pxd b/pandas/src/datetime.pxd
index 0896965162698..f2f764c785894 100644
--- a/pandas/src/datetime.pxd
+++ b/pandas/src/datetime.pxd
@@ -95,14 +95,14 @@ cdef extern from "datetime/np_datetime.h":
int apply_tzinfo)
npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr,
- pandas_datetimestruct *d)
+ pandas_datetimestruct *d) nogil
void pandas_datetime_to_datetimestruct(npy_datetime val,
PANDAS_DATETIMEUNIT fr,
- pandas_datetimestruct *result)
+ pandas_datetimestruct *result) nogil
int days_per_month_table[2][12]
- int dayofweek(int y, int m, int d)
- int is_leapyear(int64_t year)
+ int dayofweek(int y, int m, int d) nogil
+ int is_leapyear(int64_t year) nogil
PANDAS_DATETIMEUNIT get_datetime64_unit(object o)
cdef extern from "datetime/np_datetime_strings.h":
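
These nogil annotations are what allow the conversion and field-extraction loops later in this patch (period.pyx, tslib.pyx) to run with the GIL released. A minimal sketch of the intended effect, assuming those loops can then overlap across threads:

    import pandas as pd
    from concurrent.futures import ThreadPoolExecutor

    idx = pd.date_range('2000-01-01', periods=1000000, freq='T')

    # With the extraction loop under `with nogil`, these calls no longer
    # serialize on the GIL and can make progress concurrently.
    with ThreadPoolExecutor(max_workers=4) as ex:
        months = list(ex.map(lambda _: idx.month, range(4)))
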
diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx
index 2a7c2135f8045..b431bb58bc991 100644
--- a/pandas/src/period.pyx
+++ b/pandas/src/period.pyx
@@ -76,11 +76,11 @@ cdef extern from "period_helper.h":
int64_t get_period_ordinal(int year, int month, int day,
int hour, int minute, int second, int microseconds, int picoseconds,
- int freq) except INT32_MIN
+ int freq) nogil except INT32_MIN
int64_t get_python_ordinal(int64_t period_ordinal, int freq) except INT32_MIN
- int get_date_info(int64_t ordinal, int freq, date_info *dinfo) except INT32_MIN
+ int get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil except INT32_MIN
double getAbsTime(int, int64_t, int64_t)
int pyear(int64_t ordinal, int freq) except INT32_MIN
@@ -139,13 +139,14 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
out = np.empty(l, dtype='i8')
if tz is None:
- for i in range(l):
- if dtarr[i] == iNaT:
- out[i] = iNaT
- continue
- pandas_datetime_to_datetimestruct(dtarr[i], PANDAS_FR_ns, &dts)
- out[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
+ with nogil:
+ for i in range(l):
+ if dtarr[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+ pandas_datetime_to_datetimestruct(dtarr[i], PANDAS_FR_ns, &dts)
+ out[i] = get_period_ordinal(dts.year, dts.month, dts.day,
+ dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
else:
out = localize_dt64arr_to_period(dtarr, freq, tz)
return out
@@ -163,11 +164,12 @@ def periodarr_to_dt64arr(ndarray[int64_t] periodarr, int freq):
out = np.empty(l, dtype='i8')
- for i in range(l):
- if periodarr[i] == iNaT:
- out[i] = iNaT
- continue
- out[i] = period_ordinal_to_dt64(periodarr[i], freq)
+ with nogil:
+ for i in range(l):
+ if periodarr[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+ out[i] = period_ordinal_to_dt64(periodarr[i], freq)
return out
@@ -245,13 +247,13 @@ def period_ordinal(int y, int m, int d, int h, int min, int s, int us, int ps, i
return get_period_ordinal(y, m, d, h, min, s, us, ps, freq)
-cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq):
+cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil:
cdef:
pandas_datetimestruct dts
date_info dinfo
float subsecond_fraction
- if ordinal == iNaT:
+ if ordinal == NPY_NAT:
return NPY_NAT
get_date_info(ordinal, freq, &dinfo)
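
At the user level these loops back the DatetimeIndex/PeriodIndex conversions; a small sketch of the NaT handling they now spell out via NPY_NAT, assuming to_period/to_timestamp as the public entry points:

    import pandas as pd

    dti = pd.DatetimeIndex(['2015-01-01', 'NaT', '2015-03-01'])
    pi = dti.to_period('M')       # NaT stays NaT in the ordinal loop
    back = pi.to_timestamp()      # and stays NaT when converted back
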
diff --git a/pandas/src/period_helper.c b/pandas/src/period_helper.c
index 032bc44de6355..e056b1fa9a522 100644
--- a/pandas/src/period_helper.c
+++ b/pandas/src/period_helper.c
@@ -113,7 +113,7 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo,
int yearoffset;
/* Range check */
- Py_AssertWithArg(year > -(INT_MAX / 366) && year < (INT_MAX / 366),
+ Py_AssertWithArg(year > -(INT_MAX / 366) && year < (INT_MAX / 366),
PyExc_ValueError,
"year out of range: %i",
year);
@@ -136,7 +136,7 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo,
day);
yearoffset = dInfoCalc_YearOffset(year, calendar);
- if (PyErr_Occurred()) goto onError;
+ if (yearoffset == INT_ERR_CODE) goto onError;
absdate = day + month_offset[leap][month - 1] + yearoffset;
@@ -155,7 +155,7 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo,
/* Calculate the absolute time */
{
- Py_AssertWithArg(hour >= 0 && hour <= 23,
+ Py_AssertWithArg(hour >= 0 && hour <= 23,
PyExc_ValueError,
"hour out of range (0-23): %i",
hour);
@@ -212,8 +212,7 @@ int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo,
while (1) {
/* Calculate the year offset */
yearoffset = dInfoCalc_YearOffset(year, calendar);
- if (PyErr_Occurred())
- goto onError;
+ if (yearoffset == INT_ERR_CODE) goto onError;
/* Backward correction: absdate must be greater than the
yearoffset */
@@ -310,7 +309,7 @@ static int calc_conversion_factors_matrix_size() {
}
matrix_size = max_value(matrix_size, period_value);
}
- return matrix_size + 1;
+ return matrix_size + 1;
}
static void alloc_conversion_factors_matrix(int matrix_size) {
diff --git a/pandas/stats/tests/test_moments.py b/pandas/stats/tests/test_moments.py
index 3615cc3dc8ad8..e2ed27156d2b5 100644
--- a/pandas/stats/tests/test_moments.py
+++ b/pandas/stats/tests/test_moments.py
@@ -43,7 +43,6 @@ class TestMoments(Base):
def setUp(self):
self._create_data()
- warnings.simplefilter("ignore", category=FutureWarning)
def test_centered_axis_validation(self):
# ok
@@ -887,7 +886,6 @@ def _create_data(self):
def setUp(self):
self._create_data()
- warnings.simplefilter("ignore", category=FutureWarning)
def _test_moments_consistency(self,
min_periods,
@@ -1513,9 +1511,6 @@ def test_rolling_functions_window_non_shrinkage(self):
functions = [lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
- # rolling_corr_pairwise is depracated, so the following line should be deleted
- # when rolling_corr_pairwise is removed.
- lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df_result_panel = f(df)
@@ -1582,9 +1577,6 @@ def test_moment_functions_zero_length(self):
lambda x: mom.expanding_corr(x, x, pairwise=True, min_periods=5),
lambda x: mom.rolling_cov(x, x, pairwise=True, window=10, min_periods=5),
lambda x: mom.rolling_corr(x, x, pairwise=True, window=10, min_periods=5),
- # rolling_corr_pairwise is depracated, so the following line should be deleted
- # when rolling_corr_pairwise is removed.
- lambda x: mom.rolling_corr_pairwise(x, x, window=10, min_periods=5),
]
for f in functions:
df1_result_panel = f(df1)
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index bf2cfc6216a60..140b54225b8e8 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2952,6 +2952,12 @@ def test_to_csv_date_format(self):
self.assertEqual(df_day.to_csv(), expected_default_day)
self.assertEqual(df_day.to_csv(date_format='%Y-%m-%d'), expected_default_day)
+ # deprecation GH11274
+ def test_to_csv_engine_kw_deprecation(self):
+ with tm.assert_produces_warning(FutureWarning):
+ df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] })
+ df.to_csv(engine='python')
+
def test_round_dataframe(self):
# GH 2665
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 8a9afcb7d1291..dc0e0e2670565 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -381,15 +381,11 @@ def test_getitem_boolean(self):
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
- import warnings
- warnings.filterwarnings(action='ignore', category=UserWarning)
-
- indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
-
- subframe_obj = self.tsframe[indexer_obj]
- assert_frame_equal(subframe_obj, subframe)
+ with tm.assert_produces_warning(UserWarning):
+ indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
- warnings.filterwarnings(action='default', category=UserWarning)
+ subframe_obj = self.tsframe[indexer_obj]
+ assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [ self.tsframe, self.mixed_frame, self.mixed_float, self.mixed_int ]:
@@ -488,6 +484,18 @@ def test_getitem_ix_mixed_integer(self):
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
+ # 11320
+ df = pd.DataFrame({ "rna": (1.5,2.2,3.2,4.5),
+ -1000: [11,21,36,40],
+ 0: [10,22,43,34],
+ 1000:[0, 10, 20, 30] },columns=['rna',-1000,0,1000])
+ result = df[[1000]]
+ expected = df.iloc[:,[3]]
+ assert_frame_equal(result, expected)
+ result = df[[-1000]]
+ expected = df.iloc[:,[1]]
+ assert_frame_equal(result, expected)
+
def test_getitem_setitem_ix_negative_integers(self):
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
@@ -4716,6 +4724,58 @@ def test_to_dict(self):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k2][k])
+ def test_to_dict_timestamp(self):
+
+ # GH11247
+ # split/records producing np.datetime64 rather than Timestamps
+ # on datetime64[ns] dtypes only
+
+ tsmp = Timestamp('20130101')
+ test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
+ test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
+
+ expected_records = [{'A': tsmp, 'B': tsmp},
+ {'A': tsmp, 'B': tsmp}]
+ expected_records_mixed = [{'A': tsmp, 'B': 1},
+ {'A': tsmp, 'B': 2}]
+
+ tm.assert_almost_equal(test_data.to_dict(
+ orient='records'), expected_records)
+ tm.assert_almost_equal(test_data_mixed.to_dict(
+ orient='records'), expected_records_mixed)
+
+ expected_series = {
+ 'A': Series([tsmp, tsmp]),
+ 'B': Series([tsmp, tsmp]),
+ }
+ expected_series_mixed = {
+ 'A': Series([tsmp, tsmp]),
+ 'B': Series([1, 2]),
+ }
+
+ tm.assert_almost_equal(test_data.to_dict(
+ orient='series'), expected_series)
+ tm.assert_almost_equal(test_data_mixed.to_dict(
+ orient='series'), expected_series_mixed)
+
+ expected_split = {
+ 'index': [0, 1],
+ 'data': [[tsmp, tsmp],
+ [tsmp, tsmp]],
+ 'columns': ['A', 'B']
+ }
+ expected_split_mixed = {
+ 'index': [0, 1],
+ 'data': [[tsmp, 1],
+ [tsmp, 2]],
+ 'columns': ['A', 'B']
+ }
+
+ tm.assert_almost_equal(test_data.to_dict(
+ orient='split'), expected_split)
+ tm.assert_almost_equal(test_data_mixed.to_dict(
+ orient='split'), expected_split_mixed)
+
def test_to_dict_invalid_orient(self):
df = DataFrame({'A':[0, 1]})
self.assertRaises(ValueError, df.to_dict, orient='xinvalid')
@@ -5779,7 +5839,7 @@ def check(df):
def f():
df.loc[:,np.nan]
- self.assertRaises(ValueError, f)
+ self.assertRaises(TypeError, f)
df = DataFrame([[1,2,3],[4,5,6]], index=[1,np.nan])
@@ -6618,31 +6678,25 @@ def test_to_csv_cols_reordering(self):
# GH3454
import pandas as pd
- def _check_df(df,cols=None):
- with ensure_clean() as path:
- df.to_csv(path,columns = cols,engine='python')
- rs_p = pd.read_csv(path,index_col=0)
- df.to_csv(path,columns = cols,chunksize=chunksize)
- rs_c = pd.read_csv(path,index_col=0)
-
- if cols:
- df = df[cols]
- assert (rs_c.columns==rs_p.columns).all()
- assert_frame_equal(df,rs_c,check_names=False)
-
chunksize=5
N = int(chunksize*2.5)
df= mkdf(N, 3)
cs = df.columns
cols = [cs[2],cs[0]]
- _check_df(df,cols)
+
+ with ensure_clean() as path:
+ df.to_csv(path,columns = cols,chunksize=chunksize)
+ rs_c = pd.read_csv(path,index_col=0)
+
+ assert_frame_equal(df[cols],rs_c,check_names=False)
def test_to_csv_legacy_raises_on_dupe_cols(self):
df= mkdf(10, 3)
df.columns = ['a','a','b']
with ensure_clean() as path:
- self.assertRaises(NotImplementedError,df.to_csv,path,engine='python')
+ with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ self.assertRaises(NotImplementedError,df.to_csv,path,engine='python')
def test_to_csv_new_dupe_cols(self):
import pandas as pd
@@ -7163,6 +7217,7 @@ def test_to_csv_chunking(self):
rs = read_csv(filename,index_col=0)
assert_frame_equal(rs, aa)
+ @slow
def test_to_csv_wide_frame_formatting(self):
# Issue #8621
df = DataFrame(np.random.randn(1, 100010), columns=None, index=None)
@@ -9400,18 +9455,20 @@ def test_regex_replace_dict_nested(self):
def test_regex_replace_dict_nested_gh4115(self):
df = pd.DataFrame({'Type':['Q','T','Q','Q','T'], 'tmp':2})
expected = DataFrame({'Type': [0,1,0,0,1], 'tmp': 2})
- assert_frame_equal(df.replace({'Type': {'Q':0,'T':1}}), expected)
+ result = df.replace({'Type': {'Q':0,'T':1}})
+ assert_frame_equal(result, expected)
def test_regex_replace_list_to_scalar(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
+ expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
+ 'c': [nan, nan, nan, 'd']})
+
res = df.replace([r'\s*\.\s*', 'a|b'], nan, regex=True)
res2 = df.copy()
res3 = df.copy()
res2.replace([r'\s*\.\s*', 'a|b'], nan, regex=True, inplace=True)
res3.replace(regex=[r'\s*\.\s*', 'a|b'], value=nan, inplace=True)
- expec = DataFrame({'a': mix['a'], 'b': np.array([nan] * 4),
- 'c': [nan, nan, nan, 'd']})
assert_frame_equal(res, expec)
assert_frame_equal(res2, expec)
assert_frame_equal(res3, expec)
@@ -9465,8 +9522,8 @@ def test_regex_replace_series_of_regexes(self):
def test_regex_replace_numeric_to_object_conversion(self):
mix = {'a': lrange(4), 'b': list('ab..'), 'c': ['a', 'b', nan, 'd']}
df = DataFrame(mix)
- res = df.replace(0, 'a')
expec = DataFrame({'a': ['a', 1, 2, 3], 'b': mix['b'], 'c': mix['c']})
+ res = df.replace(0, 'a')
assert_frame_equal(res, expec)
self.assertEqual(res.a.dtype, np.object_)
@@ -9895,6 +9952,56 @@ def test_replace_datetime(self):
result = df.replace(d)
tm.assert_frame_equal(result, expected)
+ def test_replace_datetimetz(self):
+
+ # GH 11326
+ # behaving poorly when presented with a datetime64[ns, tz]
+ df = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'B' : [0, np.nan, 2]})
+ result = df.replace(np.nan,1)
+ expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'B' : Series([0, 1, 2],dtype='float64')})
+ assert_frame_equal(result, expected)
+
+ result = df.fillna(1)
+ assert_frame_equal(result, expected)
+
+ result = df.replace(0,np.nan)
+ expected = DataFrame({'A' : date_range('20130101',periods=3,tz='US/Eastern'),
+ 'B' : [np.nan, np.nan, 2]})
+ assert_frame_equal(result, expected)
+
+ result = df.replace(Timestamp('20130102',tz='US/Eastern'),Timestamp('20130104',tz='US/Eastern'))
+ expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
+ Timestamp('20130104',tz='US/Eastern'),
+ Timestamp('20130103',tz='US/Eastern')],
+ 'B' : [0, np.nan, 2]})
+ assert_frame_equal(result, expected)
+
+ result = df.copy()
+ result.iloc[1,0] = np.nan
+ result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Eastern'))
+ assert_frame_equal(result, expected)
+
+ # coerce to object
+ result = df.copy()
+ result.iloc[1,0] = np.nan
+ result = result.replace({'A' : pd.NaT }, Timestamp('20130104',tz='US/Pacific'))
+ expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
+ Timestamp('20130104',tz='US/Pacific'),
+ Timestamp('20130103',tz='US/Eastern')],
+ 'B' : [0, np.nan, 2]})
+ assert_frame_equal(result, expected)
+
+ result = df.copy()
+ result.iloc[1,0] = np.nan
+ result = result.replace({'A' : np.nan }, Timestamp('20130104'))
+ expected = DataFrame({'A' : [Timestamp('20130101',tz='US/Eastern'),
+ Timestamp('20130104'),
+ Timestamp('20130103',tz='US/Eastern')],
+ 'B' : [0, np.nan, 2]})
+ assert_frame_equal(result, expected)
+
def test_combine_multiple_frames_dtypes(self):
# GH 2759
@@ -15198,10 +15305,14 @@ def test_to_csv_date_format(self):
pname = '__tmp_to_csv_date_format__'
with ensure_clean(pname) as path:
for engine in [None, 'python']:
+ w = FutureWarning if engine == 'python' else None
+
dt_index = self.tsframe.index
datetime_frame = DataFrame({'A': dt_index, 'B': dt_index.shift(1)}, index=dt_index)
- datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)
+ with tm.assert_produces_warning(w, check_stacklevel=False):
+ datetime_frame.to_csv(path, date_format='%Y%m%d', engine=engine)
+
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
@@ -15210,7 +15321,9 @@ def test_to_csv_date_format(self):
assert_frame_equal(test, datetime_frame_int)
- datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
+ with tm.assert_produces_warning(w, check_stacklevel=False):
+ datetime_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
+
# Check that the data was put in the specified format
test = read_csv(path, index_col=0)
datetime_frame_str = datetime_frame.applymap(lambda x: x.strftime('%Y-%m-%d'))
@@ -15221,7 +15334,8 @@ def test_to_csv_date_format(self):
# Check that columns get converted
datetime_frame_columns = datetime_frame.T
- datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)
+ with tm.assert_produces_warning(w, check_stacklevel=False):
+ datetime_frame_columns.to_csv(path, date_format='%Y%m%d', engine=engine)
test = read_csv(path, index_col=0)
@@ -15235,7 +15349,8 @@ def test_to_csv_date_format(self):
nat_index = to_datetime(['NaT'] * 10 + ['2000-01-01', '1/1/2000', '1-1-2000'])
nat_frame = DataFrame({'A': nat_index}, index=nat_index)
- nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
+ with tm.assert_produces_warning(w, check_stacklevel=False):
+ nat_frame.to_csv(path, date_format='%Y-%m-%d', engine=engine)
test = read_csv(path, parse_dates=[0, 1], index_col=0)
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 061382e0e16de..d29673e96ecdd 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -39,8 +39,7 @@ class Generic(object):
_multiprocess_can_split_ = True
def setUp(self):
- import warnings
- warnings.filterwarnings(action='ignore', category=FutureWarning)
+ pass
@property
def _ndim(self):
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index b2d8ff8ba0b00..b85f4628ae013 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -2689,6 +2689,18 @@ def test_line_colors(self):
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
+ # GH 10299
+ custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
+ ax = df.plot(color=custom_colors)
+ self._check_colors(ax.get_lines(), linecolors=custom_colors)
+ tm.close()
+
+ with tm.assertRaises(ValueError):
+            # a color list containing shorthand hex values raises ValueError
+ custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
+ # Forced show plot
+ _check_plot_works(df.plot, color=custom_colors)
+
@slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
@@ -2725,6 +2737,20 @@ def test_line_colors_and_styles_subplots(self):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
+ # GH 10299
+ custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
+ axes = df.plot(color=custom_colors, subplots=True)
+ for ax, c in zip(axes, list(custom_colors)):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ with tm.assertRaises(ValueError):
+            # a color list containing shorthand hex values raises ValueError
+ custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
+ # Forced show plot
+ _check_plot_works(df.plot, color=custom_colors, subplots=True,
+ filterwarnings='ignore')
+
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
@@ -3143,6 +3169,7 @@ def test_pie_df_nan(self):
ax.get_legend().get_texts()],
base_expected[:i] + base_expected[i+1:])
+ @slow
def test_errorbar_plot(self):
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 8eb641ce8f494..46026a4c887a6 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -1655,6 +1655,7 @@ def check_nunique(df, keys):
check_nunique(frame, ['jim'])
check_nunique(frame, ['jim', 'joe'])
+ @slow
def test_series_groupby_value_counts(self):
from itertools import product
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 90f85b3f4576d..a2d789aaf8b70 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -25,6 +25,7 @@
import pandas.util.testing as tm
from pandas import date_range
+from numpy.testing.decorators import slow
_verbose = False
@@ -1689,74 +1690,71 @@ def test_multiindex_perf_warn(self):
with tm.assert_produces_warning(PerformanceWarning):
_ = df.loc[(0,)]
+ @slow
def test_multiindex_get_loc(self): # GH7724, GH2646
- # ignore the warning here
- warnings.simplefilter('ignore', PerformanceWarning)
+ with warnings.catch_warnings(record=True):
- # test indexing into a multi-index before & past the lexsort depth
- from numpy.random import randint, choice, randn
- cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
+ # test indexing into a multi-index before & past the lexsort depth
+ from numpy.random import randint, choice, randn
+ cols = ['jim', 'joe', 'jolie', 'joline', 'jolia']
- def validate(mi, df, key):
- mask = np.ones(len(df)).astype('bool')
+ def validate(mi, df, key):
+ mask = np.ones(len(df)).astype('bool')
- # test for all partials of this key
- for i, k in enumerate(key):
- mask &= df.iloc[:, i] == k
+ # test for all partials of this key
+ for i, k in enumerate(key):
+ mask &= df.iloc[:, i] == k
- if not mask.any():
- self.assertNotIn(key[:i+1], mi.index)
- continue
-
- self.assertIn(key[:i+1], mi.index)
- right = df[mask].copy()
+ if not mask.any():
+ self.assertNotIn(key[:i+1], mi.index)
+ continue
- if i + 1 != len(key): # partial key
- right.drop(cols[:i+1], axis=1, inplace=True)
- right.set_index(cols[i+1:-1], inplace=True)
- assert_frame_equal(mi.loc[key[:i+1]], right)
+ self.assertIn(key[:i+1], mi.index)
+ right = df[mask].copy()
- else: # full key
- right.set_index(cols[:-1], inplace=True)
- if len(right) == 1: # single hit
- right = Series(right['jolia'].values,
- name=right.index[0], index=['jolia'])
- assert_series_equal(mi.loc[key[:i+1]], right)
- else: # multi hit
+ if i + 1 != len(key): # partial key
+ right.drop(cols[:i+1], axis=1, inplace=True)
+ right.set_index(cols[i+1:-1], inplace=True)
assert_frame_equal(mi.loc[key[:i+1]], right)
- def loop(mi, df, keys):
- for key in keys:
- validate(mi, df, key)
-
- n, m = 1000, 50
-
- vals = [randint(0, 10, n), choice(list('abcdefghij'), n),
- choice(pd.date_range('20141009', periods=10).tolist(), n),
- choice(list('ZYXWVUTSRQ'), n), randn(n)]
- vals = list(map(tuple, zip(*vals)))
-
- # bunch of keys for testing
- keys = [randint(0, 11, m), choice(list('abcdefghijk'), m),
- choice(pd.date_range('20141009', periods=11).tolist(), m),
- choice(list('ZYXWVUTSRQP'), m)]
- keys = list(map(tuple, zip(*keys)))
- keys += list(map(lambda t: t[:-1], vals[::n//m]))
-
- # covers both unique index and non-unique index
- df = pd.DataFrame(vals, columns=cols)
- a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
-
- for frame in a, b:
- for i in range(5): # lexsort depth
- df = frame.copy() if i == 0 else frame.sort_values(by=cols[:i])
- mi = df.set_index(cols[:-1])
- assert not mi.index.lexsort_depth < i
- loop(mi, df, keys)
-
- # restore
- warnings.simplefilter('always', PerformanceWarning)
+ else: # full key
+ right.set_index(cols[:-1], inplace=True)
+ if len(right) == 1: # single hit
+ right = Series(right['jolia'].values,
+ name=right.index[0], index=['jolia'])
+ assert_series_equal(mi.loc[key[:i+1]], right)
+ else: # multi hit
+ assert_frame_equal(mi.loc[key[:i+1]], right)
+
+ def loop(mi, df, keys):
+ for key in keys:
+ validate(mi, df, key)
+
+ n, m = 1000, 50
+
+ vals = [randint(0, 10, n), choice(list('abcdefghij'), n),
+ choice(pd.date_range('20141009', periods=10).tolist(), n),
+ choice(list('ZYXWVUTSRQ'), n), randn(n)]
+ vals = list(map(tuple, zip(*vals)))
+
+ # bunch of keys for testing
+ keys = [randint(0, 11, m), choice(list('abcdefghijk'), m),
+ choice(pd.date_range('20141009', periods=11).tolist(), m),
+ choice(list('ZYXWVUTSRQP'), m)]
+ keys = list(map(tuple, zip(*keys)))
+ keys += list(map(lambda t: t[:-1], vals[::n//m]))
+
+ # covers both unique index and non-unique index
+ df = pd.DataFrame(vals, columns=cols)
+ a, b = pd.concat([df, df]), df.drop_duplicates(subset=cols[:-1])
+
+ for frame in a, b:
+ for i in range(5): # lexsort depth
+ df = frame.copy() if i == 0 else frame.sort_values(by=cols[:i])
+ mi = df.set_index(cols[:-1])
+ assert not mi.index.lexsort_depth < i
+ loop(mi, df, keys)
def test_series_getitem_multiindex(self):
@@ -4653,6 +4651,7 @@ def test_indexing_dtypes_on_empty(self):
assert_series_equal(df2.loc[:,'a'], df2.iloc[:,0])
assert_series_equal(df2.loc[:,'a'], df2.ix[:,0])
+ @slow
def test_large_dataframe_indexing(self):
#GH10692
result = DataFrame({'x': range(10**6)},dtype='int64')
@@ -4660,6 +4659,7 @@ def test_large_dataframe_indexing(self):
expected = DataFrame({'x': range(10**6 + 1)},dtype='int64')
assert_frame_equal(result, expected)
+ @slow
def test_large_mi_dataframe_indexing(self):
#GH10645
result = MultiIndex.from_arrays([range(10**6), range(10**6)])
diff --git a/pandas/tests/test_internals.py b/pandas/tests/test_internals.py
index 00553102e172f..fbab0d2a92203 100644
--- a/pandas/tests/test_internals.py
+++ b/pandas/tests/test_internals.py
@@ -306,7 +306,7 @@ def test_try_coerce_arg(self):
block = create_block('datetime', [0])
# coerce None
- none_coerced = block._try_coerce_args(block.values, None)[1]
+ none_coerced = block._try_coerce_args(block.values, None)[2]
self.assertTrue(pd.Timestamp(none_coerced) is pd.NaT)
        # coerce different types of date objects
@@ -314,7 +314,7 @@ def test_try_coerce_arg(self):
datetime(2010, 10, 10),
date(2010, 10, 10))
for val in vals:
- coerced = block._try_coerce_args(block.values, val)[1]
+ coerced = block._try_coerce_args(block.values, val)[2]
self.assertEqual(np.int64, type(coerced))
self.assertEqual(pd.Timestamp('2010-10-10'), pd.Timestamp(coerced))
diff --git a/pandas/tests/test_lib.py b/pandas/tests/test_lib.py
index cfc98f5c20360..a24f71482c404 100644
--- a/pandas/tests/test_lib.py
+++ b/pandas/tests/test_lib.py
@@ -161,6 +161,19 @@ def test_maybe_indices_to_slice_middle(self):
self.assert_numpy_array_equal(maybe_slice, indices)
self.assert_numpy_array_equal(target[indices], target[maybe_slice])
+ def test_isinf_scalar(self):
+ #GH 11352
+ self.assertTrue(lib.isposinf_scalar(float('inf')))
+ self.assertTrue(lib.isposinf_scalar(np.inf))
+ self.assertFalse(lib.isposinf_scalar(-np.inf))
+ self.assertFalse(lib.isposinf_scalar(1))
+ self.assertFalse(lib.isposinf_scalar('a'))
+
+ self.assertTrue(lib.isneginf_scalar(float('-inf')))
+ self.assertTrue(lib.isneginf_scalar(-np.inf))
+ self.assertFalse(lib.isneginf_scalar(np.inf))
+ self.assertFalse(lib.isneginf_scalar(1))
+ self.assertFalse(lib.isneginf_scalar('a'))
class Testisscalar(tm.TestCase):
@@ -232,4 +245,4 @@ def test_lisscalar_pandas_containers(self):
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
- exit=False)
\ No newline at end of file
+ exit=False)
diff --git a/pandas/tests/test_multilevel.py b/pandas/tests/test_multilevel.py
index df61387734cb3..5b00ea163d85f 100644
--- a/pandas/tests/test_multilevel.py
+++ b/pandas/tests/test_multilevel.py
@@ -28,8 +28,6 @@ class TestMultiLevel(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
- import warnings
- warnings.filterwarnings(action='ignore', category=FutureWarning)
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'],
['one', 'two', 'three']],
diff --git a/pandas/tests/test_nanops.py b/pandas/tests/test_nanops.py
index 6d6c289a6dfa6..b9db95fe06a43 100644
--- a/pandas/tests/test_nanops.py
+++ b/pandas/tests/test_nanops.py
@@ -3,6 +3,7 @@
from functools import partial
+import warnings
import numpy as np
from pandas import Series
from pandas.core.common import isnull, is_integer_dtype
@@ -135,7 +136,7 @@ def _coerce_tds(targ, res):
return targ, res
try:
- if axis != 0 and hasattr(targ, 'shape') and targ.ndim:
+ if axis != 0 and hasattr(targ, 'shape') and targ.ndim and targ.shape != res.shape:
res = np.split(res, [targ.shape[0]], axis=0)[0]
except:
targ, res = _coerce_tds(targ, res)
@@ -364,10 +365,11 @@ def test_returned_dtype(self):
"return dtype expected from %s is %s, got %s instead" % (method, dtype, result.dtype))
def test_nanmedian(self):
- self.check_funs(nanops.nanmedian, np.median,
- allow_complex=False, allow_str=False, allow_date=False,
- allow_tdelta=True,
- allow_obj='convert')
+ with warnings.catch_warnings(record=True):
+ self.check_funs(nanops.nanmedian, np.median,
+ allow_complex=False, allow_str=False, allow_date=False,
+ allow_tdelta=True,
+ allow_obj='convert')
def test_nanvar(self):
self.check_funs_ddof(nanops.nanvar, np.var,
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index 0dad55a9133b6..1f8bcf8c9879f 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -5,6 +5,7 @@
from inspect import getargspec
import operator
import nose
+from functools import wraps
import numpy as np
import pandas as pd
@@ -17,6 +18,7 @@
import pandas.core.common as com
from pandas import compat
from pandas.compat import range, lrange, StringIO, OrderedDict
+from pandas import SparsePanel
from pandas.util.testing import (assert_panel_equal,
assert_frame_equal,
@@ -31,6 +33,22 @@
import pandas.core.panel as panelm
import pandas.util.testing as tm
+def ignore_sparse_panel_future_warning(func):
+ """
+ decorator to ignore FutureWarning if we have a SparsePanel
+
+ can be removed when SparsePanel is fully removed
+ """
+ @wraps(func)
+ def wrapper(self, *args, **kwargs):
+
+ if isinstance(self.panel, SparsePanel):
+ with assert_produces_warning(FutureWarning, check_stacklevel=False):
+ return func(self, *args, **kwargs)
+ else:
+ return func(self, *args, **kwargs)
+
+ return wrapper
class PanelTests(object):
panel = None
@@ -56,6 +74,7 @@ class SafeForLongAndSparse(object):
def test_repr(self):
foo = repr(self.panel)
+ @ignore_sparse_panel_future_warning
def test_copy_names(self):
for attr in ('major_axis', 'minor_axis'):
getattr(self.panel, attr).name = None
@@ -233,6 +252,7 @@ def test_get_plane_axes(self):
index, columns = self.panel._get_plane_axes('minor_axis')
index, columns = self.panel._get_plane_axes(0)
+ @ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.major_axis
start, end = dates[1], dates[5]
@@ -293,6 +313,7 @@ def test_iteritems(self):
self.assertEqual(len(list(compat.iteritems(self.panel))),
len(self.panel.items))
+ @ignore_sparse_panel_future_warning
def test_combineFrame(self):
def check_op(op, name):
# items
@@ -321,7 +342,7 @@ def check_op(op, name):
assert_frame_equal(result.minor_xs(idx),
op(self.panel.minor_xs(idx), xs))
- from pandas import SparsePanel
+
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv']
if not compat.PY3:
ops.append('div')
@@ -348,16 +369,18 @@ def check_op(op, name):
com.pprint_thing("Failing operation: %r" % name)
raise
+ @ignore_sparse_panel_future_warning
def test_combinePanel(self):
result = self.panel.add(self.panel)
self.assert_panel_equal(result, self.panel * 2)
+ @ignore_sparse_panel_future_warning
def test_neg(self):
self.assert_panel_equal(-self.panel, self.panel * -1)
# issue 7692
def test_raise_when_not_implemented(self):
- p = Panel(np.arange(3*4*5).reshape(3,4,5), items=['ItemA','ItemB','ItemC'],
+ p = Panel(np.arange(3*4*5).reshape(3,4,5), items=['ItemA','ItemB','ItemC'],
major_axis=pd.date_range('20130101',periods=4),minor_axis=list('ABCDE'))
d = p.sum(axis=1).ix[0]
ops = ['add', 'sub', 'mul', 'truediv', 'floordiv', 'div', 'mod', 'pow']
@@ -365,6 +388,7 @@ def test_raise_when_not_implemented(self):
with self.assertRaises(NotImplementedError):
getattr(p,op)(d, axis=0)
+ @ignore_sparse_panel_future_warning
def test_select(self):
p = self.panel
@@ -396,7 +420,9 @@ def test_get_value(self):
expected = self.panel[item][mnr][mjr]
assert_almost_equal(result, expected)
+ @ignore_sparse_panel_future_warning
def test_abs(self):
+
result = self.panel.abs()
result2 = abs(self.panel)
expected = np.abs(self.panel)
@@ -872,9 +898,6 @@ def assert_panel_equal(cls, x, y):
assert_panel_equal(x, y)
def setUp(self):
- import warnings
- warnings.filterwarnings(action='ignore', category=FutureWarning)
-
self.panel = _panel.copy()
self.panel.major_axis.name = None
self.panel.minor_axis.name = None
@@ -1534,6 +1557,7 @@ def test_transpose_copy(self):
panel.values[0, 1, 1] = np.nan
self.assertTrue(notnull(result.values[1, 0, 1]))
+ @ignore_sparse_panel_future_warning
def test_to_frame(self):
# filtered
filtered = self.panel.to_frame()
@@ -2313,6 +2337,7 @@ def test_to_string(self):
buf = StringIO()
self.panel.to_string(buf)
+ @ignore_sparse_panel_future_warning
def test_truncate(self):
dates = self.panel.index.levels[0]
start, end = dates[1], dates[5]
diff --git a/pandas/tests/test_rplot.py b/pandas/tests/test_rplot.py
index e79acfcbc58d8..4342417db193b 100644
--- a/pandas/tests/test_rplot.py
+++ b/pandas/tests/test_rplot.py
@@ -1,12 +1,12 @@
# -*- coding: utf-8 -*-
from pandas.compat import range
-import pandas.tools.rplot as rplot
import pandas.util.testing as tm
from pandas import read_csv
import os
-
import nose
+with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
+ import pandas.tools.rplot as rplot
def curpath():
pth, _ = os.path.split(os.path.abspath(__file__))
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 9c86c3f894c67..5ce25f5d93800 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -4032,6 +4032,21 @@ def test_datetime64_tz_fillna(self):
Timestamp('2011-01-04 10:00', tz=tz)])
self.assert_series_equal(expected, result)
+ # filling with a naive/other zone, coerce to object
+ result = s.fillna(Timestamp('20130101'))
+ expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
+ Timestamp('2013-01-01'),
+ Timestamp('2011-01-03 10:00', tz=tz),
+ Timestamp('2013-01-01')])
+ self.assert_series_equal(expected, result)
+
+ result = s.fillna(Timestamp('20130101',tz='US/Pacific'))
+ expected = Series([Timestamp('2011-01-01 10:00', tz=tz),
+ Timestamp('2013-01-01',tz='US/Pacific'),
+ Timestamp('2011-01-03 10:00', tz=tz),
+ Timestamp('2013-01-01',tz='US/Pacific')])
+ self.assert_series_equal(expected, result)
+
def test_fillna_int(self):
s = Series(np.random.randint(-100, 100, 50))
s.fillna(method='ffill', inplace=True)
@@ -4269,6 +4284,43 @@ def test_object_comparisons(self):
expected = -(s == 'a')
assert_series_equal(result, expected)
+ def test_comparison_tuples(self):
+ # GH11339
+ # comparisons vs tuple
+ s = Series([(1,1),(1,2)])
+
+ result = s == (1,2)
+ expected = Series([False,True])
+ assert_series_equal(result, expected)
+
+ result = s != (1,2)
+ expected = Series([True, False])
+ assert_series_equal(result, expected)
+
+ result = s == (0,0)
+ expected = Series([False, False])
+ assert_series_equal(result, expected)
+
+ result = s != (0,0)
+ expected = Series([True, True])
+ assert_series_equal(result, expected)
+
+ s = Series([(1,1),(1,1)])
+
+ result = s == (1,1)
+ expected = Series([True, True])
+ assert_series_equal(result, expected)
+
+ result = s != (1,1)
+ expected = Series([False, False])
+ assert_series_equal(result, expected)
+
+ s = Series([frozenset([1]),frozenset([1,2])])
+
+ result = s == frozenset([1])
+ expected = Series([True, False])
+ assert_series_equal(result, expected)
+
def test_comparison_operators_with_nas(self):
s = Series(bdate_range('1/1/2000', periods=10), dtype=object)
s[::2] = np.nan
@@ -5117,7 +5169,6 @@ def test_dropna_empty(self):
# invalid axis
self.assertRaises(ValueError, s.dropna, axis=1)
-
def test_datetime64_tz_dropna(self):
# DatetimeBlock
s = Series([Timestamp('2011-01-01 10:00'), pd.NaT,
@@ -5140,6 +5191,18 @@ def test_datetime64_tz_dropna(self):
self.assertEqual(result.dtype, 'datetime64[ns, Asia/Tokyo]')
self.assert_series_equal(result, expected)
+ def test_dropna_no_nan(self):
+ for s in [Series([1, 2, 3], name='x'),
+ Series([False, True, False], name='x')]:
+
+ result = s.dropna()
+ self.assert_series_equal(result, s)
+ self.assertFalse(result is s)
+
+ s2 = s.copy()
+ s2.dropna(inplace=True)
+ self.assert_series_equal(s2, s)
+
def test_axis_alias(self):
s = Series([1, 2, np.nan])
assert_series_equal(s.dropna(axis='rows'), s.dropna(axis='index'))
diff --git a/pandas/tools/pivot.py b/pandas/tools/pivot.py
index 89fe9463282b6..de7a5f5a73f3d 100644
--- a/pandas/tools/pivot.py
+++ b/pandas/tools/pivot.py
@@ -189,7 +189,13 @@ def _add_margins(table, data, values, rows, cols, aggfunc):
margin_dummy = DataFrame(row_margin, columns=[key]).T
row_names = result.index.names
- result = result.append(margin_dummy)
+ try:
+ result = result.append(margin_dummy)
+ except TypeError:
+
+ # we cannot reshape, so coerce the axis
+ result.index = result.index._to_safe_for_reshape()
+ result = result.append(margin_dummy)
result.index.names = row_names
return result
@@ -218,6 +224,7 @@ def _compute_grand_margin(data, values, aggfunc):
def _generate_marginal_results(table, data, values, rows, cols, aggfunc, grand_margin):
+
if len(cols) > 0:
# need to "interleave" the margins
table_pieces = []
@@ -235,7 +242,13 @@ def _all_key(key):
# we are going to mutate this, so need to copy!
piece = piece.copy()
- piece[all_key] = margin[key]
+ try:
+ piece[all_key] = margin[key]
+ except TypeError:
+
+ # we cannot reshape, so coerce the axis
+ piece.set_axis(cat_axis, piece._get_axis(cat_axis)._to_safe_for_reshape())
+ piece[all_key] = margin[key]
table_pieces.append(piece)
margin_keys.append(all_key)
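
The case this unblocks is computing margins over categorical group keys (GH 10989); the sketch below mirrors the test added in pandas/tools/tests/test_pivot.py further down:

    import numpy as np
    import pandas as pd

    df = pd.DataFrame({'x': np.arange(8),
                       'y': np.arange(8) // 4,
                       'z': np.arange(8) % 2})
    df.y = df.y.astype('category')
    df.z = df.z.astype('category')

    # Appending the 'All' margin to a categorical axis used to raise a
    # TypeError; the axis is now coerced to a reshape-safe form first.
    table = df.pivot_table('x', 'y', 'z', margins=True)
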
diff --git a/pandas/tools/tests/test_merge.py b/pandas/tools/tests/test_merge.py
index 929a72cfd4adc..b555a7dc2b3a1 100644
--- a/pandas/tools/tests/test_merge.py
+++ b/pandas/tools/tests/test_merge.py
@@ -20,6 +20,7 @@
from pandas import isnull, DataFrame, Index, MultiIndex, Panel, Series, date_range, read_table, read_csv
import pandas.algos as algos
import pandas.util.testing as tm
+from numpy.testing.decorators import slow
a_ = np.array
@@ -1410,6 +1411,7 @@ def test_merge_na_keys(self):
tm.assert_frame_equal(result, expected)
+ @slow
def test_int64_overflow_issues(self):
from itertools import product
from collections import defaultdict
diff --git a/pandas/tools/tests/test_pivot.py b/pandas/tools/tests/test_pivot.py
index 50ae574c03067..f0052774d66a2 100644
--- a/pandas/tools/tests/test_pivot.py
+++ b/pandas/tools/tests/test_pivot.py
@@ -719,6 +719,26 @@ def test_crosstab_dropna(self):
('two', 'dull'), ('two', 'shiny')])
assert_equal(res.columns.values, m.values)
+ def test_categorical_margins(self):
+ # GH 10989
+ df = pd.DataFrame({'x': np.arange(8),
+ 'y': np.arange(8) // 4,
+ 'z': np.arange(8) % 2})
+
+ expected = pd.DataFrame([[1.0, 2.0, 1.5],[5, 6, 5.5],[3, 4, 3.5]])
+ expected.index = Index([0,1,'All'],name='y')
+ expected.columns = Index([0,1,'All'],name='z')
+
+ data = df.copy()
+ table = data.pivot_table('x', 'y', 'z', margins=True)
+ tm.assert_frame_equal(table, expected)
+
+ data = df.copy()
+ data.y = data.y.astype('category')
+ data.z = data.z.astype('category')
+ table = data.pivot_table('x', 'y', 'z', margins=True)
+ tm.assert_frame_equal(table, expected)
+
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 814a9ccc45582..868057c675594 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -756,6 +756,8 @@ def astype(self, dtype):
return self.asi8.copy()
elif dtype == _NS_DTYPE and self.tz is not None:
return self.tz_convert('UTC').tz_localize(None)
+ elif dtype == str:
+ return self._shallow_copy(values=self.format(), infer=True)
else: # pragma: no cover
raise ValueError('Cannot cast DatetimeIndex to dtype %s' % dtype)
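
A usage sketch of the new string path (GH 10442), matching the tests added in pandas/tseries/tests/test_base.py below:

    import pandas as pd

    idx = pd.date_range('2012-01-01', periods=3, name='test_name')
    idx.astype(str)
    # Index(['2012-01-01', '2012-01-02', '2012-01-03'],
    #       dtype='object', name='test_name')
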
diff --git a/pandas/tseries/tests/test_base.py b/pandas/tseries/tests/test_base.py
index 24edc54582ec1..4d353eccba972 100644
--- a/pandas/tseries/tests/test_base.py
+++ b/pandas/tseries/tests/test_base.py
@@ -45,6 +45,32 @@ def test_ops_properties_basic(self):
self.assertEqual(s.day,10)
self.assertRaises(AttributeError, lambda : s.weekday)
+ def test_astype_str(self):
+ # test astype string - #10442
+ result = date_range('2012-01-01', periods=4, name='test_name').astype(str)
+ expected = Index(['2012-01-01', '2012-01-02', '2012-01-03','2012-01-04'],
+ name='test_name', dtype=object)
+ tm.assert_index_equal(result, expected)
+
+ # test astype string with tz and name
+ result = date_range('2012-01-01', periods=3, name='test_name', tz='US/Eastern').astype(str)
+ expected = Index(['2012-01-01 00:00:00-05:00', '2012-01-02 00:00:00-05:00',
+ '2012-01-03 00:00:00-05:00'], name='test_name', dtype=object)
+ tm.assert_index_equal(result, expected)
+
+        # test astype string with freq='H' and name
+ result = date_range('1/1/2011', periods=3, freq='H', name='test_name').astype(str)
+ expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00', '2011-01-01 02:00:00'],
+ name='test_name', dtype=object)
+ tm.assert_index_equal(result, expected)
+
+        # test astype string with freq='H' and timezone
+ result = date_range('3/6/2012 00:00', periods=2, freq='H',
+ tz='Europe/London', name='test_name').astype(str)
+ expected = Index(['2012-03-06 00:00:00+00:00', '2012-03-06 01:00:00+00:00'],
+ dtype=object, name='test_name')
+ tm.assert_index_equal(result, expected)
+
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
@@ -503,7 +529,6 @@ def test_infer_freq(self):
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
-
class TestTimedeltaIndexOps(Ops):
def setUp(self):
diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index a80bdf970cccb..230016f00374f 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -2223,6 +2223,7 @@ def test_append_join_nondatetimeindex(self):
# it works
rng.join(idx, how='outer')
+
def test_astype(self):
rng = date_range('1/1/2000', periods=10)
@@ -2235,6 +2236,17 @@ def test_astype(self):
expected = date_range('1/1/2000', periods=10, tz='US/Eastern').tz_convert('UTC').tz_localize(None)
tm.assert_index_equal(result, expected)
+ # BUG#10442 : testing astype(str) is correct for Series/DatetimeIndex
+ result = pd.Series(pd.date_range('2012-01-01', periods=3)).astype(str)
+ expected = pd.Series(['2012-01-01', '2012-01-02', '2012-01-03'], dtype=object)
+ tm.assert_series_equal(result, expected)
+
+ result = Series(pd.date_range('2012-01-01', periods=3, tz='US/Eastern')).astype(str)
+ expected = Series(['2012-01-01 00:00:00-05:00', '2012-01-02 00:00:00-05:00', '2012-01-03 00:00:00-05:00'],
+ dtype=object)
+ tm.assert_series_equal(result, expected)
+
+
def test_to_period_nofreq(self):
idx = DatetimeIndex(['2000-01-01', '2000-01-02', '2000-01-04'])
self.assertRaises(ValueError, idx.to_period)
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 398c5f0232de1..8e6d4019c69a3 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -3849,6 +3849,7 @@ def get_time_micros(ndarray[int64_t] dtindex):
@cython.wraparound(False)
[email protected](False)
def get_date_field(ndarray[int64_t] dtindex, object field):
'''
Given a int64-based datetime index, extract the year, month, etc.,
@@ -3872,130 +3873,142 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
out = np.empty(count, dtype='i4')
if field == 'Y':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.year
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.year
return out
elif field == 'M':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.month
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.month
return out
elif field == 'D':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.day
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.day
return out
elif field == 'h':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.hour
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.hour
return out
elif field == 'm':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.min
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.min
return out
elif field == 's':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.sec
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.sec
return out
elif field == 'us':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.us
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.us
return out
elif field == 'ns':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.ps / 1000
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.ps / 1000
return out
elif field == 'doy':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- isleap = is_leapyear(dts.year)
- out[i] = _month_offset[isleap, dts.month-1] + dts.day
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ isleap = is_leapyear(dts.year)
+ out[i] = _month_offset[isleap, dts.month-1] + dts.day
return out
elif field == 'dow':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- ts = convert_to_tsobject(dtindex[i], None, None)
- out[i] = ts_dayofweek(ts)
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dayofweek(dts.year, dts.month, dts.day)
return out
elif field == 'woy':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
-
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None)
- isleap = is_leapyear(dts.year)
- isleap_prev = is_leapyear(dts.year - 1)
- mo_off = _month_offset[isleap, dts.month - 1]
- doy = mo_off + dts.day
- dow = ts_dayofweek(ts)
-
- #estimate
- woy = (doy - 1) - dow + 3
- if woy >= 0:
- woy = woy / 7 + 1
-
- # verify
- if woy < 0:
- if (woy > -2) or (woy == -2 and isleap_prev):
- woy = 53
- else:
- woy = 52
- elif woy == 53:
- if 31 - dts.day + dow < 3:
- woy = 1
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
+
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ isleap = is_leapyear(dts.year)
+ isleap_prev = is_leapyear(dts.year - 1)
+ mo_off = _month_offset[isleap, dts.month - 1]
+ doy = mo_off + dts.day
+ dow = dayofweek(dts.year, dts.month, dts.day)
+
+ #estimate
+ woy = (doy - 1) - dow + 3
+ if woy >= 0:
+ woy = woy / 7 + 1
+
+ # verify
+ if woy < 0:
+ if (woy > -2) or (woy == -2 and isleap_prev):
+ woy = 53
+ else:
+ woy = 52
+ elif woy == 53:
+ if 31 - dts.day + dow < 3:
+ woy = 1
- out[i] = woy
+ out[i] = woy
return out
elif field == 'q':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.month
- out[i] = ((out[i] - 1) / 3) + 1
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.month
+ out[i] = ((out[i] - 1) / 3) + 1
return out
elif field == 'dim':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = monthrange(dts.year, dts.month)[1]
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = days_in_month(dts)
return out
raise ValueError("Field %s not supported" % field)
@@ -4239,12 +4252,13 @@ def date_normalize(ndarray[int64_t] stamps, tz=None):
tz = maybe_get_tz(tz)
result = _normalize_local(stamps, tz)
else:
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
- result[i] = _normalized_stamp(&dts)
+ with nogil:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
+ result[i] = _normalized_stamp(&dts)
return result
@@ -4256,12 +4270,13 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
pandas_datetimestruct dts
if _is_utc(tz):
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
- result[i] = _normalized_stamp(&dts)
+ with nogil:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
+ result[i] = _normalized_stamp(&dts)
elif _is_tzlocal(tz):
for i in range(n):
if stamps[i] == NPY_NAT:
@@ -4304,7 +4319,7 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
return result
-cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts):
+cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts) nogil:
dts.hour = 0
dts.min = 0
dts.sec = 0
@@ -4369,6 +4384,8 @@ def monthrange(int64_t year, int64_t month):
cdef inline int64_t ts_dayofweek(_TSObject ts):
return dayofweek(ts.dts.year, ts.dts.month, ts.dts.day)
+cdef inline int days_in_month(pandas_datetimestruct dts) nogil:
+ return days_per_month_table[is_leapyear(dts.year)][dts.month-1]
cpdef normalize_date(object dt):
'''
@@ -4388,17 +4405,18 @@ cpdef normalize_date(object dt):
cdef inline int _year_add_months(pandas_datetimestruct dts,
- int months):
+ int months) nogil:
'''new year number after shifting pandas_datetimestruct number of months'''
return dts.year + (dts.month + months - 1) / 12
cdef inline int _month_add_months(pandas_datetimestruct dts,
- int months):
+ int months) nogil:
'''new month number after shifting pandas_datetimestruct number of months'''
cdef int new_month = (dts.month + months) % 12
return 12 if new_month == 0 else new_month
@cython.wraparound(False)
[email protected](False)
def shift_months(int64_t[:] dtindex, int months, object day=None):
'''
Given an int64-based datetime index, shift all elements
@@ -4411,24 +4429,26 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
'''
cdef:
Py_ssize_t i
- int days_in_month
pandas_datetimestruct dts
int count = len(dtindex)
+ cdef int days_in_current_month
int64_t[:] out = np.empty(count, dtype='int64')
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = NPY_NAT
- else:
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
-
- if day is None:
+ if day is None:
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
dts.year = _year_add_months(dts, months)
dts.month = _month_add_months(dts, months)
- #prevent day from wrapping around month end
- days_in_month = days_per_month_table[is_leapyear(dts.year)][dts.month-1]
- dts.day = min(dts.day, days_in_month)
- elif day == 'start':
+
+ dts.day = min(dts.day, days_in_month(dts))
+ out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ elif day == 'start':
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
dts.year = _year_add_months(dts, months)
dts.month = _month_add_months(dts, months)
@@ -4439,21 +4459,28 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
dts.month = _month_add_months(dts, -1)
else:
dts.day = 1
- elif day == 'end':
- days_in_month = days_per_month_table[is_leapyear(dts.year)][dts.month-1]
+ out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ elif day == 'end':
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ days_in_current_month = days_in_month(dts)
+
dts.year = _year_add_months(dts, months)
dts.month = _month_add_months(dts, months)
# similar semantics - when adding shift forward by one
# month if already at an end of month
- if months >= 0 and dts.day == days_in_month:
+ if months >= 0 and dts.day == days_in_current_month:
dts.year = _year_add_months(dts, 1)
dts.month = _month_add_months(dts, 1)
- days_in_month = days_per_month_table[is_leapyear(dts.year)][dts.month-1]
- dts.day = days_in_month
+ dts.day = days_in_month(dts)
+ out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ else:
+ raise ValueError("day must be None, 'start' or 'end'")
- out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
return np.asarray(out)
#----------------------------------------------------------------------
diff --git a/pandas/util/testing.py b/pandas/util/testing.py
index 362351c7c31c2..a278c4d0f9045 100644
--- a/pandas/util/testing.py
+++ b/pandas/util/testing.py
@@ -59,7 +59,6 @@ def reset_testing_mode():
if 'deprecate' in testing_mode:
warnings.simplefilter('ignore', DeprecationWarning)
-
set_testing_mode()
class TestCase(unittest.TestCase):
@@ -255,6 +254,23 @@ def _skip_if_python26():
import nose
raise nose.SkipTest("skipping on python2.6")
+
+def _skip_if_no_pathlib():
+ try:
+ from pathlib import Path
+ except ImportError:
+ import nose
+ raise nose.SkipTest("pathlib not available")
+
+
+def _skip_if_no_localpath():
+ try:
+ from py.path import local as LocalPath
+ except ImportError:
+ import nose
+ raise nose.SkipTest("py.path not installed")
+
+
def _incompat_bottleneck_version(method):
""" skip if we have bottleneck installed
and its >= 1.0
@@ -1958,7 +1974,6 @@ def handle_success(self, exc_type, exc_value, traceback):
raise_with_traceback(e, traceback)
return True
-
@contextmanager
def assert_produces_warning(expected_warning=Warning, filter_level="always",
clear=None, check_stacklevel=True):
@@ -2005,6 +2020,7 @@ def assert_produces_warning(expected_warning=Warning, filter_level="always",
warnings.simplefilter(filter_level)
yield w
extra_warnings = []
+
for actual_warning in w:
if (expected_warning and issubclass(actual_warning.category,
expected_warning)):
| replaces #11317
closes #11408
This includes updates to 3 Excel files, plus a test in test_excel.py and the fix in parsers.py.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11328 | 2015-10-14T16:15:06Z | 2015-10-25T14:02:58Z | null | 2015-10-26T14:00:34Z |
BUG: Fix to_dict() problem when using datetime DataFrame #11247 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index fa08a9790f789..88c46f3bcb863 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -96,3 +96,4 @@ Bug Fixes
- Bugs in ``to_excel`` with duplicate columns (:issue:`11007`, :issue:`10982`, :issue:`10970`)
- Fixed a bug that prevented the construction of an empty series of dtype
``datetime64[ns, tz]`` (:issue:`11245`).
+- Bug in ``DataFrame.to_dict()`` produces an datetime object instead of Timestamp when only datetime is present in data (:issue:`11327`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index e92de770ac4bd..827373c9a330b 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -802,11 +802,12 @@ def to_dict(self, orient='dict'):
elif orient.lower().startswith('sp'):
return {'index': self.index.tolist(),
'columns': self.columns.tolist(),
- 'data': self.values.tolist()}
+ 'data': lib.map_infer(self.values.ravel(), _maybe_box_datetimelike)
+ .reshape(self.values.shape).tolist()}
elif orient.lower().startswith('s'):
- return dict((k, v) for k, v in compat.iteritems(self))
+ return dict((k, _maybe_box_datetimelike(v)) for k, v in compat.iteritems(self))
elif orient.lower().startswith('r'):
- return [dict((k, v) for k, v in zip(self.columns, row))
+ return [dict((k, _maybe_box_datetimelike(v)) for k, v in zip(self.columns, row))
for row in self.values]
elif orient.lower().startswith('i'):
return dict((k, v.to_dict()) for k, v in self.iterrows())
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 6667d389bd6c5..c3abbae137418 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4728,6 +4728,55 @@ def test_to_dict(self):
for k2, v2 in compat.iteritems(v):
self.assertEqual(v2, recons_data[k2][k])
+ def test_to_dict_timestamp(self):
+ # GH11247
+ tsmp = Timestamp('20130101')
+ test_data = DataFrame({'A': [tsmp, tsmp], 'B': [tsmp, tsmp]})
+ test_data_mixed = DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})
+
+ expected_records = [{'A': tsmp, 'B': tsmp},
+ {'A': tsmp, 'B': tsmp}]
+ expected_records_mixed = [{'A': tsmp, 'B': 1},
+ {'A': tsmp, 'B': 2}]
+
+ tm.assert_almost_equal(test_data.to_dict(
+ orient='records'), expected_records)
+ tm.assert_almost_equal(test_data_mixed.to_dict(
+ orient='records'), expected_records_mixed)
+
+ expected_series = {
+ 'A': Series([tsmp, tsmp]),
+ 'B': Series([tsmp, tsmp]),
+ }
+ expected_series_mixed = {
+ 'A': Series([tsmp, tsmp]),
+ 'B': Series([1, 2]),
+ }
+
+ tm.assert_almost_equal(test_data.to_dict(
+ orient='series'), expected_series)
+ tm.assert_almost_equal(test_data_mixed.to_dict(
+ orient='series'), expected_series_mixed)
+
+ expected_split = {
+ 'index': [0, 1],
+ 'data': [[tsmp, tsmp],
+ [tsmp, tsmp]],
+ 'columns': ['A', 'B']
+ }
+ expected_split_mixed = {
+ 'index': [0, 1],
+ 'data': [[tsmp, 1],
+ [tsmp, 2]],
+ 'columns': ['A', 'B']
+ }
+
+ tm.assert_almost_equal(test_data.to_dict(
+ orient='split'), expected_split)
+ tm.assert_almost_equal(test_data_mixed.to_dict(
+ orient='split'), expected_split_mixed)
+
+
def test_to_dict_invalid_orient(self):
df = DataFrame({'A':[0, 1]})
self.assertRaises(ValueError, df.to_dict, orient='xinvalid')
| closes #11247
This is my first time contributing through a PR, so forgive me if I make any silly mistakes.
The issue is described in GH11247.
I followed @jreback's suggestion to box things up in `to_dict()`, but I am not sure if this is what he means because I do not know exactly when `_maybe_datetime()` should be used. Initially I tried to tweak `internals.py`, but it failed.
Let me know how I can improve from here. I hope I can really learn from my first PR.
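A minimal sketch of the behaviour the boxing is aiming for (illustrative only, not taken from the PR's tests):

```python
import pandas as pd

tsmp = pd.Timestamp('20130101')
df = pd.DataFrame({'A': [tsmp, tsmp], 'B': [1, 2]})

# with the boxing in place, datetime values come back as pd.Timestamp
# objects rather than plain datetime.datetime
records = df.to_dict(orient='records')
print(isinstance(records[0]['A'], pd.Timestamp))  # True once the fix is applied
```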
| https://api.github.com/repos/pandas-dev/pandas/pulls/11327 | 2015-10-14T16:13:51Z | 2015-10-16T16:40:27Z | null | 2015-10-17T03:33:38Z |
ENH: itertuples() returns namedtuples (closes #11269) | diff --git a/doc/source/basics.rst b/doc/source/basics.rst
index e11c612a510db..757cff43f87e7 100644
--- a/doc/source/basics.rst
+++ b/doc/source/basics.rst
@@ -1211,9 +1211,10 @@ To iterate over the rows of a DataFrame, you can use the following methods:
* :meth:`~DataFrame.iterrows`: Iterate over the rows of a DataFrame as (index, Series) pairs.
This converts the rows to Series objects, which can change the dtypes and has some
performance implications.
-* :meth:`~DataFrame.itertuples`: Iterate over the rows of a DataFrame as tuples of the values.
- This is a lot faster as :meth:`~DataFrame.iterrows`, and is in most cases preferable to
- use to iterate over the values of a DataFrame.
+* :meth:`~DataFrame.itertuples`: Iterate over the rows of a DataFrame
+ as namedtuples of the values. This is a lot faster as
+ :meth:`~DataFrame.iterrows`, and is in most cases preferable to use
+ to iterate over the values of a DataFrame.
.. warning::
@@ -1307,7 +1308,7 @@ index value along with a Series containing the data in each row:
df_orig['int'].dtype
To preserve dtypes while iterating over the rows, it is better
- to use :meth:`~DataFrame.itertuples` which returns tuples of the values
+ to use :meth:`~DataFrame.itertuples` which returns namedtuples of the values
and which is generally much faster as ``iterrows``.
For instance, a contrived way to transpose the DataFrame would be:
@@ -1325,9 +1326,9 @@ itertuples
~~~~~~~~~~
The :meth:`~DataFrame.itertuples` method will return an iterator
-yielding a tuple for each row in the DataFrame. The first element
-of the tuple will be the row's corresponding index value,
-while the remaining values are the row values.
+yielding a namedtuple for each row in the DataFrame. The first element
+of the tuple will be the row's corresponding index value, while the
+remaining values are the row values.
For instance,
@@ -1336,9 +1337,16 @@ For instance,
for row in df.itertuples():
print(row)
-This method does not convert the row to a Series object but just returns the
-values inside a tuple. Therefore, :meth:`~DataFrame.itertuples` preserves the
-data type of the values and is generally faster as :meth:`~DataFrame.iterrows`.
+This method does not convert the row to a Series object but just
+returns the values inside a namedtuple. Therefore,
+:meth:`~DataFrame.itertuples` preserves the data type of the values
+and is generally faster as :meth:`~DataFrame.iterrows`.
+
+.. note::
+
+ The columns names will be renamed to positional names if they are
+ invalid Python identifiers, repeated, or start with an underscore.
+ With a large number of columns (>255), regular tuples are returned.
.. _basics.dt_accessors:
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index e303adfd356da..84db16e338d87 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -38,6 +38,7 @@ API changes
Legacy Python syntax (``set([x, y])``) (:issue:`11215`)
- Indexing with a null key will raise a ``TypeError``, instead of a ``ValueError`` (:issue:`11356`)
- ``Series.sort_index()`` now correctly handles the ``inplace`` option (:issue:`11402`)
+- ``DataFrame.itertuples()`` now returns ``namedtuple`` objects, when possible. (:issue:`11269`)
.. _whatsnew_0171.deprecations:
@@ -71,7 +72,7 @@ Bug Fixes
- Bug in ``HDFStore.append`` with strings whose encoded length exceded the max unencoded length (:issue:`11234`)
- Bug in merging ``datetime64[ns, tz]`` dtypes (:issue:`11405`)
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
-- Bug in using ``DataFrame.ix`` with a multi-index indexer(:issue:`11372`)
+- Bug in using ``DataFrame.ix`` with a multi-index indexer(:issue:`11372`)
- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issue:`11295`)
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 4774fc4f17a91..b06f1b947bbe7 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -584,7 +584,7 @@ def iteritems(self):
See also
--------
iterrows : Iterate over the rows of a DataFrame as (index, Series) pairs.
- itertuples : Iterate over the rows of a DataFrame as tuples of the values.
+ itertuples : Iterate over the rows of a DataFrame as namedtuples of the values.
"""
if self.columns.is_unique and hasattr(self, '_item_cache'):
@@ -617,7 +617,7 @@ def iterrows(self):
int64
To preserve dtypes while iterating over the rows, it is better
- to use :meth:`itertuples` which returns tuples of the values
+ to use :meth:`itertuples` which returns namedtuples of the values
and which is generally faster as ``iterrows``.
2. You should **never modify** something you are iterating over.
@@ -632,7 +632,7 @@ def iterrows(self):
See also
--------
- itertuples : Iterate over the rows of a DataFrame as tuples of the values.
+ itertuples : Iterate over the rows of a DataFrame as namedtuples of the values.
iteritems : Iterate over (column name, Series) pairs.
"""
@@ -641,15 +641,23 @@ def iterrows(self):
s = Series(v, index=columns, name=k)
yield k, s
- def itertuples(self, index=True):
+ def itertuples(self, index=True, name="Pandas"):
"""
- Iterate over the rows of DataFrame as tuples, with index value
+ Iterate over the rows of DataFrame as namedtuples, with index value
as first element of the tuple.
Parameters
----------
index : boolean, default True
If True, return the index as the first element of the tuple.
+ name : string, default "Pandas"
+ The name of the returned namedtuple.
+
+ Notes
+ -----
+ The columns names will be renamed to positional names if they are
+ invalid Python identifiers, repeated, or start with an underscore.
+ With a large number of columns (>255), regular tuples are returned.
See also
--------
@@ -666,16 +674,32 @@ def itertuples(self, index=True):
b 2 0.2
>>> for row in df.itertuples():
... print(row)
- ('a', 1, 0.10000000000000001)
- ('b', 2, 0.20000000000000001)
+ ...
+ Pandas(Index='a', col1=1, col2=0.10000000000000001)
+ Pandas(Index='b', col1=2, col2=0.20000000000000001)
"""
arrays = []
+ fields = []
if index:
arrays.append(self.index)
+ fields.append("Index")
# use integer indexing because of possible duplicate column names
arrays.extend(self.iloc[:, k] for k in range(len(self.columns)))
+
+ # Python 3 supports at most 255 arguments to constructor, and
+ # things get slow with this many fields in Python 2
+ if len(self.columns) + index < 256:
+ # `rename` is unsupported in Python 2.6
+ try:
+ itertuple = collections.namedtuple(
+ name, fields+list(self.columns), rename=True)
+ return (itertuple(*row) for row in zip(*arrays))
+ except:
+ pass
+
+ # fallback to regular tuples
return zip(*arrays)
if compat.PY3: # pragma: no cover
@@ -1213,7 +1237,7 @@ def to_panel(self):
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
- mode='w', encoding=None, compression=None, quoting=None,
+ mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=False, date_format=None, doublequote=True,
escapechar=None, decimal='.', **kwds):
@@ -1251,7 +1275,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
compression : string, optional
- a string representing the compression to use in the output file,
+ a string representing the compression to use in the output file,
allowed values are 'gzip', 'bz2',
only used when the first argument is a filename
line_terminator : string, default '\\n'
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index dfbd21997568d..1b57d53a548f3 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -5545,6 +5545,27 @@ def test_itertuples(self):
dfaa = df[['a', 'a']]
self.assertEqual(list(dfaa.itertuples()), [(0, 1, 1), (1, 2, 2), (2, 3, 3)])
+ tup = next(df.itertuples(name='TestName'))
+
+ # no support for field renaming in Python 2.6, regular tuples are returned
+ if sys.version >= LooseVersion('2.7'):
+ self.assertEqual(tup._fields, ('Index', 'a', 'b'))
+ self.assertEqual((tup.Index, tup.a, tup.b), tup)
+ self.assertEqual(type(tup).__name__, 'TestName')
+
+ df.columns = ['def', 'return']
+ tup2 = next(df.itertuples(name='TestName'))
+ self.assertEqual(tup2, (0, 1, 4))
+
+ if sys.version >= LooseVersion('2.7'):
+ self.assertEqual(tup2._fields, ('Index', '_1', '_2'))
+
+ df3 = DataFrame(dict(('f'+str(i), [i]) for i in range(1024)))
+ # will raise SyntaxError if trying to create namedtuple
+ tup3 = next(df3.itertuples())
+ self.assertFalse(hasattr(tup3, '_fields'))
+ self.assertIsInstance(tup3, tuple)
+
def test_len(self):
self.assertEqual(len(self.frame), len(self.frame.index))
| closes #11269
This will make itertuples return namedtuples. I'm not sure about tests here. Since `namedtuple` is a drop-in replacement for ordinary tuples (once they are created), I naively expect things to work.
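As a rough illustration of that drop-in behaviour (not part of this PR's test suite), both positional unpacking and the new attribute access should work on the yielded rows:

```python
import pandas as pd

df = pd.DataFrame({'col1': [1, 2], 'col2': [0.1, 0.2]}, index=['a', 'b'])

for row in df.itertuples():
    # rows are namedtuples: plain tuple unpacking still works, and the
    # columns are additionally reachable as attributes
    idx, col1, col2 = row
    print(idx, row.col1, row.col2)
```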
| https://api.github.com/repos/pandas-dev/pandas/pulls/11325 | 2015-10-14T13:00:55Z | 2015-10-28T11:13:27Z | 2015-10-28T11:13:27Z | 2015-10-28T11:58:03Z |
BUG: Bug in list-like indexing with a mixed-integer Index, #11320 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 507c829e5763f..86f6a85898a15 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -65,7 +65,7 @@ Bug Fixes
- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issues:`11295`)
-
+- Bug in list-like indexing with a mixed-integer Index (:issue:`11320`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 256ece6539b6f..b4c690fe8973b 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -982,10 +982,6 @@ def _convert_list_indexer(self, keyarr, kind=None):
if kind in [None, 'iloc', 'ix'] and is_integer_dtype(keyarr) \
and not self.is_floating() and not isinstance(keyarr, ABCPeriodIndex):
- if self.inferred_type != 'integer':
- keyarr = np.where(keyarr < 0,
- len(self) + keyarr, keyarr)
-
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
@@ -998,6 +994,8 @@ def _convert_list_indexer(self, keyarr, kind=None):
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
+ keyarr = np.where(keyarr < 0,
+ len(self) + keyarr, keyarr)
return keyarr
return None
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 172c1e30686e1..6667d389bd6c5 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -488,6 +488,18 @@ def test_getitem_ix_mixed_integer(self):
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
+ # 11320
+ df = pd.DataFrame({ "rna": (1.5,2.2,3.2,4.5),
+ -1000: [11,21,36,40],
+ 0: [10,22,43,34],
+ 1000:[0, 10, 20, 30] },columns=['rna',-1000,0,1000])
+ result = df[[1000]]
+ expected = df.iloc[:,[3]]
+ assert_frame_equal(result, expected)
+ result = df[[-1000]]
+ expected = df.iloc[:,[1]]
+ assert_frame_equal(result, expected)
+
def test_getitem_setitem_ix_negative_integers(self):
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
| closes #11320
| https://api.github.com/repos/pandas-dev/pandas/pulls/11322 | 2015-10-14T10:55:15Z | 2015-10-14T11:43:50Z | 2015-10-14T11:43:50Z | 2015-10-14T11:43:50Z |
DEPR: deprecate `engine` keyword from to_csv #11274 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 94f66f8cfc672..a9f160988acf8 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -40,6 +40,8 @@ API changes
.. _whatsnew_0171.deprecations:
+- ``engine`` keyword is deprecated from ``.to_csv()`` and will be removed in a future version (:issue:`11274`)
+
Deprecations
^^^^^^^^^^^^
diff --git a/pandas/core/format.py b/pandas/core/format.py
index bf9b3bc8040de..8899e72cb2d48 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -23,6 +23,7 @@
import itertools
import csv
+import warnings
common_docstring = """
Parameters
@@ -1264,7 +1265,11 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None,
tupleize_cols=False, quotechar='"', date_format=None,
doublequote=True, escapechar=None, decimal='.'):
- self.engine = engine # remove for 0.13
+ if engine is not None:
+ warnings.warn("'engine' keyword is deprecated and "
+ "will be removed in a future version",
+ FutureWarning, stacklevel=3)
+ self.engine = engine # remove for 0.18
self.obj = obj
if path_or_buf is None:
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index bf2cfc6216a60..140b54225b8e8 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2952,6 +2952,12 @@ def test_to_csv_date_format(self):
self.assertEqual(df_day.to_csv(), expected_default_day)
self.assertEqual(df_day.to_csv(date_format='%Y-%m-%d'), expected_default_day)
+ # deprecation GH11274
+ def test_to_csv_engine_kw_deprecation(self):
+ with tm.assert_produces_warning(FutureWarning):
+ df = DataFrame({'col1' : [1], 'col2' : ['a'], 'col3' : [10.1] })
+ df.to_csv(engine='python')
+
def test_round_dataframe(self):
# GH 2665
| closes #11274
| https://api.github.com/repos/pandas-dev/pandas/pulls/11319 | 2015-10-13T23:53:44Z | 2015-10-14T00:25:31Z | null | 2015-10-14T00:25:31Z |
DEPR: deprecate pandas.io.ga, #11308 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 1eff7d01d9d91..b9e674c819e49 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -43,6 +43,8 @@ API changes
Deprecations
^^^^^^^^^^^^
+- The ``pandas.io.ga`` module which implements ``google-analytics`` support is deprecated and will be removed in a future version (:issue:`11308`)
+
.. _whatsnew_0171.performance:
Performance Improvements
diff --git a/pandas/io/ga.py b/pandas/io/ga.py
index b6b4081e3650f..5525b34951524 100644
--- a/pandas/io/ga.py
+++ b/pandas/io/ga.py
@@ -4,6 +4,13 @@
3. Goto APIs and register for OAuth2.0 for installed applications
4. Download JSON secret file and move into same directory as this file
"""
+
+# GH11038
+import warnings
+warnings.warn("The pandas.io.ga module is deprecated and will be "
+ "removed in a future version.",
+ FutureWarning, stacklevel=2)
+
from datetime import datetime
import re
from pandas import compat
| closes #11308
| https://api.github.com/repos/pandas-dev/pandas/pulls/11318 | 2015-10-13T22:12:13Z | 2015-10-14T00:09:21Z | 2015-10-14T00:09:21Z | 2015-10-14T00:09:21Z |
BUG: multi-index excel header fails if all numeric | diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index 8ac1aed9d9af7..9d62c0c4b5d8d 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -867,7 +867,7 @@ def _extract_multi_indexer_columns(self, header, index_names, col_names,
field_count = len(header[0])
def extract(r):
- return tuple([r[i] for i in range(field_count) if i not in sic])
+ return tuple([str(r[i]) for i in range(field_count) if i not in sic])
columns = lzip(*[extract(r) for r in header])
names = ic + columns
| If the multi-line headers come from Excel and a header value is not a string, the line `all(['Unnamed' in c[n] for c in columns])` will fail because `c[n]` is an int and not iterable. So either force the headers to be strings (the proposed change) or come up with some other test when looking for 'Unnamed'.
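A tiny standalone illustration of the failure mode (hypothetical header value, not from the test suite):

```python
# membership tests against a non-string header cell raise, while the
# proposed str() coercion keeps the 'Unnamed' check working
header_cell = 2015
try:
    'Unnamed' in header_cell
except TypeError as exc:
    print(exc)                         # argument of type 'int' is not iterable
print('Unnamed' in str(header_cell))  # False, as intended
```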
| https://api.github.com/repos/pandas-dev/pandas/pulls/11317 | 2015-10-13T20:46:47Z | 2015-10-14T18:51:30Z | null | 2015-10-23T16:45:56Z |
TST: validation tests for #11314, set_index with a tz | diff --git a/pandas/tseries/tests/test_timeseries.py b/pandas/tseries/tests/test_timeseries.py
index ed174bc285e4f..a80bdf970cccb 100644
--- a/pandas/tseries/tests/test_timeseries.py
+++ b/pandas/tseries/tests/test_timeseries.py
@@ -3376,6 +3376,14 @@ def test_dti_set_index_reindex(self):
df = df.reindex(idx2)
self.assertTrue(df.index.equals(idx2))
+ # 11314
+ # with tz
+ index = date_range(datetime(2015, 10, 1), datetime(2015,10,1,23), freq='H', tz='US/Eastern')
+ df = DataFrame(np.random.randn(24, 1), columns=['a'], index=index)
+ new_index = date_range(datetime(2015, 10, 2), datetime(2015,10,2,23), freq='H', tz='US/Eastern')
+ result = df.set_index(new_index)
+ self.assertEqual(new_index.freq,index.freq)
+
def test_datetimeindex_union_join_empty(self):
dti = DatetimeIndex(start='1/1/2001', end='2/1/2001', freq='D')
empty = Index([])
| xref #11314
| https://api.github.com/repos/pandas-dev/pandas/pulls/11316 | 2015-10-13T19:06:15Z | 2015-10-13T19:31:47Z | 2015-10-13T19:31:47Z | 2015-10-13T19:31:48Z |
Fixed typo | diff --git a/doc/source/10min.rst b/doc/source/10min.rst
index 91e607757e4f1..3cf05698dd9d0 100644
--- a/doc/source/10min.rst
+++ b/doc/source/10min.rst
@@ -169,7 +169,7 @@ Selection
recommend the optimized pandas data access methods, ``.at``, ``.iat``,
``.loc``, ``.iloc`` and ``.ix``.
-See the indexing documentation :ref:`Indexing and Selecing Data <indexing>` and :ref:`MultiIndex / Advanced Indexing <advanced>`
+See the indexing documentation :ref:`Indexing and Selecting Data <indexing>` and :ref:`MultiIndex / Advanced Indexing <advanced>`
Getting
~~~~~~~
| https://api.github.com/repos/pandas-dev/pandas/pulls/11315 | 2015-10-13T18:35:36Z | 2015-10-13T18:39:13Z | 2015-10-13T18:39:13Z | 2015-10-13T18:39:17Z |
|
Correct typo in error message. | diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2cdb6d9b04341..920d9ad96c5b6 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1925,7 +1925,7 @@ def _getitem_column(self, key):
if self.columns.is_unique:
return self._get_item_cache(key)
- # duplicate columns & possible reduce dimensionaility
+ # duplicate columns & possible reduce dimensionality
result = self._constructor(self._data.get(key))
if result.columns.is_unique:
result = result[key]
| Change `# duplicate columns & possible reduce dimensionaility` error message comment to `# duplicate columns & possible reduce dimensionality`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11313 | 2015-10-13T15:26:45Z | 2015-10-13T15:50:55Z | 2015-10-13T15:50:55Z | 2015-10-13T15:58:47Z |
REGR: change in output formatting for long floats/nan, #11302 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 94f66f8cfc672..d119a27c9cefa 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -34,7 +34,7 @@ API changes
- min and max reductions on ``datetime64`` and ``timedelta64`` dtyped series now
result in ``NaT`` and not ``nan`` (:issue:`11245`).
-
+- Regression from 0.16.2 for output formatting of long floats/nan, restored in (:issue:`11302`)
- Prettyprinting sets (e.g. in DataFrame cells) now uses set literal syntax (``{x, y}``) instead of
Legacy Python syntax (``set([x, y])``) (:issue:`11215`)
diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 51f6c7043817f..c8c834180c9f6 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -435,7 +435,15 @@ def _astype(self, dtype, copy=False, raise_on_error=True, values=None,
if values is None:
if issubclass(dtype.type, (compat.text_type, compat.string_types)):
- values = self.to_native_types()
+
+ # use native type formatting for datetime/tz/timedelta
+ if self.is_datelike:
+ values = self.to_native_types()
+
+ # astype formatting
+ else:
+ values = self.values
+
else:
values = self.get_values(dtype=dtype)
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index eb88fec716627..8a9afcb7d1291 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4621,6 +4621,7 @@ def test_astype_str(self):
df = DataFrame({'a' : a, 'b' : b, 'c' : c, 'd' : d, 'e' : e})
+ # datetimelike
# Test str and unicode on python 2.x and just str on python 3.x
for tt in set([str, compat.text_type]):
result = df.astype(tt)
@@ -4635,6 +4636,18 @@ def test_astype_str(self):
assert_frame_equal(result, expected)
+ # float/nan
+ # 11302
+ # consistency in astype(str)
+ for tt in set([str, compat.text_type]):
+ result = DataFrame([np.NaN]).astype(tt)
+ expected = DataFrame(['nan'])
+ assert_frame_equal(result, expected)
+
+ result = DataFrame([1.12345678901234567890]).astype(tt)
+ expected = DataFrame(['1.12345678901'])
+ assert_frame_equal(result, expected)
+
def test_array_interface(self):
result = np.sqrt(self.frame)
tm.assertIsInstance(result, type(self.frame))
| closes #11302
| https://api.github.com/repos/pandas-dev/pandas/pulls/11309 | 2015-10-13T11:43:48Z | 2015-10-13T13:57:40Z | 2015-10-13T13:57:40Z | 2015-10-13T13:57:40Z |
Checking for length of categories before doing string conversion. fix… | diff --git a/asv_bench/benchmarks/categoricals.py b/asv_bench/benchmarks/categoricals.py
index a0f9383336940..d32c19d6d0bb8 100644
--- a/asv_bench/benchmarks/categoricals.py
+++ b/asv_bench/benchmarks/categoricals.py
@@ -1,6 +1,7 @@
from .pandas_vb_common import *
import string
+
class concat_categorical(object):
goal_time = 0.2
@@ -26,6 +27,7 @@ def time_value_counts(self):
def time_value_counts_dropna(self):
self.ts.value_counts(dropna=True)
+
class categorical_constructor(object):
goal_time = 0.2
@@ -43,3 +45,16 @@ def time_regular_constructor(self):
def time_fastpath(self):
Categorical(self.codes, self.cat_idx, fastpath=True)
+
+class categorical_rendering(object):
+ goal_time = 3e-3
+
+ def setup(self):
+ n = 1000
+ items = [str(i) for i in range(n)]
+ s = pd.Series(items, dtype='category')
+ df = pd.DataFrame({'C': s, 'data': np.random.randn(n)})
+ self.data = df[df.C == '20']
+
+ def time_rendering(self):
+ str(self.data.C)
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 94f66f8cfc672..bd12050efbd12 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -3,7 +3,7 @@
v0.17.1 (November ??, 2015)
---------------------------
-This is a minor bug-fix release from 0.17.0 and includes a a large number of
+This is a minor bug-fix release from 0.17.0 and includes a large number of
bug fixes along several new features, enhancements, and performance improvements.
We recommend that all users upgrade to this version.
@@ -50,6 +50,8 @@ Performance Improvements
.. _whatsnew_0171.bug_fixes:
+- Performance bug in ``Categorical._repr_categories`` was rendering string before chopping them for display (:issue: `11305`)
+
Bug Fixes
~~~~~~~~~
@@ -65,7 +67,6 @@ Bug Fixes
-
- Bug in ``squeeze()`` with zero length arrays (:issue:`11230`, :issue:`8999`)
diff --git a/pandas/core/categorical.py b/pandas/core/categorical.py
index 9decd5e212cbf..8068ad785b6d8 100644
--- a/pandas/core/categorical.py
+++ b/pandas/core/categorical.py
@@ -1389,12 +1389,13 @@ def _repr_categories(self):
max_categories = (10 if get_option("display.max_categories") == 0
else get_option("display.max_categories"))
from pandas.core import format as fmt
- category_strs = fmt.format_array(self.categories, None)
- if len(category_strs) > max_categories:
+ if len(self.categories) > max_categories:
num = max_categories // 2
- head = category_strs[:num]
- tail = category_strs[-(max_categories - num):]
+ head = fmt.format_array(self.categories[:num], None)
+ tail = fmt.format_array(self.categories[-num:], None)
category_strs = head + ["..."] + tail
+ else:
+ category_strs = fmt.format_array(self.categories, None)
# Strip all leading spaces, which format_array adds for columns...
category_strs = [x.strip() for x in category_strs]
| closes #11305
| https://api.github.com/repos/pandas-dev/pandas/pulls/11306 | 2015-10-13T02:04:54Z | 2015-11-13T15:13:19Z | null | 2015-11-13T15:13:19Z |
DOC Use plot.<kind> instead of plot(kind=<kind>) GH11043 | diff --git a/doc/source/visualization.rst b/doc/source/visualization.rst
index b6ee2d83fd131..b60996cd79d8e 100644
--- a/doc/source/visualization.rst
+++ b/doc/source/visualization.rst
@@ -135,7 +135,16 @@ These include:
* :ref:`'hexbin' <visualization.hexbin>` for hexagonal bin plots
* :ref:`'pie' <visualization.pie>` for pie plots
-.. versionadded:: 0.17
+For example, a bar plot can be created the following way:
+
+.. ipython:: python
+
+ plt.figure();
+
+ @savefig bar_plot_ex.png
+ df.ix[5].plot(kind='bar'); plt.axhline(0, color='k')
+
+.. versionadded:: 0.17.0
You can also create these other plots using the methods ``DataFrame.plot.<kind>`` instead of providing the ``kind`` keyword argument. This makes it easier to discover plot methods and the specific arguments they use:
@@ -178,9 +187,9 @@ For labeled, non-time series data, you may wish to produce a bar plot:
plt.figure();
@savefig bar_plot_ex.png
- df.ix[5].plot(kind='bar'); plt.axhline(0, color='k')
+ df.ix[5].plot.bar(); plt.axhline(0, color='k')
-Calling a DataFrame's :meth:`~DataFrame.plot` method with ``kind='bar'`` produces a multiple
+Calling a DataFrame's :meth:`~DataFrame.plot.bar` method produces a multiple
bar plot:
.. ipython:: python
@@ -195,7 +204,7 @@ bar plot:
df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
@savefig bar_plot_multi_ex.png
- df2.plot(kind='bar');
+ df2.plot.bar();
To produce a stacked bar plot, pass ``stacked=True``:
@@ -208,9 +217,9 @@ To produce a stacked bar plot, pass ``stacked=True``:
.. ipython:: python
@savefig bar_plot_stacked_ex.png
- df2.plot(kind='bar', stacked=True);
+ df2.plot.bar(stacked=True);
-To get horizontal bar plots, pass ``kind='barh'``:
+To get horizontal bar plots, use the ``barh`` method:
.. ipython:: python
:suppress:
@@ -221,7 +230,7 @@ To get horizontal bar plots, pass ``kind='barh'``:
.. ipython:: python
@savefig barh_plot_stacked_ex.png
- df2.plot(kind='barh', stacked=True);
+ df2.plot.barh(stacked=True);
.. _visualization.hist:
@@ -230,7 +239,7 @@ Histograms
.. versionadded:: 0.15.0
-Histogram can be drawn specifying ``kind='hist'``.
+Histogram can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Series.plot.hist` methods.
.. ipython:: python
@@ -240,7 +249,7 @@ Histogram can be drawn specifying ``kind='hist'``.
plt.figure();
@savefig hist_new.png
- df4.plot(kind='hist', alpha=0.5)
+ df4.plot.hist(alpha=0.5)
.. ipython:: python
@@ -255,7 +264,7 @@ Histogram can be stacked by ``stacked=True``. Bin size can be changed by ``bins`
plt.figure();
@savefig hist_new_stacked.png
- df4.plot(kind='hist', stacked=True, bins=20)
+ df4.plot.hist(stacked=True, bins=20)
.. ipython:: python
:suppress:
@@ -269,7 +278,7 @@ You can pass other keywords supported by matplotlib ``hist``. For example, horiz
plt.figure();
@savefig hist_new_kwargs.png
- df4['a'].plot(kind='hist', orientation='horizontal', cumulative=True)
+ df4['a'].plot.hist(orientation='horizontal', cumulative=True)
.. ipython:: python
:suppress:
@@ -329,12 +338,10 @@ The ``by`` keyword can be specified to plot grouped histograms:
Box Plots
~~~~~~~~~
-Boxplot can be drawn calling a ``Series`` and ``DataFrame.plot`` with ``kind='box'``,
-or ``DataFrame.boxplot`` to visualize the distribution of values within each column.
-
.. versionadded:: 0.15.0
-``plot`` method now supports ``kind='box'`` to draw boxplot.
+Boxplot can be drawn calling :meth:`Series.plot.box` and :meth:`DataFrame.plot.box`,
+or :meth:`DataFrame.boxplot` to visualize the distribution of values within each column.
For instance, here is a boxplot representing five trials of 10 observations of
a uniform random variable on [0,1).
@@ -350,7 +357,7 @@ a uniform random variable on [0,1).
df = pd.DataFrame(np.random.rand(10, 5), columns=['A', 'B', 'C', 'D', 'E'])
@savefig box_plot_new.png
- df.plot(kind='box')
+ df.plot.box()
Boxplot can be colorized by passing ``color`` keyword. You can pass a ``dict``
whose keys are ``boxes``, ``whiskers``, ``medians`` and ``caps``.
@@ -371,7 +378,7 @@ more complicated colorization, you can get each drawn artists by passing
medians='DarkBlue', caps='Gray')
@savefig box_new_colorize.png
- df.plot(kind='box', color=color, sym='r+')
+ df.plot.box(color=color, sym='r+')
.. ipython:: python
:suppress:
@@ -385,7 +392,7 @@ For example, horizontal and custom-positioned boxplot can be drawn by
.. ipython:: python
@savefig box_new_kwargs.png
- df.plot(kind='box', vert=False, positions=[1, 4, 5, 6, 8])
+ df.plot.box(vert=False, positions=[1, 4, 5, 6, 8])
See the :meth:`boxplot <matplotlib.axes.Axes.boxplot>` method and the
@@ -464,7 +471,7 @@ When ``subplots=False`` / ``by`` is ``None``:
* if ``return_type`` is ``'dict'``, a dictionary containing the :class:`matplotlib Lines <matplotlib.lines.Line2D>` is returned. The keys are "boxes", "caps", "fliers", "medians", and "whiskers".
This is the default of ``boxplot`` in historical reason.
- Note that ``plot(kind='box')`` returns ``Axes`` as default as the same as other plots.
+ Note that ``plot.box()`` returns ``Axes`` by default same as other plots.
* if ``return_type`` is ``'axes'``, a :class:`matplotlib Axes <matplotlib.axes.Axes>` containing the boxplot is returned.
* if ``return_type`` is ``'both'`` a namedtuple containging the :class:`matplotlib Axes <matplotlib.axes.Axes>`
and :class:`matplotlib Lines <matplotlib.lines.Line2D>` is returned
@@ -516,7 +523,8 @@ Area Plot
.. versionadded:: 0.14
-You can create area plots with ``Series.plot`` and ``DataFrame.plot`` by passing ``kind='area'``. Area plots are stacked by default. To produce stacked area plot, each column must be either all positive or all negative values.
+You can create area plots with :meth:`Series.plot.area` and :meth:`DataFrame.plot.area`.
+Area plots are stacked by default. To produce stacked area plot, each column must be either all positive or all negative values.
When input data contains `NaN`, it will be automatically filled by 0. If you want to drop or fill by different values, use :func:`dataframe.dropna` or :func:`dataframe.fillna` before calling `plot`.
@@ -531,7 +539,7 @@ When input data contains `NaN`, it will be automatically filled by 0. If you wan
df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
@savefig area_plot_stacked.png
- df.plot(kind='area');
+ df.plot.area();
To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5 unless otherwise specified:
@@ -544,7 +552,7 @@ To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5
.. ipython:: python
@savefig area_plot_unstacked.png
- df.plot(kind='area', stacked=False);
+ df.plot.area(stacked=False);
.. _visualization.scatter:
@@ -553,7 +561,7 @@ Scatter Plot
.. versionadded:: 0.13
-You can create scatter plots with ``DataFrame.plot`` by passing ``kind='scatter'``.
+Scatter plot can be drawn by using the :meth:`DataFrame.plot.scatter` method.
Scatter plot requires numeric columns for x and y axis.
These can be specified by ``x`` and ``y`` keywords each.
@@ -569,18 +577,16 @@ These can be specified by ``x`` and ``y`` keywords each.
df = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd'])
@savefig scatter_plot.png
- df.plot(kind='scatter', x='a', y='b');
+ df.plot.scatter(x='a', y='b');
To plot multiple column groups in a single axes, repeat ``plot`` method specifying target ``ax``.
It is recommended to specify ``color`` and ``label`` keywords to distinguish each groups.
.. ipython:: python
- ax = df.plot(kind='scatter', x='a', y='b',
- color='DarkBlue', label='Group 1');
+ ax = df.plot.scatter(x='a', y='b', color='DarkBlue', label='Group 1');
@savefig scatter_plot_repeated.png
- df.plot(kind='scatter', x='c', y='d',
- color='DarkGreen', label='Group 2', ax=ax);
+ df.plot.scatter(x='c', y='d', color='DarkGreen', label='Group 2', ax=ax);
.. ipython:: python
:suppress:
@@ -593,7 +599,7 @@ each point:
.. ipython:: python
@savefig scatter_plot_colored.png
- df.plot(kind='scatter', x='a', y='b', c='c', s=50);
+ df.plot.scatter(x='a', y='b', c='c', s=50);
.. ipython:: python
@@ -607,7 +613,7 @@ Below example shows a bubble chart using a dataframe column values as bubble siz
.. ipython:: python
@savefig scatter_plot_bubble.png
- df.plot(kind='scatter', x='a', y='b', s=df['c']*200);
+ df.plot.scatter(x='a', y='b', s=df['c']*200);
.. ipython:: python
:suppress:
@@ -624,8 +630,7 @@ Hexagonal Bin Plot
.. versionadded:: 0.14
-You can create hexagonal bin plots with :meth:`DataFrame.plot` and
-``kind='hexbin'``.
+You can create hexagonal bin plots with :meth:`DataFrame.plot.hexbin`.
Hexbin plots can be a useful alternative to scatter plots if your data are
too dense to plot each point individually.
@@ -641,7 +646,7 @@ too dense to plot each point individually.
df['b'] = df['b'] + np.arange(1000)
@savefig hexbin_plot.png
- df.plot(kind='hexbin', x='a', y='b', gridsize=25)
+ df.plot.hexbin(x='a', y='b', gridsize=25)
A useful keyword argument is ``gridsize``; it controls the number of hexagons
@@ -670,7 +675,7 @@ given by column ``z``. The bins are aggregated with numpy's ``max`` function.
df['z'] = np.random.uniform(0, 3, 1000)
@savefig hexbin_plot_agg.png
- df.plot(kind='hexbin', x='a', y='b', C='z', reduce_C_function=np.max,
+ df.plot.hexbin(x='a', y='b', C='z', reduce_C_function=np.max,
gridsize=25)
.. ipython:: python
@@ -688,7 +693,7 @@ Pie plot
.. versionadded:: 0.14
-You can create a pie plot with :meth:`DataFrame.plot` or :meth:`Series.plot` with ``kind='pie'``.
+You can create a pie plot with :meth:`DataFrame.plot.pie` or :meth:`Series.plot.pie`.
If your data includes any ``NaN``, they will be automatically filled with 0.
A ``ValueError`` will be raised if there are any negative values in your data.
@@ -703,7 +708,7 @@ A ``ValueError`` will be raised if there are any negative values in your data.
series = pd.Series(3 * np.random.rand(4), index=['a', 'b', 'c', 'd'], name='series')
@savefig series_pie_plot.png
- series.plot(kind='pie', figsize=(6, 6))
+ series.plot.pie(figsize=(6, 6))
.. ipython:: python
:suppress:
@@ -730,7 +735,7 @@ A legend will be drawn in each pie plots by default; specify ``legend=False`` to
df = pd.DataFrame(3 * np.random.rand(4, 2), index=['a', 'b', 'c', 'd'], columns=['x', 'y'])
@savefig df_pie_plot.png
- df.plot(kind='pie', subplots=True, figsize=(8, 4))
+ df.plot.pie(subplots=True, figsize=(8, 4))
.. ipython:: python
:suppress:
@@ -757,8 +762,8 @@ Also, other keywords supported by :func:`matplotlib.pyplot.pie` can be used.
.. ipython:: python
@savefig series_pie_plot_options.png
- series.plot(kind='pie', labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'],
- autopct='%.2f', fontsize=20, figsize=(6, 6))
+ series.plot.pie(labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'],
+ autopct='%.2f', fontsize=20, figsize=(6, 6))
If you pass values whose sum total is less than 1.0, matplotlib draws a semicircle.
@@ -773,7 +778,7 @@ If you pass values whose sum total is less than 1.0, matplotlib draws a semicirc
series = pd.Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2')
@savefig series_pie_plot_semi.png
- series.plot(kind='pie', figsize=(6, 6))
+ series.plot.pie(figsize=(6, 6))
See the `matplotlib pie documentation <http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.pie>`__ for more.
@@ -863,8 +868,7 @@ Density Plot
.. versionadded:: 0.8.0
-You can create density plots using the Series/DataFrame.plot and
-setting ``kind='kde'``:
+You can create density plots using the :meth:`Series.plot.kde` and :meth:`DataFrame.plot.kde` methods.
.. ipython:: python
:suppress:
@@ -877,7 +881,7 @@ setting ``kind='kde'``:
ser = pd.Series(np.random.randn(1000))
@savefig kde_plot.png
- ser.plot(kind='kde')
+ ser.plot.kde()
.. ipython:: python
:suppress:
@@ -1392,7 +1396,7 @@ Here is an example of one way to easily plot group means with standard deviation
# Plot
fig, ax = plt.subplots()
@savefig errorbar_example.png
- means.plot(yerr=errors, ax=ax, kind='bar')
+ means.plot.bar(yerr=errors, ax=ax)
.. ipython:: python
:suppress:
@@ -1532,7 +1536,7 @@ Colormaps can also be used other plot types, like bar charts:
plt.figure()
@savefig greens.png
- dd.plot(kind='bar', colormap='Greens')
+ dd.plot.bar(colormap='Greens')
.. ipython:: python
:suppress:
| closes #11043
I modified the visualization documentation to use plot.<kind> instead of plot(kind=<kind>) in the examples, as described in [issue 11043](https://github.com/pydata/pandas/issues/11043).
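For example, the two spellings below are equivalent; the docs now prefer the accessor form (sketch only, assumes matplotlib is installed):

```python
import numpy as np
import pandas as pd

df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])

df2.plot(kind='bar')   # keyword style previously used throughout the docs
df2.plot.bar()         # accessor style the docs are switched to
```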
| https://api.github.com/repos/pandas-dev/pandas/pulls/11303 | 2015-10-12T20:11:24Z | 2016-02-12T03:10:54Z | null | 2016-02-15T19:22:24Z |
BUG: Bug in tz-conversions with an ambiguous time and .dt accessors, #11295 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index e4372ad1d23b8..79eef77b74ff7 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -55,8 +55,12 @@ Bug Fixes
- Bug in ``.to_latex()`` output broken when the index has a name (:issue: `10660`)
- Bug in ``HDFStore.append`` with strings whose encoded length exceded the max unencoded length (:issue:`11234`)
+
- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
+- Bug in tz-conversions with an ambiguous time and ``.dt`` accessors (:issues:`11295`)
+
+
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 81ebc7efdbdd9..f7d93a978a46a 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -3348,6 +3348,15 @@ def test_construction_with_alt(self):
i2 = DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz=i.dtype.tz)
self.assert_index_equal(i, i2)
+ # localize into the provided tz
+ i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
+ expected = i.tz_localize(None).tz_localize('UTC')
+ self.assert_index_equal(i2, expected)
+
+ i2 = DatetimeIndex(i, tz='UTC')
+ expected = i.tz_convert('UTC')
+ self.assert_index_equal(i2, expected)
+
# incompat tz/dtype
self.assertRaises(ValueError, lambda : DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 3a13af60ae86f..9c86c3f894c67 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -224,6 +224,18 @@ def get_dir(s):
results = get_dir(s)
tm.assert_almost_equal(results, list(sorted(set(ok_for_period + ok_for_period_methods))))
+ # 11295
+ # ambiguous time error on the conversions
+ s = Series(pd.date_range('2015-01-01', '2016-01-01', freq='T'))
+ s = s.dt.tz_localize('UTC').dt.tz_convert('America/Chicago')
+ results = get_dir(s)
+ tm.assert_almost_equal(results, list(sorted(set(ok_for_dt + ok_for_dt_methods))))
+ expected = Series(pd.date_range('2015-01-01',
+ '2016-01-01',
+ freq='T',
+ tz='UTC').tz_convert('America/Chicago'))
+ tm.assert_series_equal(s, expected)
+
# no setting allowed
s = Series(date_range('20130101',periods=5,freq='D'))
with tm.assertRaisesRegexp(ValueError, "modifications"):
diff --git a/pandas/tseries/common.py b/pandas/tseries/common.py
index ba9f2b8343a3d..dcfe809074a0b 100644
--- a/pandas/tseries/common.py
+++ b/pandas/tseries/common.py
@@ -45,8 +45,10 @@ def maybe_to_datetimelike(data, copy=False):
raise TypeError("cannot convert an object of type {0} to a datetimelike index".format(type(data)))
index = data.index
- if is_datetime64_dtype(data.dtype) or is_datetime64tz_dtype(data.dtype):
+ if is_datetime64_dtype(data.dtype):
return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer'), index, name=data.name)
+ elif is_datetime64tz_dtype(data.dtype):
+ return DatetimeProperties(DatetimeIndex(data, copy=copy, freq='infer', ambiguous='infer'), index, name=data.name)
elif is_timedelta64_dtype(data.dtype):
return TimedeltaProperties(TimedeltaIndex(data, copy=copy, freq='infer'), index, name=data.name)
else:
diff --git a/pandas/tseries/frequencies.py b/pandas/tseries/frequencies.py
index 6a4257d101473..07546a76be431 100644
--- a/pandas/tseries/frequencies.py
+++ b/pandas/tseries/frequencies.py
@@ -851,10 +851,11 @@ def infer_freq(index, warn=True):
raise TypeError("cannot infer freq from a non-convertible index type {0}".format(type(index)))
index = index.values
- try:
- index = pd.DatetimeIndex(index)
- except AmbiguousTimeError:
- index = pd.DatetimeIndex(index.asi8)
+ if not isinstance(index, pd.DatetimeIndex):
+ try:
+ index = pd.DatetimeIndex(index)
+ except AmbiguousTimeError:
+ index = pd.DatetimeIndex(index.asi8)
inferer = _FrequencyInferer(index, warn=warn)
return inferer.get_freq()
diff --git a/pandas/tseries/index.py b/pandas/tseries/index.py
index 8f6b924ebd850..814a9ccc45582 100644
--- a/pandas/tseries/index.py
+++ b/pandas/tseries/index.py
@@ -194,7 +194,8 @@ def _join_i8_wrapper(joinf, **kwargs):
_datetimelike_ops = ['year','month','day','hour','minute','second',
'weekofyear','week','dayofweek','weekday','dayofyear','quarter', 'days_in_month', 'daysinmonth',
'date','time','microsecond','nanosecond','is_month_start','is_month_end',
- 'is_quarter_start','is_quarter_end','is_year_start','is_year_end','tz','freq']
+ 'is_quarter_start','is_quarter_end','is_year_start','is_year_end',
+ 'tz','freq']
_is_numeric_dtype = False
@@ -269,14 +270,7 @@ def __new__(cls, data=None,
dayfirst=dayfirst,
yearfirst=yearfirst)
- if is_datetimetz(data):
- # extract the data whether a Series or Index
- if isinstance(data, ABCSeries):
- data = data._values
- tz = data.tz
- data = data.tz_localize(None, ambiguous='infer').values
-
- if issubclass(data.dtype.type, np.datetime64):
+ if issubclass(data.dtype.type, np.datetime64) or is_datetimetz(data):
if isinstance(data, ABCSeries):
data = data._values
if isinstance(data, DatetimeIndex):
| closes #11295
| https://api.github.com/repos/pandas-dev/pandas/pulls/11301 | 2015-10-12T15:45:45Z | 2015-10-13T12:25:28Z | 2015-10-13T12:25:27Z | 2015-10-13T12:25:28Z |
BUG: GH10355 groupby std() no longer sqrts grouping cols | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index add5080a69ee4..9b514a29afd79 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -789,7 +789,12 @@ def std(self, ddof=1):
For multiple groupings, the result index will be a MultiIndex
"""
# todo, implement at cython level?
- return np.sqrt(self.var(ddof=ddof))
+ if ddof == 1:
+ return self._cython_agg_general('std')
+ else:
+ self._set_selection_from_grouper()
+ f = lambda x: x.std(ddof=ddof)
+ return self._python_agg_general(f)
def var(self, ddof=1):
"""
@@ -1467,6 +1472,10 @@ def get_group_levels(self):
#------------------------------------------------------------
# Aggregation functions
+ def _cython_std(group_var, out, b, c, d):
+ group_var(out, b, c, d)
+ out **= 0.5 # needs to be applied in place
+
_cython_functions = {
'add': 'group_add',
'prod': 'group_prod',
@@ -1477,6 +1486,10 @@ def get_group_levels(self):
'name': 'group_median'
},
'var': 'group_var',
+ 'std': {
+ 'name': 'group_var',
+ 'f': _cython_std,
+ },
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d: func(a, b, c, d, 1)
@@ -1512,7 +1525,7 @@ def get_func(fname):
# a sub-function
f = ftype.get('f')
- if f is not None:
+ if f is not None and afunc is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 8eb641ce8f494..078f32470161b 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -5545,6 +5545,36 @@ def test_nunique_with_object(self):
expected = pd.Series([1] * 5, name='name', index=index)
tm.assert_series_equal(result, expected)
+ def test_std_with_as_index_false(self):
+ # GH 10355
+ df = pd.DataFrame({
+ 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
+ 'b': [1, 2, 3, 4, 5, 6, 7, 8, 9],
+ })
+ sd = df.groupby('a', as_index=False).std()
+
+ expected = pd.DataFrame({
+ 'a': [1, 2, 3],
+ 'b': [1, 1, 1],
+ })
+ tm.assert_frame_equal(expected, sd)
+
+ def test_std_with_ddof(self):
+ df = pd.DataFrame({
+ 'a': [1, 1, 1, 2, 2, 2, 3, 3, 3],
+ 'b': [1, 2, 3, 1, 5, 6, 7, 8, 10],
+ })
+ sd = df.groupby('a', as_index=False).std(ddof=0)
+
+ expected = pd.DataFrame({
+ 'a': [1, 2, 3],
+ 'b': [
+ np.std([1, 2, 3], ddof=0),
+ np.std([1, 5, 6], ddof=0),
+ np.std([7, 8, 10], ddof=0)],
+ })
+ tm.assert_frame_equal(expected, sd)
+
def assert_fp_equal(a, b):
assert (np.abs(a - b) < 1e-12).all()
| closes #10355
Previously, grouping columns were square-rooted when `as_index=False`.
The new method closely follows the format of the `var()` method.
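A minimal sketch of the behaviour being fixed (made-up numbers, not from the added tests):

```python
import pandas as pd

df = pd.DataFrame({'a': [1, 1, 1, 2, 2, 2],
                   'b': [1, 2, 3, 4, 5, 6]})

# with as_index=False the grouping column 'a' should pass through unchanged;
# previously it was square-rooted along with the aggregated values
print(df.groupby('a', as_index=False).std())
```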
| https://api.github.com/repos/pandas-dev/pandas/pulls/11300 | 2015-10-12T14:40:57Z | 2015-11-05T12:57:18Z | null | 2019-02-14T05:23:36Z |
PERF: Checking monotonic-ness before sorting on an index #11080 | diff --git a/asv_bench/benchmarks/frame_methods.py b/asv_bench/benchmarks/frame_methods.py
index 9bece56e15c90..a04a9d0814a30 100644
--- a/asv_bench/benchmarks/frame_methods.py
+++ b/asv_bench/benchmarks/frame_methods.py
@@ -930,6 +930,16 @@ def time_frame_xs_row(self):
self.df.xs(50000)
+class frame_sort_index(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.df = DataFrame(randn(1000000, 2), columns=list('AB'))
+
+ def time_frame_sort_index(self):
+ self.df.sort_index()
+
+
class series_string_vector_slice(object):
goal_time = 0.2
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 507c829e5763f..945840184a00c 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -52,6 +52,8 @@ Deprecations
Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
+- Checking monotonic-ness before sorting on an index (:issue:`11080`)
+
.. _whatsnew_0171.bug_fixes:
Bug Fixes
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 920d9ad96c5b6..e92de770ac4bd 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -3157,6 +3157,15 @@ def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
else:
from pandas.core.groupby import _nargsort
+ # GH11080 - Check monotonic-ness before sort an index
+ # if monotonic (already sorted), return None or copy() according to 'inplace'
+ if (ascending and labels.is_monotonic_increasing) or \
+ (not ascending and labels.is_monotonic_decreasing):
+ if inplace:
+ return
+ else:
+ return self.copy()
+
indexer = _nargsort(labels, kind=kind, ascending=ascending,
na_position=na_position)
| closes #11080
Worked on this during the PyCon JP 2015 pandas sprint with @sinhrks.
I found that `test_frame.py:TestDataFrame.test_sort_index` covers this change.
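Roughly the case this short-circuits (sketch only, sizes arbitrary):

```python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(1000000, 2), columns=list('AB'))

# the default integer index is already monotonic increasing, so with the
# check in place sort_index() can return a copy straight away instead of
# computing a sort indexer for a million rows
assert df.index.is_monotonic_increasing
sorted_df = df.sort_index()
```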
| https://api.github.com/repos/pandas-dev/pandas/pulls/11294 | 2015-10-12T08:26:46Z | 2015-10-14T10:56:26Z | 2015-10-14T10:56:26Z | 2015-10-14T10:56:38Z |
TST: Add hex color strings test | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 1eff7d01d9d91..5696d8d7416fc 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -64,7 +64,7 @@ Bug Fixes
-
+- Bug in ``DataFrame.plot`` cannot use hex strings colors (:issue:`10299`)
diff --git a/pandas/tests/test_graphics.py b/pandas/tests/test_graphics.py
index b2d8ff8ba0b00..83b76393f30e0 100644
--- a/pandas/tests/test_graphics.py
+++ b/pandas/tests/test_graphics.py
@@ -2689,6 +2689,18 @@ def test_line_colors(self):
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
+ # GH 10299
+ custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
+ ax = df.plot(color=custom_colors)
+ self._check_colors(ax.get_lines(), linecolors=custom_colors)
+ tm.close()
+
+ with tm.assertRaises(ValueError):
+ # Color contains shorthand hex value results in ValueError
+ custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
+ # Forced show plot
+ _check_plot_works(df.plot, color=custom_colors)
+
@slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
@@ -2725,6 +2737,20 @@ def test_line_colors_and_styles_subplots(self):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
+ # GH 10299
+ custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
+ axes = df.plot(color=custom_colors, subplots=True)
+ for ax, c in zip(axes, list(custom_colors)):
+ self._check_colors(ax.get_lines(), linecolors=[c])
+ tm.close()
+
+ with tm.assertRaises(ValueError):
+ # Color contains shorthand hex value results in ValueError
+ custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
+ # Forced show plot
+ _check_plot_works(df.plot, color=custom_colors, subplots=True,
+ filterwarnings='ignore')
+
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
| It's already resolved. I added a hex string color test.
closes #10299
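A small sketch of what the new test exercises (hypothetical data, requires matplotlib):
``` python
import numpy as np
import pandas as pd

df = pd.DataFrame(np.random.randn(10, 3))

# full 6-digit hex strings are accepted as line colors
ax = df.plot(color=['#FF0000', '#0000FF', '#FFFF00'])

# 3-digit shorthand hex strings are expected to raise a ValueError
try:
    df.plot(color=['#F00', '#00F', '#FF0'])
except ValueError:
    pass
```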
| https://api.github.com/repos/pandas-dev/pandas/pulls/11293 | 2015-10-12T07:40:01Z | 2015-10-15T14:20:58Z | 2015-10-15T14:20:58Z | 2015-10-15T14:45:02Z |
Fix mistake in Pytables querying with numpy scalar value. Fixes #11283 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 74ace42eb7e22..47a83e9894521 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -53,7 +53,7 @@ Bug Fixes
- Bug in ``.to_latex()`` output broken when the index has a name (:issue: `10660`)
- Bug in ``HDFStore.append`` with strings whose encoded length exceded the max unencoded length (:issue:`11234`)
-
+- Bug in ``HDFStore.select`` when comparing with a numpy scalar in a where clause (:issue:`11283`)
diff --git a/pandas/computation/pytables.py b/pandas/computation/pytables.py
index bc4e60f70f2b4..1bc5b8b723657 100644
--- a/pandas/computation/pytables.py
+++ b/pandas/computation/pytables.py
@@ -129,7 +129,7 @@ def conform(self, rhs):
""" inplace conform rhs """
if not com.is_list_like(rhs):
rhs = [rhs]
- if hasattr(self.rhs, 'ravel'):
+ if isinstance(rhs, np.ndarray):
rhs = rhs.ravel()
return rhs
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index 167170f7cd7c5..6c78f9cf3937c 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -3049,6 +3049,18 @@ def test_select_dtypes(self):
result = store.select(
'df4', where='values>2.0')
tm.assert_frame_equal(expected, result)
+
+ # test selection with comparison against numpy scalar
+ # GH 11283
+ with ensure_clean_store(self.path) as store:
+ df = tm.makeDataFrame()
+
+ expected = df[df['A']>0]
+
+ store.append('df', df, data_columns=True)
+ np_zero = np.float64(0)
+ result = store.select('df', where=["A>np_zero"])
+ tm.assert_frame_equal(expected, result)
def test_select_with_many_inputs(self):
| Fixes #11283
First PR to this project. The fix is minuscule, but let me know if I did anything awry.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11291 | 2015-10-12T03:13:05Z | 2015-10-13T11:49:26Z | 2015-10-13T11:49:26Z | 2015-10-13T11:49:29Z |
ENH: Improvement to the BigQuery streaming insert failure message #11285 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 668873e838597..41a3ea9347a04 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -20,6 +20,8 @@ Enhancements
.. _whatsnew_0171.enhancements.other:
+- Improve the error message in :func:`pandas.io.gbq.to_gbq` when a streaming insert fails (:issue:`11285`)
+
Other Enhancements
^^^^^^^^^^^^^^^^^^
diff --git a/pandas/io/gbq.py b/pandas/io/gbq.py
index e9568db06f391..e7241036b94c4 100644
--- a/pandas/io/gbq.py
+++ b/pandas/io/gbq.py
@@ -185,7 +185,8 @@ def process_insert_errors(insert_errors, verbose):
for error in errors:
reason = error['reason']
message = error['message']
- error_message = 'Error at Row: {0}, Reason: {1}, Message: {2}'.format(row, reason, message)
+ location = error['location']
+ error_message = 'Error at Row: {0}, Reason: {1}, Location: {2}, Message: {3}'.format(row, reason, location, message)
# Report all error messages if verbose is set
if verbose:
| closes #11285
This is a minor change to include `'Location'` in the error message reported when a streaming insert fails using `df.to_gbq()`.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11286 | 2015-10-11T12:03:18Z | 2015-10-11T15:40:28Z | 2015-10-11T15:40:28Z | 2015-10-14T12:00:45Z |
DOC: whatsnew fixes | diff --git a/doc/source/whatsnew/v0.12.0.txt b/doc/source/whatsnew/v0.12.0.txt
index fd726af3912f0..4c7d799ec5202 100644
--- a/doc/source/whatsnew/v0.12.0.txt
+++ b/doc/source/whatsnew/v0.12.0.txt
@@ -191,7 +191,7 @@ I/O Enhancements
df = DataFrame({'a': range(3), 'b': list('abc')})
print(df)
html = df.to_html()
- alist = pd.read_html(html, infer_types=True, index_col=0)
+ alist = pd.read_html(html, index_col=0)
print(df == alist[0])
Note that ``alist`` here is a Python ``list`` so ``pd.read_html()`` and
@@ -252,19 +252,31 @@ I/O Enhancements
- Iterator support via ``read_hdf`` that automatically opens and closes the
store when iteration is finished. This is only for *tables*
- .. ipython:: python
- :okwarning:
+ .. code-block:: python
- path = 'store_iterator.h5'
- DataFrame(randn(10,2)).to_hdf(path,'df',table=True)
- for df in read_hdf(path,'df', chunksize=3):
- print(df)
+ In [25]: path = 'store_iterator.h5'
+
+ In [26]: DataFrame(randn(10,2)).to_hdf(path,'df',table=True)
+
+ In [27]: for df in read_hdf(path,'df', chunksize=3):
+ ....: print df
+ ....:
+ 0 1
+ 0 0.713216 -0.778461
+ 1 -0.661062 0.862877
+ 2 0.344342 0.149565
+ 0 1
+ 3 -0.626968 -0.875772
+ 4 -0.930687 -0.218983
+ 5 0.949965 -0.442354
+ 0 1
+ 6 -0.402985 1.111358
+ 7 -0.241527 -0.670477
+ 8 0.049355 0.632633
+ 0 1
+ 9 -1.502767 -1.225492
- .. ipython:: python
- :suppress:
- import os
- os.remove(path)
- ``read_csv`` will now throw a more informative error message when a file
contains no columns, e.g., all newline characters
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 58973d491e263..0b2bb0b5a475c 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -517,7 +517,7 @@ Other enhancements
- Allow passing `kwargs` to the interpolation methods (:issue:`10378`).
-- Improved error message when concatenating an empty iterable of ``Dataframe``s (:issue:`9157`)
+- Improved error message when concatenating an empty iterable of ``Dataframe`` objects (:issue:`9157`)
- ``pd.read_csv`` can now read bz2-compressed files incrementally, and the C parser can read bz2-compressed files from AWS S3 (:issue:`11070`, :issue:`11072`).
| Some fixes I applied to the v0.17.0 docs I uploaded (the older whatsnew docs were failing due to changes in 0.17).
| https://api.github.com/repos/pandas-dev/pandas/pulls/11280 | 2015-10-10T10:12:33Z | 2015-10-10T10:12:54Z | 2015-10-10T10:12:54Z | 2015-10-10T10:12:54Z |
PERF: use .values in index difference | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index d8e92b41c6593..58b136827a62b 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -136,6 +136,7 @@ Performance Improvements
~~~~~~~~~~~~~~~~~~~~~~~~
- Checking monotonic-ness before sorting on an index (:issue:`11080`)
+- Performance improvements in ``Index.difference``, particularly for ``PeriodIndex`` (:issue:`11278`)
- ``Series.dropna`` performance improvement when its dtype can't contain ``NaN`` (:issue:`11159`)
- Release the GIL on most datetime field operations (e.g. ``DatetimeIndex.year``, ``Series.dt.year``), normalization, and conversion to and from ``Period``, ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestamp`` (:issue:`11263`)
- Release the GIL on some rolling algos: ``rolling_median``, ``rolling_mean``, ``rolling_max``, ``rolling_min``, ``rolling_var``, ``rolling_kurt``, ``rolling_skew`` (:issue:`11450`)
diff --git a/pandas/core/index.py b/pandas/core/index.py
index fa23f2e1efe3f..d1f421fffb252 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -1711,12 +1711,12 @@ def difference(self, other):
self._assert_can_do_setop(other)
if self.equals(other):
- return Index([], name=self.name)
+ return self._shallow_copy([])
other, result_name = self._convert_can_do_setop(other)
- theDiff = sorted(set(self) - set(other))
- return Index(theDiff, name=result_name)
+ diff = sorted(set(self.values) - set(other.values))
+ return self._shallow_copy(diff, name=result_name)
diff = deprecate('diff', difference)
| The existing `.difference` method 'unboxed' all the objects, which has a severe performance impact on `PeriodIndex` in particular.
``` python
In [3]: long_index = pd.period_range(start='2000', freq='s', periods=1000)
In [4]: empty_index = pd.PeriodIndex([],freq='s')
In [24]: %timeit long_index.difference(empty_index)
# existing:
1 loops, best of 1: 1.02 s per loop
# updated:
1000 loops, best of 3: 538 µs per loop
```
...so around 2000x
I haven't worked with asv or the like - is this a case where a test like that is required?
| https://api.github.com/repos/pandas-dev/pandas/pulls/11279 | 2015-10-10T01:16:40Z | 2016-01-06T17:18:33Z | null | 2016-12-22T05:57:03Z |
Update sql.py | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 721a2c1f350ee..799d1e88260f2 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -309,8 +309,8 @@ def read_sql_table(table_name, con, schema=None, index_col=None,
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
- index_col : string, optional, default: None
- Column to set as index
+ index_col : string or list of strings, optional, default: None
+ Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point. Can result in loss of Precision.
@@ -384,8 +384,8 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
- index_col : string, optional, default: None
- Column name to use as index for the returned DataFrame object.
+ index_col : string or list of strings, optional, default: None
+ Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
@@ -443,8 +443,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
- index_col : string, optional, default: None
- column name to use as index for the returned DataFrame object.
+ index_col : string or list of strings, optional, default: None
+ Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values to non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
| Fix documentation for index_col parameter to read_sql, read_sql_table and read_sql_query to reflect that a list can be passed.
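A minimal sketch of passing a list, using a hypothetical in-memory SQLite table:
``` python
import sqlite3
import pandas as pd

con = sqlite3.connect(':memory:')
pd.DataFrame({'a': [1, 1, 2], 'b': ['x', 'y', 'x'], 'v': [1.0, 2.0, 3.0]}).to_sql('t', con, index=False)

# a list of column names produces a MultiIndex on the returned DataFrame
df = pd.read_sql_query('SELECT * FROM t', con, index_col=['a', 'b'])
print(df.index.nlevels)  # 2
```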
| https://api.github.com/repos/pandas-dev/pandas/pulls/11277 | 2015-10-09T21:13:48Z | 2015-10-10T10:10:18Z | 2015-10-10T10:10:18Z | 2015-10-10T10:10:18Z |
PERF: Removed the GIL from parts of the TextReader class | diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index fdeace108f76e..2eb6786356511 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -1,15 +1,24 @@
from .pandas_vb_common import *
from pandas.core import common as com
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from io import StringIO
+
try:
from pandas.util.testing import test_parallel
+
have_real_test_parallel = True
except ImportError:
have_real_test_parallel = False
+
def test_parallel(num_threads=1):
def wrapper(fname):
return fname
+
return wrapper
@@ -321,6 +330,7 @@ def run(arr):
algos.kth_smallest(arr, self.k)
run()
+
class nogil_datetime_fields(object):
goal_time = 0.2
@@ -435,4 +445,47 @@ def time_nogil_rolling_std(self):
@test_parallel(num_threads=2)
def run(arr, win):
rolling_std(arr, win)
- run(self.arr, self.win)
\ No newline at end of file
+ run(self.arr, self.win)
+
+
+class nogil_read_csv(object):
+ number = 1
+ repeat = 5
+
+ def setup(self):
+ if (not have_real_test_parallel):
+ raise NotImplementedError
+ # Using the values
+ self.df = DataFrame(np.random.randn(10000, 50))
+ self.df.to_csv('__test__.csv')
+
+ self.rng = date_range('1/1/2000', periods=10000)
+ self.df_date_time = DataFrame(np.random.randn(10000, 50), index=self.rng)
+ self.df_date_time.to_csv('__test_datetime__.csv')
+
+ self.df_object = DataFrame('foo', index=self.df.index, columns=self.create_cols('object'))
+ self.df_object.to_csv('__test_object__.csv')
+
+ def create_cols(self, name):
+ return [('%s%03d' % (name, i)) for i in range(5)]
+
+ @test_parallel(num_threads=2)
+ def pg_read_csv(self):
+ read_csv('__test__.csv', sep=',', header=None, float_precision=None)
+
+ def time_nogil_read_csv(self):
+ self.pg_read_csv()
+
+ @test_parallel(num_threads=2)
+ def pg_read_csv_object(self):
+ read_csv('__test_object__.csv', sep=',')
+
+ def time_nogil_read_csv_object(self):
+ self.pg_read_csv_object()
+
+ @test_parallel(num_threads=2)
+ def pg_read_csv_datetime(self):
+ read_csv('__test_datetime__.csv', sep=',', header=None)
+
+ def time_nogil_read_csv_datetime(self):
+ self.pg_read_csv_datetime()
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 3ca56ecc00d36..0d0f4c66c1fec 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -60,6 +60,7 @@ Performance Improvements
- Release the GIL on most datetime field operations (e.g. ``DatetimeIndex.year``, ``Series.dt.year``), normalization, and conversion to and from ``Period``, ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestamp`` (:issue:`11263`)
- Release the GIL on some srolling algos (``rolling_median``, ``rolling_mean``, ``rolling_max``, ``rolling_min``, ``rolling_var``, ``rolling_kurt``, `rolling_skew`` (:issue:`11450`)
+- Release the GIL when reading and parsing text files in ``read_csv``, ``read_table`` (:issue:`11272`)
- Improved performance of ``rolling_median`` (:issue:`11450`)
- Improved performance to ``to_excel`` (:issue:`11352`)
diff --git a/pandas/parser.pyx b/pandas/parser.pyx
index 8ac1f64f2d50e..f9b8d921f02d1 100644
--- a/pandas/parser.pyx
+++ b/pandas/parser.pyx
@@ -161,7 +161,7 @@ cdef extern from "parser/tokenizer.h":
void *skipset
int64_t skip_first_N_rows
int skip_footer
- double (*converter)(const char *, char **, char, char, char, int)
+ double (*converter)(const char *, char **, char, char, char, int) nogil
# error handling
char *warn_msg
@@ -174,8 +174,8 @@ cdef extern from "parser/tokenizer.h":
int *line_start
int col
- void coliter_setup(coliter_t *it, parser_t *parser, int i, int start)
- void COLITER_NEXT(coliter_t, const char *)
+ void coliter_setup(coliter_t *it, parser_t *parser, int i, int start) nogil
+ void COLITER_NEXT(coliter_t, const char *) nogil
parser_t* parser_new()
@@ -193,26 +193,26 @@ cdef extern from "parser/tokenizer.h":
void debug_print_parser(parser_t *self)
- int tokenize_all_rows(parser_t *self)
- int tokenize_nrows(parser_t *self, size_t nrows)
+ int tokenize_all_rows(parser_t *self) nogil
+ int tokenize_nrows(parser_t *self, size_t nrows) nogil
int64_t str_to_int64(char *p_item, int64_t int_min,
- int64_t int_max, int *error, char tsep)
+ int64_t int_max, int *error, char tsep) nogil
# uint64_t str_to_uint64(char *p_item, uint64_t uint_max, int *error)
double xstrtod(const char *p, char **q, char decimal, char sci,
- char tsep, int skip_trailing)
+ char tsep, int skip_trailing) nogil
double precise_xstrtod(const char *p, char **q, char decimal, char sci,
- char tsep, int skip_trailing)
+ char tsep, int skip_trailing) nogil
double round_trip(const char *p, char **q, char decimal, char sci,
- char tsep, int skip_trailing)
+ char tsep, int skip_trailing) nogil
# inline int to_complex(char *item, double *p_real,
# double *p_imag, char sci, char decimal)
- inline int to_longlong(char *item, long long *p_value)
+ inline int to_longlong(char *item, long long *p_value) nogil
# inline int to_longlong_thousands(char *item, long long *p_value,
# char tsep)
- int to_boolean(const char *item, uint8_t *val)
+ int to_boolean(const char *item, uint8_t *val) nogil
cdef extern from "parser/io.h":
@@ -255,16 +255,19 @@ cdef class TextReader:
cdef:
parser_t *parser
object file_handle, na_fvalues
+ object true_values, false_values
bint na_filter, verbose, has_usecols, has_mi_columns
int parser_start
list clocks
char *c_encoding
+ kh_str_t *false_set
+ kh_str_t *true_set
cdef public:
int leading_cols, table_width, skip_footer, buffer_lines
object allow_leading_cols
object delimiter, converters, delim_whitespace
- object na_values, true_values, false_values
+ object na_values
object memory_map
object as_recarray
object header, orig_header, names, header_start, header_end
@@ -418,11 +421,14 @@ cdef class TextReader:
self.na_values = na_values
if na_fvalues is None:
- na_fvalues = set()
+ na_fvalues = set()
self.na_fvalues = na_fvalues
- self.true_values = _maybe_encode(true_values)
- self.false_values = _maybe_encode(false_values)
+ self.true_values = _maybe_encode(true_values) + _true_values
+ self.false_values = _maybe_encode(false_values) + _false_values
+
+ self.true_set = kset_from_list(self.true_values)
+ self.false_set = kset_from_list(self.false_values)
self.converters = converters
@@ -522,6 +528,8 @@ cdef class TextReader:
def __dealloc__(self):
parser_free(self.parser)
+ kh_destroy_str(self.true_set)
+ kh_destroy_str(self.false_set)
def set_error_bad_lines(self, int status):
self.parser.error_bad_lines = status
@@ -676,10 +684,10 @@ cdef class TextReader:
if hr == self.header[-1]:
lc = len(this_header)
ic = len(self.index_col) if self.index_col is not None else 0
- if lc != unnamed_count and lc-ic > unnamed_count:
- hr -= 1
- self.parser_start -= 1
- this_header = [ None ] * lc
+ if lc != unnamed_count and lc - ic > unnamed_count:
+ hr -= 1
+ self.parser_start -= 1
+ this_header = [None] * lc
data_line = hr + 1
header.append(this_header)
@@ -809,7 +817,8 @@ cdef class TextReader:
cdef _tokenize_rows(self, size_t nrows):
cdef int status
- status = tokenize_nrows(self.parser, nrows)
+ with nogil:
+ status = tokenize_nrows(self.parser, nrows)
if self.parser.warn_msg != NULL:
print >> sys.stderr, self.parser.warn_msg
@@ -836,7 +845,8 @@ cdef class TextReader:
raise ValueError('skip_footer can only be used to read '
'the whole file')
else:
- status = tokenize_all_rows(self.parser)
+ with nogil:
+ status = tokenize_all_rows(self.parser)
if self.parser.warn_msg != NULL:
print >> sys.stderr, self.parser.warn_msg
@@ -1055,9 +1065,6 @@ cdef class TextReader:
bint user_dtype,
kh_str_t *na_hashset,
object na_flist):
- cdef kh_str_t *true_set
- cdef kh_str_t *false_set
-
if dtype[1] == 'i' or dtype[1] == 'u':
result, na_count = _try_int64(self.parser, i, start, end,
na_filter, na_hashset)
@@ -1073,25 +1080,16 @@ cdef class TextReader:
elif dtype[1] == 'f':
result, na_count = _try_double(self.parser, i, start, end,
- na_filter, na_hashset, na_flist)
+ na_filter, na_hashset, na_flist)
if result is not None and dtype[1:] != 'f8':
result = result.astype(dtype)
return result, na_count
elif dtype[1] == 'b':
- if self.true_values is not None or self.false_values is not None:
-
- true_set = kset_from_list(self.true_values + _true_values)
- false_set = kset_from_list(self.false_values + _false_values)
- result, na_count = _try_bool_flex(self.parser, i, start, end,
- na_filter, na_hashset,
- true_set, false_set)
- kh_destroy_str(true_set)
- kh_destroy_str(false_set)
- else:
- result, na_count = _try_bool(self.parser, i, start, end,
- na_filter, na_hashset)
+ result, na_count = _try_bool_flex(self.parser, i, start, end,
+ na_filter, na_hashset,
+ self.true_set, self.false_set)
return result, na_count
elif dtype[1] == 'c':
raise NotImplementedError("the dtype %s is not supported for parsing" % dtype)
@@ -1442,8 +1440,7 @@ cdef _string_box_decode(parser_t *parser, int col,
cdef _to_fw_string(parser_t *parser, int col, int line_start,
int line_end, size_t width):
cdef:
- int error
- Py_ssize_t i, j
+ Py_ssize_t i
coliter_t it
const char *word = NULL
char *data
@@ -1452,6 +1449,18 @@ cdef _to_fw_string(parser_t *parser, int col, int line_start,
result = np.empty(line_end - line_start, dtype='|S%d' % width)
data = <char*> result.data
+ with nogil:
+ _to_fw_string_nogil(parser, col, line_start, line_end, width, data)
+
+ return result
+
+cdef inline void _to_fw_string_nogil(parser_t *parser, int col, int line_start,
+ int line_end, size_t width, char *data) nogil:
+ cdef:
+ Py_ssize_t i
+ coliter_t it
+ const char *word = NULL
+
coliter_setup(&it, parser, col, line_start)
for i in range(line_end - line_start):
@@ -1459,8 +1468,6 @@ cdef _to_fw_string(parser_t *parser, int col, int line_start,
strncpy(data, word, width)
data += width
- return result
-
cdef char* cinf = b'inf'
cdef char* cneginf = b'-inf'
@@ -1474,14 +1481,40 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
char *p_end
double *data
double NA = na_values[np.float64]
+ kh_float64_t *na_fset
ndarray result
khiter_t k
bint use_na_flist = len(na_flist) > 0
- global errno
lines = line_end - line_start
result = np.empty(lines, dtype=np.float64)
data = <double *> result.data
+ na_fset = kset_float64_from_list(na_flist)
+ with nogil:
+ error = _try_double_nogil(parser, col, line_start, line_end,
+ na_filter, na_hashset, use_na_flist, na_fset, NA, data, &na_count)
+ kh_destroy_float64(na_fset)
+ if error != 0:
+ return None, None
+ return result, na_count
+
+cdef inline int _try_double_nogil(parser_t *parser, int col, int line_start, int line_end,
+ bint na_filter, kh_str_t *na_hashset, bint use_na_flist,
+ const kh_float64_t *na_flist,
+ double NA,
+ double *data, int *na_count) nogil:
+ cdef:
+ int error,
+ size_t i
+ size_t lines = line_end - line_start
+ coliter_t it
+ const char *word = NULL
+ char *p_end
+ khiter_t k, k64
+
+ global errno
+
+ na_count[0] = 0
coliter_setup(&it, parser, col, line_start)
if na_filter:
@@ -1491,39 +1524,41 @@ cdef _try_double(parser_t *parser, int col, int line_start, int line_end,
k = kh_get_str(na_hashset, word)
# in the hash table
if k != na_hashset.n_buckets:
- na_count += 1
+ na_count[0] += 1
data[0] = NA
else:
data[0] = parser.converter(word, &p_end, parser.decimal, parser.sci,
- parser.thousands, 1)
+ parser.thousands, 1)
if errno != 0 or p_end[0] or p_end == word:
if strcasecmp(word, cinf) == 0:
data[0] = INF
elif strcasecmp(word, cneginf) == 0:
data[0] = NEGINF
else:
- return None, None
+ # Just return a non-zero value since the errno is never consumed.
+ return 1
if use_na_flist:
- if data[0] in na_flist:
- na_count += 1
+ k64 = kh_get_float64(na_flist, data[0])
+ if k64 != na_flist.n_buckets:
+ na_count[0] += 1
data[0] = NA
data += 1
else:
for i in range(lines):
COLITER_NEXT(it, word)
data[0] = parser.converter(word, &p_end, parser.decimal, parser.sci,
- parser.thousands, 1)
+ parser.thousands, 1)
if errno != 0 or p_end[0] or p_end == word:
if strcasecmp(word, cinf) == 0:
data[0] = INF
elif strcasecmp(word, cneginf) == 0:
data[0] = NEGINF
else:
- return None, None
+ # Just return a non-zero value since the errno is never consumed.
+ return 1
data += 1
- return result, na_count
-
+ return 0
cdef _try_int64(parser_t *parser, int col, int line_start, int line_end,
bint na_filter, kh_str_t *na_hashset):
@@ -1531,7 +1566,6 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end,
int error, na_count = 0
size_t i, lines
coliter_t it
- const char *word = NULL
int64_t *data
ndarray result
@@ -1542,6 +1576,29 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end,
result = np.empty(lines, dtype=np.int64)
data = <int64_t *> result.data
coliter_setup(&it, parser, col, line_start)
+ with nogil:
+ error = _try_int64_nogil(parser, col, line_start, line_end, na_filter, na_hashset, NA, data, &na_count)
+ if error != 0:
+ if error == ERROR_OVERFLOW:
+ # Can't get the word variable
+ raise OverflowError('Overflow')
+ return None, None
+
+ return result, na_count
+
+cdef inline int _try_int64_nogil(parser_t *parser, int col, int line_start, int line_end,
+ bint na_filter, const kh_str_t *na_hashset, int64_t NA, int64_t *data,
+ int *na_count) nogil:
+ cdef:
+ int error
+ size_t i
+ size_t lines = line_end - line_start
+ coliter_t it
+ const char *word = NULL
+ khiter_t k
+
+ na_count[0] = 0
+ coliter_setup(&it, parser, col, line_start)
if na_filter:
for i in range(lines):
@@ -1549,46 +1606,54 @@ cdef _try_int64(parser_t *parser, int col, int line_start, int line_end,
k = kh_get_str(na_hashset, word)
# in the hash table
if k != na_hashset.n_buckets:
- na_count += 1
+ na_count[0] += 1
data[i] = NA
continue
data[i] = str_to_int64(word, INT64_MIN, INT64_MAX,
&error, parser.thousands)
if error != 0:
- if error == ERROR_OVERFLOW:
- raise OverflowError(word)
-
- return None, None
+ return error
else:
for i in range(lines):
COLITER_NEXT(it, word)
data[i] = str_to_int64(word, INT64_MIN, INT64_MAX,
&error, parser.thousands)
if error != 0:
- if error == ERROR_OVERFLOW:
- raise OverflowError(word)
- return None, None
-
- return result, na_count
+ return error
+ return 0
cdef _try_bool(parser_t *parser, int col, int line_start, int line_end,
bint na_filter, kh_str_t *na_hashset):
cdef:
- int error, na_count = 0
- size_t i, lines
- coliter_t it
- const char *word = NULL
+ int na_count
+ size_t lines = line_end - line_start
uint8_t *data
- ndarray result
+ cnp.ndarray[cnp.uint8_t, ndim=1] result
uint8_t NA = na_values[np.bool_]
- khiter_t k
- lines = line_end - line_start
- result = np.empty(lines, dtype=np.uint8)
+ result = np.empty(lines)
data = <uint8_t *> result.data
+
+ with nogil:
+ error = _try_bool_nogil(parser, col, line_start, line_end, na_filter, na_hashset, NA, data, &na_count)
+ if error != 0:
+ return None, None
+ return result.view(np.bool_), na_count
+
+cdef inline int _try_bool_nogil(parser_t *parser, int col, int line_start, int line_end,
+ bint na_filter, const kh_str_t *na_hashset, uint8_t NA, uint8_t *data,
+ int *na_count) nogil:
+ cdef:
+ int error
+ size_t lines = line_end - line_start
+ size_t i
+ coliter_t it
+ const char *word = NULL
+ khiter_t k
+ na_count[0] = 0
coliter_setup(&it, parser, col, line_start)
if na_filter:
@@ -1598,14 +1663,14 @@ cdef _try_bool(parser_t *parser, int col, int line_start, int line_end,
k = kh_get_str(na_hashset, word)
# in the hash table
if k != na_hashset.n_buckets:
- na_count += 1
+ na_count[0] += 1
data[0] = NA
data += 1
continue
error = to_boolean(word, data)
if error != 0:
- return None, None
+ return error
data += 1
else:
for i in range(lines):
@@ -1613,15 +1678,13 @@ cdef _try_bool(parser_t *parser, int col, int line_start, int line_end,
error = to_boolean(word, data)
if error != 0:
- return None, None
+ return error
data += 1
-
- return result.view(np.bool_), na_count
-
+ return 0
cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end,
- bint na_filter, kh_str_t *na_hashset,
- kh_str_t *true_hashset, kh_str_t *false_hashset):
+ bint na_filter, const kh_str_t *na_hashset,
+ const kh_str_t *true_hashset, const kh_str_t *false_hashset):
cdef:
int error, na_count = 0
size_t i, lines
@@ -1636,6 +1699,26 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end,
lines = line_end - line_start
result = np.empty(lines, dtype=np.uint8)
data = <uint8_t *> result.data
+ with nogil:
+ error = _try_bool_flex_nogil(parser, col, line_start, line_end, na_filter, na_hashset,
+ true_hashset, false_hashset, NA, data, &na_count)
+ if error != 0:
+ return None, None
+ return result.view(np.bool_), na_count
+
+cdef inline int _try_bool_flex_nogil(parser_t *parser, int col, int line_start, int line_end,
+ bint na_filter, const kh_str_t *na_hashset,
+ const kh_str_t *true_hashset, const kh_str_t *false_hashset,
+ uint8_t NA, uint8_t *data, int *na_count) nogil:
+ cdef:
+ int error = 0
+ size_t i
+ size_t lines = line_end - line_start
+ coliter_t it
+ const char *word = NULL
+ khiter_t k
+
+ na_count[0] = 0
coliter_setup(&it, parser, col, line_start)
if na_filter:
@@ -1645,7 +1728,7 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end,
k = kh_get_str(na_hashset, word)
# in the hash table
if k != na_hashset.n_buckets:
- na_count += 1
+ na_count[0] += 1
data[0] = NA
data += 1
continue
@@ -1655,7 +1738,6 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end,
data[0] = 1
data += 1
continue
-
k = kh_get_str(false_hashset, word)
if k != false_hashset.n_buckets:
data[0] = 0
@@ -1664,7 +1746,7 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end,
error = to_boolean(word, data)
if error != 0:
- return None, None
+ return error
data += 1
else:
for i in range(lines):
@@ -1684,10 +1766,10 @@ cdef _try_bool_flex(parser_t *parser, int col, int line_start, int line_end,
error = to_boolean(word, data)
if error != 0:
- return None, None
+ return error
data += 1
- return result.view(np.bool_), na_count
+ return 0
cdef kh_str_t* kset_from_list(list values) except NULL:
# caller takes responsibility for freeing the hash table
@@ -1712,6 +1794,25 @@ cdef kh_str_t* kset_from_list(list values) except NULL:
return table
+cdef kh_float64_t* kset_float64_from_list(values) except NULL:
+ # caller takes responsibility for freeing the hash table
+ cdef:
+ Py_ssize_t i
+ khiter_t k
+ kh_float64_t *table
+ int ret = 0
+ cnp.float64_t val
+ object value
+
+ table = kh_init_float64()
+
+ for value in values:
+ val = float(value)
+
+ k = kh_put_float64(table, val, &ret)
+
+ return table
+
# if at first you don't succeed...
| The GIL was removed around the tokenizer functions and the conversion functions (`_string_convert` excluded).
## Benchmark:
### Data Generation:
``` python
import pandas as pd
import numpy as np
df = pd.DataFrame(np.random.randn(1000000,10))
df.to_csv('test.csv')
```
### Benchmark Code:
``` python
import pandas as pd
from pandas.util.testing import test_parallel
def f():
for i in range(4):
pd.read_csv('test.csv', index_col=0)
@test_parallel(4)
def g():
pd.read_csv('test.csv', index_col=0)
```
### Before:
```
In [4]: %timeit pd.read_csv('test.csv', index_col=0)
1 loops, best of 3: 2.3 s per loop
In [7]: %timeit f()
1 loops, best of 3: 9.15 s per loop
In [8]: %timeit g()
1 loops, best of 3: 9.25 s per loop
```
### After:
``` pycon
In [6]: %timeit pd.read_csv('test.csv', index_col=0)
1 loops, best of 3: 2.35 s per loop
In [9]: %timeit f()
1 loops, best of 3: 9.55 s per loop
In [10]: %timeit g()
1 loops, best of 3: 4.38 s per loop
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11272 | 2015-10-09T16:08:15Z | 2015-11-04T20:28:15Z | 2015-11-04T20:28:15Z | 2015-11-04T21:04:53Z |
BUG: Allow MultiIndex to be subclassed #11267 | diff --git a/pandas/core/index.py b/pandas/core/index.py
index 256ece6539b6f..62ef73b150fec 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -3906,7 +3906,7 @@ def __new__(cls, levels=None, labels=None, sortorder=None, names=None,
name = None
return Index(levels[0], name=name, copy=True).take(labels[0])
- result = object.__new__(MultiIndex)
+ result = object.__new__(cls)
# we've already validated levels and labels, so shortcut here
result._set_levels(levels, copy=copy, validate=False)
@@ -4184,12 +4184,12 @@ def copy(self, names=None, dtype=None, levels=None, labels=None,
levels = self.levels
labels = self.labels
names = self.names
- return MultiIndex(levels=levels,
- labels=labels,
- names=names,
- sortorder=self.sortorder,
- verify_integrity=False,
- _set_identity=_set_identity)
+ return self.__class__(levels=levels,
+ labels=labels,
+ names=names,
+ sortorder=self.sortorder,
+ verify_integrity=False,
+ _set_identity=_set_identity)
def __array__(self, dtype=None):
""" the array interface, return my values """
@@ -4205,7 +4205,7 @@ def _shallow_copy(self, values=None, infer=False, **kwargs):
if values is not None:
if 'name' in kwargs:
kwargs['names'] = kwargs.pop('name',None)
- return MultiIndex.from_tuples(values, **kwargs)
+ return self.__class__.from_tuples(values, **kwargs)
return self.view()
@cache_readonly
@@ -4285,16 +4285,16 @@ def _format_native_types(self, **kwargs):
@property
def _constructor(self):
- return MultiIndex.from_tuples
+ return self.__class__.from_tuples
@cache_readonly
def inferred_type(self):
return 'mixed'
- @staticmethod
- def _from_elements(values, labels=None, levels=None, names=None,
+ @classmethod
+ def _from_elements(cls, values, labels=None, levels=None, names=None,
sortorder=None):
- return MultiIndex(levels, labels, names, sortorder=sortorder)
+ return cls(levels, labels, names, sortorder=sortorder)
def _get_level_number(self, level):
try:
@@ -4552,7 +4552,7 @@ def to_hierarchical(self, n_repeat, n_shuffle=1):
# Assumes that each label is divisible by n_shuffle
labels = [x.reshape(n_shuffle, -1).ravel(1) for x in labels]
names = self.names
- return MultiIndex(levels=levels, labels=labels, names=names)
+ return self.__class__(levels=levels, labels=labels, names=names)
@property
def is_all_dates(self):
@@ -4626,9 +4626,9 @@ def from_arrays(cls, arrays, sortorder=None, names=None):
if names is None:
names = [getattr(arr, "name", None) for arr in arrays]
- return MultiIndex(levels=levels, labels=labels,
- sortorder=sortorder, names=names,
- verify_integrity=False)
+ return cls(levels=levels, labels=labels,
+ sortorder=sortorder, names=names,
+ verify_integrity=False)
@classmethod
def from_tuples(cls, tuples, sortorder=None, names=None):
@@ -4673,8 +4673,8 @@ def from_tuples(cls, tuples, sortorder=None, names=None):
else:
arrays = lzip(*tuples)
- return MultiIndex.from_arrays(arrays, sortorder=sortorder,
- names=names)
+ return cls.from_arrays(arrays, sortorder=sortorder,
+ names=names)
@classmethod
def from_product(cls, iterables, sortorder=None, names=None):
@@ -4716,8 +4716,8 @@ def from_product(cls, iterables, sortorder=None, names=None):
categoricals = [Categorical.from_array(it, ordered=True) for it in iterables]
labels = cartesian_product([c.codes for c in categoricals])
- return MultiIndex(levels=[c.categories for c in categoricals],
- labels=labels, sortorder=sortorder, names=names)
+ return cls(levels=[c.categories for c in categoricals],
+ labels=labels, sortorder=sortorder, names=names)
@property
def nlevels(self):
@@ -4785,17 +4785,17 @@ def __getitem__(self, key):
new_labels = [lab[key] for lab in self.labels]
- return MultiIndex(levels=self.levels,
- labels=new_labels,
- names=self.names,
- sortorder=sortorder,
- verify_integrity=False)
+ return self.__class__(levels=self.levels,
+ labels=new_labels,
+ names=self.names,
+ sortorder=sortorder,
+ verify_integrity=False)
def take(self, indexer, axis=None):
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
- return MultiIndex(levels=self.levels, labels=new_labels,
- names=self.names, verify_integrity=False)
+ return self.__class__(levels=self.levels, labels=new_labels,
+ names=self.names, verify_integrity=False)
def append(self, other):
"""
@@ -4818,14 +4818,14 @@ def append(self, other):
label = self.get_level_values(i)
appended = [o.get_level_values(i) for o in other]
arrays.append(label.append(appended))
- return MultiIndex.from_arrays(arrays, names=self.names)
+ return self.__class__.from_arrays(arrays, names=self.names)
to_concat = (self.values,) + tuple(k._values for k in other)
new_tuples = np.concatenate(to_concat)
# if all(isinstance(x, MultiIndex) for x in other):
try:
- return MultiIndex.from_tuples(new_tuples, names=self.names)
+ return self.__class__.from_tuples(new_tuples, names=self.names)
except:
return Index(new_tuples)
@@ -4833,11 +4833,11 @@ def argsort(self, *args, **kwargs):
return self.values.argsort(*args, **kwargs)
def repeat(self, n):
- return MultiIndex(levels=self.levels,
- labels=[label.view(np.ndarray).repeat(n) for label in self.labels],
- names=self.names,
- sortorder=self.sortorder,
- verify_integrity=False)
+ return self.__class__(levels=self.levels,
+ labels=[label.view(np.ndarray).repeat(n) for label in self.labels],
+ names=self.names,
+ sortorder=self.sortorder,
+ verify_integrity=False)
def drop(self, labels, level=None, errors='raise'):
"""
@@ -4936,8 +4936,8 @@ def droplevel(self, level=0):
result.name = new_names[0]
return result
else:
- return MultiIndex(levels=new_levels, labels=new_labels,
- names=new_names, verify_integrity=False)
+ return self.__class__(levels=new_levels, labels=new_labels,
+ names=new_names, verify_integrity=False)
def swaplevel(self, i, j):
"""
@@ -4963,8 +4963,8 @@ def swaplevel(self, i, j):
new_labels[i], new_labels[j] = new_labels[j], new_labels[i]
new_names[i], new_names[j] = new_names[j], new_names[i]
- return MultiIndex(levels=new_levels, labels=new_labels,
- names=new_names, verify_integrity=False)
+ return self.__class__(levels=new_levels, labels=new_labels,
+ names=new_names, verify_integrity=False)
def reorder_levels(self, order):
"""
@@ -4982,8 +4982,8 @@ def reorder_levels(self, order):
new_labels = [self.labels[i] for i in order]
new_names = [self.names[i] for i in order]
- return MultiIndex(levels=new_levels, labels=new_labels,
- names=new_names, verify_integrity=False)
+ return self.__class__(levels=new_levels, labels=new_labels,
+ names=new_names, verify_integrity=False)
def __getslice__(self, i, j):
return self.__getitem__(slice(i, j))
@@ -5048,9 +5048,9 @@ def sortlevel(self, level=0, ascending=True, sort_remaining=True):
indexer = com._ensure_platform_int(indexer)
new_labels = [lab.take(indexer) for lab in self.labels]
- new_index = MultiIndex(labels=new_labels, levels=self.levels,
- names=self.names, sortorder=sortorder,
- verify_integrity=False)
+ new_index = self.__class__(labels=new_labels, levels=self.levels,
+ names=self.names, sortorder=sortorder,
+ verify_integrity=False)
return new_index, indexer
@@ -5165,7 +5165,7 @@ def reindex(self, target, method=None, level=None, limit=None,
target = self.take(indexer)
else:
# hopefully?
- target = MultiIndex.from_tuples(target)
+ target = self.__class__.from_tuples(target)
if (preserve_names and target.nlevels == self.nlevels and
target.names != self.names):
@@ -5665,8 +5665,8 @@ def truncate(self, before=None, after=None):
new_labels = [lab[left:right] for lab in self.labels]
new_labels[0] = new_labels[0] - i
- return MultiIndex(levels=new_levels, labels=new_labels,
- verify_integrity=False)
+ return self.__class__(levels=new_levels, labels=new_labels,
+ verify_integrity=False)
def equals(self, other):
"""
@@ -5734,8 +5734,8 @@ def union(self, other):
return self
uniq_tuples = lib.fast_unique_multiple([self._values, other._values])
- return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
- names=result_names)
+ return self.__class__.from_arrays(lzip(*uniq_tuples), sortorder=0,
+ names=result_names)
def intersection(self, other):
"""
@@ -5759,12 +5759,12 @@ def intersection(self, other):
other_tuples = other._values
uniq_tuples = sorted(set(self_tuples) & set(other_tuples))
if len(uniq_tuples) == 0:
- return MultiIndex(levels=[[]] * self.nlevels,
- labels=[[]] * self.nlevels,
- names=result_names, verify_integrity=False)
+ return self.__class__(levels=[[]] * self.nlevels,
+ labels=[[]] * self.nlevels,
+ names=result_names, verify_integrity=False)
else:
- return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
- names=result_names)
+ return self.__class__.from_arrays(lzip(*uniq_tuples), sortorder=0,
+ names=result_names)
def difference(self, other):
"""
@@ -5781,19 +5781,19 @@ def difference(self, other):
return self
if self.equals(other):
- return MultiIndex(levels=[[]] * self.nlevels,
- labels=[[]] * self.nlevels,
- names=result_names, verify_integrity=False)
+ return self.__class__(levels=[[]] * self.nlevels,
+ labels=[[]] * self.nlevels,
+ names=result_names, verify_integrity=False)
difference = sorted(set(self._values) - set(other._values))
if len(difference) == 0:
- return MultiIndex(levels=[[]] * self.nlevels,
- labels=[[]] * self.nlevels,
- names=result_names, verify_integrity=False)
+ return self.__class__(levels=[[]] * self.nlevels,
+ labels=[[]] * self.nlevels,
+ names=result_names, verify_integrity=False)
else:
- return MultiIndex.from_tuples(difference, sortorder=0,
- names=result_names)
+ return self.__class__.from_tuples(difference, sortorder=0,
+ names=result_names)
def astype(self, dtype):
if not is_object_dtype(np.dtype(dtype)):
@@ -5806,13 +5806,13 @@ def _convert_can_do_setop(self, other):
if not hasattr(other, 'names'):
if len(other) == 0:
- other = MultiIndex(levels=[[]] * self.nlevels,
- labels=[[]] * self.nlevels,
- verify_integrity=False)
+ other = self.__class__(levels=[[]] * self.nlevels,
+ labels=[[]] * self.nlevels,
+ verify_integrity=False)
else:
msg = 'other must be a MultiIndex or a list of tuples'
try:
- other = MultiIndex.from_tuples(other)
+ other = self.__class__.from_tuples(other)
except:
raise TypeError(msg)
else:
@@ -5856,8 +5856,8 @@ def insert(self, loc, item):
new_levels.append(level)
new_labels.append(np.insert(_ensure_int64(labels), loc, lev_loc))
- return MultiIndex(levels=new_levels, labels=new_labels,
- names=self.names, verify_integrity=False)
+ return self.__class__(levels=new_levels, labels=new_labels,
+ names=self.names, verify_integrity=False)
def delete(self, loc):
"""
@@ -5868,8 +5868,8 @@ def delete(self, loc):
new_index : MultiIndex
"""
new_labels = [np.delete(lab, loc) for lab in self.labels]
- return MultiIndex(levels=self.levels, labels=new_labels,
- names=self.names, verify_integrity=False)
+ return self.__class__(levels=self.levels, labels=new_labels,
+ names=self.names, verify_integrity=False)
get_major_bounds = slice_locs
@@ -5889,7 +5889,7 @@ def _bounds(self):
def _wrap_joined_index(self, joined, other):
names = self.names if self.names == other.names else None
- return MultiIndex.from_tuples(joined, names=names)
+ return self.__class__.from_tuples(joined, names=names)
@Appender(Index.isin.__doc__)
def isin(self, values, level=None):
diff --git a/pandas/tests/test_index.py b/pandas/tests/test_index.py
index 81ebc7efdbdd9..0eb0f9b4e8992 100644
--- a/pandas/tests/test_index.py
+++ b/pandas/tests/test_index.py
@@ -5673,6 +5673,13 @@ def test_equals_operator(self):
# GH9785
self.assertTrue((self.index == self.index).all())
+ def test_subclassing(self):
+ # GH11267
+ class MyMultiIndex(MultiIndex):
+ pass
+ mi = MyMultiIndex([['a'], ['b']], [[0], [0]])
+ self.assertTrue(isinstance(mi, MyMultiIndex))
+
def test_get_combined_index():
from pandas.core.index import _get_combined_index
| closes #11267
MultiIndex had several places where the output class was hard-coded to
MultiIndex rather than cls, self.**class**, or the like. These have
been replaced.
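A minimal sketch of what this enables, mirroring the new test (assumes this branch is applied):
``` python
import pandas as pd

class MyMultiIndex(pd.MultiIndex):
    pass

mi = MyMultiIndex([['a'], ['b']], [[0], [0]])
assert isinstance(mi, MyMultiIndex)

# methods that previously hard-coded MultiIndex now preserve the subclass
assert isinstance(mi.copy(), MyMultiIndex)
```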
| https://api.github.com/repos/pandas-dev/pandas/pulls/11268 | 2015-10-09T08:26:43Z | 2015-11-18T20:15:11Z | null | 2015-11-18T20:15:11Z |
PERF: Release GIL on some datetime ops | diff --git a/asv_bench/benchmarks/gil.py b/asv_bench/benchmarks/gil.py
index 4b82781fc39d9..eeca2d54381b2 100644
--- a/asv_bench/benchmarks/gil.py
+++ b/asv_bench/benchmarks/gil.py
@@ -320,3 +320,49 @@ def time_nogil_kth_smallest(self):
def run(arr):
algos.kth_smallest(arr, self.k)
run()
+
+class nogil_datetime_fields(object):
+ goal_time = 0.2
+
+ def setup(self):
+ self.N = 100000000
+ self.dti = pd.date_range('1900-01-01', periods=self.N, freq='D')
+ self.period = self.dti.to_period('D')
+ if (not have_real_test_parallel):
+ raise NotImplementedError
+
+ def time_datetime_field_year(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.year
+ run(self.dti)
+
+ def time_datetime_field_day(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.day
+ run(self.dti)
+
+ def time_datetime_field_daysinmonth(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.days_in_month
+ run(self.dti)
+
+ def time_datetime_field_normalize(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.normalize()
+ run(self.dti)
+
+ def time_datetime_to_period(self):
+ @test_parallel(num_threads=2)
+ def run(dti):
+ dti.to_period('S')
+ run(self.dti)
+
+ def time_period_to_datetime(self):
+ @test_parallel(num_threads=2)
+ def run(period):
+ period.to_timestamp()
+ run(self.period)
diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 14c4276e74a3a..035e3ae2ac2f0 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -55,6 +55,10 @@ Performance Improvements
- Checking monotonic-ness before sorting on an index (:issue:`11080`)
+
+- Release the GIL on most datetime field operations (e.g. ``DatetimeIndex.year``, ``Series.dt.year``), normalization, and conversion to and from ``Period``, ``DatetimeIndex.to_period`` and ``PeriodIndex.to_timestamp`` (:issue:`11263`)
+
+
.. _whatsnew_0171.bug_fixes:
Bug Fixes
diff --git a/pandas/src/datetime.pxd b/pandas/src/datetime.pxd
index 0896965162698..f2f764c785894 100644
--- a/pandas/src/datetime.pxd
+++ b/pandas/src/datetime.pxd
@@ -95,14 +95,14 @@ cdef extern from "datetime/np_datetime.h":
int apply_tzinfo)
npy_datetime pandas_datetimestruct_to_datetime(PANDAS_DATETIMEUNIT fr,
- pandas_datetimestruct *d)
+ pandas_datetimestruct *d) nogil
void pandas_datetime_to_datetimestruct(npy_datetime val,
PANDAS_DATETIMEUNIT fr,
- pandas_datetimestruct *result)
+ pandas_datetimestruct *result) nogil
int days_per_month_table[2][12]
- int dayofweek(int y, int m, int d)
- int is_leapyear(int64_t year)
+ int dayofweek(int y, int m, int d) nogil
+ int is_leapyear(int64_t year) nogil
PANDAS_DATETIMEUNIT get_datetime64_unit(object o)
cdef extern from "datetime/np_datetime_strings.h":
diff --git a/pandas/src/period.pyx b/pandas/src/period.pyx
index 2a7c2135f8045..b431bb58bc991 100644
--- a/pandas/src/period.pyx
+++ b/pandas/src/period.pyx
@@ -76,11 +76,11 @@ cdef extern from "period_helper.h":
int64_t get_period_ordinal(int year, int month, int day,
int hour, int minute, int second, int microseconds, int picoseconds,
- int freq) except INT32_MIN
+ int freq) nogil except INT32_MIN
int64_t get_python_ordinal(int64_t period_ordinal, int freq) except INT32_MIN
- int get_date_info(int64_t ordinal, int freq, date_info *dinfo) except INT32_MIN
+ int get_date_info(int64_t ordinal, int freq, date_info *dinfo) nogil except INT32_MIN
double getAbsTime(int, int64_t, int64_t)
int pyear(int64_t ordinal, int freq) except INT32_MIN
@@ -139,13 +139,14 @@ def dt64arr_to_periodarr(ndarray[int64_t] dtarr, int freq, tz=None):
out = np.empty(l, dtype='i8')
if tz is None:
- for i in range(l):
- if dtarr[i] == iNaT:
- out[i] = iNaT
- continue
- pandas_datetime_to_datetimestruct(dtarr[i], PANDAS_FR_ns, &dts)
- out[i] = get_period_ordinal(dts.year, dts.month, dts.day,
- dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
+ with nogil:
+ for i in range(l):
+ if dtarr[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+ pandas_datetime_to_datetimestruct(dtarr[i], PANDAS_FR_ns, &dts)
+ out[i] = get_period_ordinal(dts.year, dts.month, dts.day,
+ dts.hour, dts.min, dts.sec, dts.us, dts.ps, freq)
else:
out = localize_dt64arr_to_period(dtarr, freq, tz)
return out
@@ -163,11 +164,12 @@ def periodarr_to_dt64arr(ndarray[int64_t] periodarr, int freq):
out = np.empty(l, dtype='i8')
- for i in range(l):
- if periodarr[i] == iNaT:
- out[i] = iNaT
- continue
- out[i] = period_ordinal_to_dt64(periodarr[i], freq)
+ with nogil:
+ for i in range(l):
+ if periodarr[i] == NPY_NAT:
+ out[i] = NPY_NAT
+ continue
+ out[i] = period_ordinal_to_dt64(periodarr[i], freq)
return out
@@ -245,13 +247,13 @@ def period_ordinal(int y, int m, int d, int h, int min, int s, int us, int ps, i
return get_period_ordinal(y, m, d, h, min, s, us, ps, freq)
-cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq):
+cpdef int64_t period_ordinal_to_dt64(int64_t ordinal, int freq) nogil:
cdef:
pandas_datetimestruct dts
date_info dinfo
float subsecond_fraction
- if ordinal == iNaT:
+ if ordinal == NPY_NAT:
return NPY_NAT
get_date_info(ordinal, freq, &dinfo)
diff --git a/pandas/src/period_helper.c b/pandas/src/period_helper.c
index 032bc44de6355..e056b1fa9a522 100644
--- a/pandas/src/period_helper.c
+++ b/pandas/src/period_helper.c
@@ -113,7 +113,7 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo,
int yearoffset;
/* Range check */
- Py_AssertWithArg(year > -(INT_MAX / 366) && year < (INT_MAX / 366),
+ Py_AssertWithArg(year > -(INT_MAX / 366) && year < (INT_MAX / 366),
PyExc_ValueError,
"year out of range: %i",
year);
@@ -136,7 +136,7 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo,
day);
yearoffset = dInfoCalc_YearOffset(year, calendar);
- if (PyErr_Occurred()) goto onError;
+ if (yearoffset == INT_ERR_CODE) goto onError;
absdate = day + month_offset[leap][month - 1] + yearoffset;
@@ -155,7 +155,7 @@ static int dInfoCalc_SetFromDateAndTime(struct date_info *dinfo,
/* Calculate the absolute time */
{
- Py_AssertWithArg(hour >= 0 && hour <= 23,
+ Py_AssertWithArg(hour >= 0 && hour <= 23,
PyExc_ValueError,
"hour out of range (0-23): %i",
hour);
@@ -212,8 +212,7 @@ int dInfoCalc_SetFromAbsDate(register struct date_info *dinfo,
while (1) {
/* Calculate the year offset */
yearoffset = dInfoCalc_YearOffset(year, calendar);
- if (PyErr_Occurred())
- goto onError;
+ if (yearoffset == INT_ERR_CODE) goto onError;
/* Backward correction: absdate must be greater than the
yearoffset */
@@ -310,7 +309,7 @@ static int calc_conversion_factors_matrix_size() {
}
matrix_size = max_value(matrix_size, period_value);
}
- return matrix_size + 1;
+ return matrix_size + 1;
}
static void alloc_conversion_factors_matrix(int matrix_size) {
diff --git a/pandas/tslib.pyx b/pandas/tslib.pyx
index 398c5f0232de1..8e6d4019c69a3 100644
--- a/pandas/tslib.pyx
+++ b/pandas/tslib.pyx
@@ -3849,6 +3849,7 @@ def get_time_micros(ndarray[int64_t] dtindex):
@cython.wraparound(False)
[email protected](False)
def get_date_field(ndarray[int64_t] dtindex, object field):
'''
Given a int64-based datetime index, extract the year, month, etc.,
@@ -3872,130 +3873,142 @@ def get_date_field(ndarray[int64_t] dtindex, object field):
out = np.empty(count, dtype='i4')
if field == 'Y':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.year
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.year
return out
elif field == 'M':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.month
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.month
return out
elif field == 'D':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.day
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.day
return out
elif field == 'h':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.hour
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.hour
return out
elif field == 'm':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.min
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.min
return out
elif field == 's':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.sec
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.sec
return out
elif field == 'us':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.us
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.us
return out
elif field == 'ns':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.ps / 1000
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.ps / 1000
return out
elif field == 'doy':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- isleap = is_leapyear(dts.year)
- out[i] = _month_offset[isleap, dts.month-1] + dts.day
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ isleap = is_leapyear(dts.year)
+ out[i] = _month_offset[isleap, dts.month-1] + dts.day
return out
elif field == 'dow':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- ts = convert_to_tsobject(dtindex[i], None, None)
- out[i] = ts_dayofweek(ts)
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dayofweek(dts.year, dts.month, dts.day)
return out
elif field == 'woy':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
-
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- ts = convert_to_tsobject(dtindex[i], None, None)
- isleap = is_leapyear(dts.year)
- isleap_prev = is_leapyear(dts.year - 1)
- mo_off = _month_offset[isleap, dts.month - 1]
- doy = mo_off + dts.day
- dow = ts_dayofweek(ts)
-
- #estimate
- woy = (doy - 1) - dow + 3
- if woy >= 0:
- woy = woy / 7 + 1
-
- # verify
- if woy < 0:
- if (woy > -2) or (woy == -2 and isleap_prev):
- woy = 53
- else:
- woy = 52
- elif woy == 53:
- if 31 - dts.day + dow < 3:
- woy = 1
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
+
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ isleap = is_leapyear(dts.year)
+ isleap_prev = is_leapyear(dts.year - 1)
+ mo_off = _month_offset[isleap, dts.month - 1]
+ doy = mo_off + dts.day
+ dow = dayofweek(dts.year, dts.month, dts.day)
+
+ #estimate
+ woy = (doy - 1) - dow + 3
+ if woy >= 0:
+ woy = woy / 7 + 1
+
+ # verify
+ if woy < 0:
+ if (woy > -2) or (woy == -2 and isleap_prev):
+ woy = 53
+ else:
+ woy = 52
+ elif woy == 53:
+ if 31 - dts.day + dow < 3:
+ woy = 1
- out[i] = woy
+ out[i] = woy
return out
elif field == 'q':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = dts.month
- out[i] = ((out[i] - 1) / 3) + 1
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = dts.month
+ out[i] = ((out[i] - 1) / 3) + 1
return out
elif field == 'dim':
- for i in range(count):
- if dtindex[i] == NPY_NAT: out[i] = -1; continue
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = -1; continue
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
- out[i] = monthrange(dts.year, dts.month)[1]
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ out[i] = days_in_month(dts)
return out
raise ValueError("Field %s not supported" % field)
@@ -4239,12 +4252,13 @@ def date_normalize(ndarray[int64_t] stamps, tz=None):
tz = maybe_get_tz(tz)
result = _normalize_local(stamps, tz)
else:
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
- result[i] = _normalized_stamp(&dts)
+ with nogil:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
+ result[i] = _normalized_stamp(&dts)
return result
@@ -4256,12 +4270,13 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
pandas_datetimestruct dts
if _is_utc(tz):
- for i in range(n):
- if stamps[i] == NPY_NAT:
- result[i] = NPY_NAT
- continue
- pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
- result[i] = _normalized_stamp(&dts)
+ with nogil:
+ for i in range(n):
+ if stamps[i] == NPY_NAT:
+ result[i] = NPY_NAT
+ continue
+ pandas_datetime_to_datetimestruct(stamps[i], PANDAS_FR_ns, &dts)
+ result[i] = _normalized_stamp(&dts)
elif _is_tzlocal(tz):
for i in range(n):
if stamps[i] == NPY_NAT:
@@ -4304,7 +4319,7 @@ cdef _normalize_local(ndarray[int64_t] stamps, object tz):
return result
-cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts):
+cdef inline int64_t _normalized_stamp(pandas_datetimestruct *dts) nogil:
dts.hour = 0
dts.min = 0
dts.sec = 0
@@ -4369,6 +4384,8 @@ def monthrange(int64_t year, int64_t month):
cdef inline int64_t ts_dayofweek(_TSObject ts):
return dayofweek(ts.dts.year, ts.dts.month, ts.dts.day)
+cdef inline int days_in_month(pandas_datetimestruct dts) nogil:
+ return days_per_month_table[is_leapyear(dts.year)][dts.month-1]
cpdef normalize_date(object dt):
'''
@@ -4388,17 +4405,18 @@ cpdef normalize_date(object dt):
cdef inline int _year_add_months(pandas_datetimestruct dts,
- int months):
+ int months) nogil:
'''new year number after shifting pandas_datetimestruct number of months'''
return dts.year + (dts.month + months - 1) / 12
cdef inline int _month_add_months(pandas_datetimestruct dts,
- int months):
+ int months) nogil:
'''new month number after shifting pandas_datetimestruct number of months'''
cdef int new_month = (dts.month + months) % 12
return 12 if new_month == 0 else new_month
@cython.wraparound(False)
[email protected](False)
def shift_months(int64_t[:] dtindex, int months, object day=None):
'''
Given an int64-based datetime index, shift all elements
@@ -4411,24 +4429,26 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
'''
cdef:
Py_ssize_t i
- int days_in_month
pandas_datetimestruct dts
int count = len(dtindex)
+ cdef int days_in_current_month
int64_t[:] out = np.empty(count, dtype='int64')
- for i in range(count):
- if dtindex[i] == NPY_NAT:
- out[i] = NPY_NAT
- else:
- pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
-
- if day is None:
+ if day is None:
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
dts.year = _year_add_months(dts, months)
dts.month = _month_add_months(dts, months)
- #prevent day from wrapping around month end
- days_in_month = days_per_month_table[is_leapyear(dts.year)][dts.month-1]
- dts.day = min(dts.day, days_in_month)
- elif day == 'start':
+
+ dts.day = min(dts.day, days_in_month(dts))
+ out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ elif day == 'start':
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
dts.year = _year_add_months(dts, months)
dts.month = _month_add_months(dts, months)
@@ -4439,21 +4459,28 @@ def shift_months(int64_t[:] dtindex, int months, object day=None):
dts.month = _month_add_months(dts, -1)
else:
dts.day = 1
- elif day == 'end':
- days_in_month = days_per_month_table[is_leapyear(dts.year)][dts.month-1]
+ out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ elif day == 'end':
+ with nogil:
+ for i in range(count):
+ if dtindex[i] == NPY_NAT: out[i] = NPY_NAT; continue
+ pandas_datetime_to_datetimestruct(dtindex[i], PANDAS_FR_ns, &dts)
+ days_in_current_month = days_in_month(dts)
+
dts.year = _year_add_months(dts, months)
dts.month = _month_add_months(dts, months)
# similar semantics - when adding shift forward by one
# month if already at an end of month
- if months >= 0 and dts.day == days_in_month:
+ if months >= 0 and dts.day == days_in_current_month:
dts.year = _year_add_months(dts, 1)
dts.month = _month_add_months(dts, 1)
- days_in_month = days_per_month_table[is_leapyear(dts.year)][dts.month-1]
- dts.day = days_in_month
+ dts.day = days_in_month(dts)
+ out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
+ else:
+ raise ValueError("day must be None, 'start' or 'end'")
- out[i] = pandas_datetimestruct_to_datetime(PANDAS_FR_ns, &dts)
return np.asarray(out)
#----------------------------------------------------------------------
| This is a WIP, but far enough along I thought I'd share and see if the approach was reasonable.
This releases the GIL on most vectorized field accessors (e.g. `dt.year`) and conversion to and from `Period`. There may be more places where it could be done - obviously it would be nice for parsing, but I'm not sure that's possible.
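A rough, illustrative way to sanity-check the effect (not part of this PR): if the accessors really release the GIL, running them from several threads should scale rather than serialize. The index size and thread count below are made up.

``` python
# hypothetical timing sketch using only public pandas/numpy APIs
from concurrent.futures import ThreadPoolExecutor
import time

import pandas as pd

idx = pd.date_range('2000-01-01', periods=1000000, freq='S')

def extract_years(_):
    # vectorized field access, the kind of loop wrapped in ``with nogil:`` above
    return idx.year

start = time.time()
with ThreadPoolExecutor(max_workers=4) as pool:
    results = list(pool.map(extract_years, range(4)))
print('4 threads: %.3fs' % (time.time() - start))

# every thread must have computed the same values
assert all((r == results[0]).all() for r in results)
```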
| https://api.github.com/repos/pandas-dev/pandas/pulls/11263 | 2015-10-08T03:29:23Z | 2015-10-17T14:48:10Z | 2015-10-17T14:48:10Z | 2015-10-21T22:42:05Z |
CI: fix numpy to 1.9.3 in 2.7,3.5 builds for now, as packages for 1.10.0 not released ATM | diff --git a/ci/requirements-2.7.build b/ci/requirements-2.7.build
index 3fe6f60aee98b..6c9965ac0305e 100644
--- a/ci/requirements-2.7.build
+++ b/ci/requirements-2.7.build
@@ -1,4 +1,4 @@
dateutil=2.1
pytz=2013b
-numpy
+numpy=1.9.3
cython=0.19.1
diff --git a/ci/requirements-2.7.run b/ci/requirements-2.7.run
index 86e5934539ebf..10049179912da 100644
--- a/ci/requirements-2.7.run
+++ b/ci/requirements-2.7.run
@@ -1,6 +1,6 @@
dateutil=2.1
pytz=2013b
-numpy
+numpy=1.9.3
xlwt=0.7.5
numexpr
pytables
diff --git a/ci/requirements-3.4_SLOW.build b/ci/requirements-3.4_SLOW.build
index 9558cf00ddf5c..de36b1afb9fa4 100644
--- a/ci/requirements-3.4_SLOW.build
+++ b/ci/requirements-3.4_SLOW.build
@@ -1,4 +1,4 @@
python-dateutil
pytz
-numpy
+numpy=1.9.3
cython
diff --git a/ci/requirements-3.4_SLOW.run b/ci/requirements-3.4_SLOW.run
index 4c60fb883954f..1eca130ecd96a 100644
--- a/ci/requirements-3.4_SLOW.run
+++ b/ci/requirements-3.4_SLOW.run
@@ -1,6 +1,6 @@
python-dateutil
pytz
-numpy
+numpy=1.9.3
openpyxl
xlsxwriter
xlrd
diff --git a/ci/requirements-3.5.build b/ci/requirements-3.5.build
index 9558cf00ddf5c..de36b1afb9fa4 100644
--- a/ci/requirements-3.5.build
+++ b/ci/requirements-3.5.build
@@ -1,4 +1,4 @@
python-dateutil
pytz
-numpy
+numpy=1.9.3
cython
diff --git a/ci/requirements-3.5.run b/ci/requirements-3.5.run
index 8de8f7d8f0630..91938675280d9 100644
--- a/ci/requirements-3.5.run
+++ b/ci/requirements-3.5.run
@@ -1,6 +1,6 @@
python-dateutil
pytz
-numpy
+numpy=1.9.3
openpyxl
xlsxwriter
xlrd
| xref #11187
conda packages are not yet updated, so need to fix to 1.9.3 for now
| https://api.github.com/repos/pandas-dev/pandas/pulls/11262 | 2015-10-08T02:42:15Z | 2015-10-08T03:00:33Z | 2015-10-08T03:00:33Z | 2015-10-08T03:00:33Z |
DOC: Included halflife as one of 3 optional params that must be specified | diff --git a/pandas/stats/moments.py b/pandas/stats/moments.py
index 2e13082ee5366..c4791c43278b9 100644
--- a/pandas/stats/moments.py
+++ b/pandas/stats/moments.py
@@ -97,7 +97,7 @@
_ewm_notes = r"""
Notes
-----
-Either center of mass or span must be specified
+Either center of mass, span or halflife must be specified
EWMA is sometimes specified using a "span" parameter `s`, we have that the
decay parameter :math:`\alpha` is related to the span as
| This documentation appears to not be consistent with the function usage, i.e. halflife can also be specified as an alternative to center of mass or span.
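For reference, a small sketch of how the three specifications map to the decay parameter alpha, following the formulas given in the surrounding notes; the example values are illustrative and all correspond to alpha = 0.1.

``` python
import numpy as np

def alpha_from(com=None, span=None, halflife=None):
    # decay parameter implied by each of the three specifications
    if com is not None:
        return 1.0 / (1.0 + com)
    if span is not None:
        return 2.0 / (span + 1.0)
    if halflife is not None:
        return 1.0 - np.exp(np.log(0.5) / halflife)
    raise ValueError("center of mass, span or halflife must be specified")

print(alpha_from(com=9))                                # 0.1
print(alpha_from(span=19))                              # 0.1
print(alpha_from(halflife=np.log(0.5) / np.log(0.9)))   # 0.1
```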
| https://api.github.com/repos/pandas-dev/pandas/pulls/11261 | 2015-10-08T01:56:48Z | 2015-10-08T03:00:41Z | 2015-10-08T03:00:41Z | 2015-10-08T03:00:41Z |
BUG/ERR: raise when trying to set a subset of values in a datetime64[ns, tz] columns with another tz | diff --git a/pandas/core/internals.py b/pandas/core/internals.py
index 4790f3aa3841e..51f6c7043817f 100644
--- a/pandas/core/internals.py
+++ b/pandas/core/internals.py
@@ -2099,7 +2099,7 @@ def _try_coerce_args(self, values, other):
if is_null_datelike_scalar(other):
other = tslib.iNaT
elif isinstance(other, self._holder):
- if other.tz != self.tz:
+ if other.tz != self.values.tz:
raise ValueError("incompatible or non tz-aware value")
other = other.tz_localize(None).asi8
else:
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5acc858840dfa..5a9b90f93bb0c 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -4322,30 +4322,6 @@ def test_constructor_with_datetime_tz(self):
result = result.set_index('foo')
tm.assert_index_equal(df.index,idx)
- # indexing
- result = df2.iloc[1]
- expected = Series([Timestamp('2013-01-02 00:00:00-0500', tz='US/Eastern'), np.nan, np.nan],
- index=list('ABC'), dtype='object', name=1)
- assert_series_equal(result, expected)
- result = df2.loc[1]
- expected = Series([Timestamp('2013-01-02 00:00:00-0500', tz='US/Eastern'), np.nan, np.nan],
- index=list('ABC'), dtype='object', name=1)
- assert_series_equal(result, expected)
-
- # indexing - fast_xs
- df = DataFrame({'a': date_range('2014-01-01', periods=10, tz='UTC')})
- result = df.iloc[5]
- expected = Timestamp('2014-01-06 00:00:00+0000', tz='UTC', offset='D')
- self.assertEqual(result, expected)
-
- result = df.loc[5]
- self.assertEqual(result, expected)
-
- # indexing - boolean
- result = df[df.a > df.a[3]]
- expected = df.iloc[4:]
- assert_frame_equal(result, expected)
-
def test_constructor_for_list_with_dtypes(self):
intname = np.dtype(np.int_).name
floatname = np.dtype(np.float_).name
diff --git a/pandas/tests/test_indexing.py b/pandas/tests/test_indexing.py
index 35467c6abb9b4..90f85b3f4576d 100644
--- a/pandas/tests/test_indexing.py
+++ b/pandas/tests/test_indexing.py
@@ -788,6 +788,58 @@ def test_loc_setitem_multiindex(self):
result = df.loc[(t,n),'X']
self.assertEqual(result,3)
+ def test_indexing_with_datetime_tz(self):
+
+ # 8260
+ # support datetime64 with tz
+
+ idx = Index(date_range('20130101',periods=3,tz='US/Eastern'),
+ name='foo')
+ dr = date_range('20130110',periods=3)
+ df = DataFrame({'A' : idx, 'B' : dr})
+ df['C'] = idx
+ df.iloc[1,1] = pd.NaT
+ df.iloc[1,2] = pd.NaT
+
+ # indexing
+ result = df.iloc[1]
+ expected = Series([Timestamp('2013-01-02 00:00:00-0500', tz='US/Eastern'), np.nan, np.nan],
+ index=list('ABC'), dtype='object', name=1)
+ assert_series_equal(result, expected)
+ result = df.loc[1]
+ expected = Series([Timestamp('2013-01-02 00:00:00-0500', tz='US/Eastern'), np.nan, np.nan],
+ index=list('ABC'), dtype='object', name=1)
+ assert_series_equal(result, expected)
+
+ # indexing - fast_xs
+ df = DataFrame({'a': date_range('2014-01-01', periods=10, tz='UTC')})
+ result = df.iloc[5]
+ expected = Timestamp('2014-01-06 00:00:00+0000', tz='UTC', offset='D')
+ self.assertEqual(result, expected)
+
+ result = df.loc[5]
+ self.assertEqual(result, expected)
+
+ # indexing - boolean
+ result = df[df.a > df.a[3]]
+ expected = df.iloc[4:]
+ assert_frame_equal(result, expected)
+
+ # indexing - setting an element
+ df = DataFrame( data = pd.to_datetime(['2015-03-30 20:12:32','2015-03-12 00:11:11']) ,columns=['time'] )
+ df['new_col']=['new','old']
+ df.time=df.set_index('time').index.tz_localize('UTC')
+ v = df[df.new_col=='new'].set_index('time').index.tz_convert('US/Pacific')
+
+ # trying to set a single element on a part of a different timezone
+ def f():
+ df.loc[df.new_col=='new','time'] = v
+ self.assertRaises(ValueError, f)
+
+ v = df.loc[df.new_col=='new','time'] + pd.Timedelta('1s')
+ df.loc[df.new_col=='new','time'] = v
+ assert_series_equal(df.loc[df.new_col=='new','time'],v)
+
def test_loc_setitem_dups(self):
# GH 6541
| https://api.github.com/repos/pandas-dev/pandas/pulls/11259 | 2015-10-07T19:13:42Z | 2015-10-07T21:05:33Z | 2015-10-07T21:05:33Z | 2015-10-07T21:05:33Z |
|
DOC: some hacks to get rid of warnings | diff --git a/doc/source/api.rst b/doc/source/api.rst
index b1c7b569c0c42..bfd1c92d14acd 100644
--- a/doc/source/api.rst
+++ b/doc/source/api.rst
@@ -611,7 +611,7 @@ strings and apply several methods to it. These can be acccessed like
..
The following is needed to ensure the generated pages are created with the
- correct template (otherwise they would be created in the Series class page)
+ correct template (otherwise they would be created in the Series/Index class page)
..
.. autosummary::
@@ -621,6 +621,10 @@ strings and apply several methods to it. These can be acccessed like
Series.str
Series.cat
Series.dt
+ Index.str
+ CategoricalIndex.str
+ DatetimeIndex.str
+ TimedeltaIndex.str
.. _api.categorical:
diff --git a/doc/sphinxext/numpydoc/docscrape_sphinx.py b/doc/sphinxext/numpydoc/docscrape_sphinx.py
index ba93b2eab779d..5a582b4d03282 100755
--- a/doc/sphinxext/numpydoc/docscrape_sphinx.py
+++ b/doc/sphinxext/numpydoc/docscrape_sphinx.py
@@ -19,6 +19,7 @@ def __init__(self, docstring, config={}):
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
+ self.class_members_list = config.get('class_members_list', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
@@ -95,7 +96,7 @@ def _str_member_list(self, name):
"""
out = []
- if self[name]:
+ if self[name] and self.class_members_list:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
@@ -114,11 +115,13 @@ def _str_member_list(self, name):
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
- if param_obj and (pydoc.getdoc(param_obj) or not desc):
- # Referenced object has a docstring
- autosum += [" %s%s" % (prefix, param)]
- else:
- others.append((param, param_type, desc))
+ # pandas HACK - do not exclude attributes wich are None
+ # if param_obj and (pydoc.getdoc(param_obj) or not desc):
+ # # Referenced object has a docstring
+ # autosum += [" %s%s" % (prefix, param)]
+ # else:
+ # others.append((param, param_type, desc))
+ autosum += [" %s%s" % (prefix, param)]
if autosum:
out += ['.. autosummary::']
diff --git a/doc/sphinxext/numpydoc/numpydoc.py b/doc/sphinxext/numpydoc/numpydoc.py
index 2bc2d1e91ed3f..0cccf72de3745 100755
--- a/doc/sphinxext/numpydoc/numpydoc.py
+++ b/doc/sphinxext/numpydoc/numpydoc.py
@@ -42,6 +42,10 @@ def mangle_docstrings(app, what, name, obj, options, lines,
class_members_toctree=app.config.numpydoc_class_members_toctree,
)
+ # PANDAS HACK (to remove the list of methods/attributes for Categorical)
+ if what == "class" and name.endswith(".Categorical"):
+ cfg['class_members_list'] = False
+
if what == 'module':
# Strip top title
title_re = re.compile(sixu('^\\s*[#*=]{4,}\\n[a-z0-9 -]+\\n[#*=]{4,}\\s*'),
| This PR includes some hacks to numpydoc to get rid of a bunch of warnings (split off from https://github.com/pydata/pandas/pull/11069 as I want these merged for 0.17.0). There is some more explanation in the commit messages.
Closes #6100
The disadvantage is that this causes a deviation from upstream numpydoc, and makes it more difficult to update to the upstream version later.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11257 | 2015-10-07T11:19:56Z | 2015-10-07T21:42:44Z | 2015-10-07T21:42:44Z | 2018-12-06T21:13:45Z |
added random_split in generic.py, for DataFrames etc. | diff --git a/pandas/core/api.py b/pandas/core/api.py
index e2ac57e37cba6..e34895af9640c 100644
--- a/pandas/core/api.py
+++ b/pandas/core/api.py
@@ -6,7 +6,7 @@
from pandas.core.algorithms import factorize, match, unique, value_counts
from pandas.core.common import isnull, notnull
from pandas.core.categorical import Categorical
-from pandas.core.groupby import Grouper
+from pandas.core.groupby import Grouper, RandomPartitioner, Partitioner
from pandas.core.format import set_eng_float_format
from pandas.core.index import Index, CategoricalIndex, Int64Index, Float64Index, MultiIndex
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 2def8180a43e4..246ebcb6cc953 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -3,6 +3,8 @@
import operator
import weakref
import gc
+from numbers import Real
+from math import floor
import numpy as np
import pandas.lib as lib
@@ -2034,6 +2036,37 @@ def tail(self, n=5):
return self
return self.iloc[-n:]
+ def split(self, weights=(50, 50), random=False, axis=None):
+ """
+ Returns a random split from an axis of this object
+
+ Parameters
+ ----------
+ weights : weights: list or tuple or equivalent, optional
+ The passed collection of weights serves as relative sizes of the splits
+ of the returned datasets.
+ Default = (50,50).
+ random : boolean or int or numpy.random.RandomState, optional
+ If False (=default value), makes consecutive splits from beginning to end.
+ If not False, a seed for the random number generator can be provided (if int) or
+ a numpy RandomState object. If True, default random behavior.
+ Default = False.
+ axis : int or string, optional
+ Axis to sample. Accepts axis number or name. Default is stat axis
+ for given data type (0 for Series and DataFrames, 1 for Panels).
+
+ Returns
+ -------
+ Multiple objects of the same type as original object. The number of returned objects
+ is the same as the number of weights provided as parameter.
+ """
+ g = pd.Partitioner(weights, axis)
+ if random is not False and random is not None:
+ if random is True:
+ random = None
+ g = pd.RandomPartitioner(weights, axis, random)
+ return self.groupby(g).split()
+
def sample(self, n=None, frac=None, replace=False, weights=None, random_state=None, axis=None):
"""
diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index add5080a69ee4..d8f5b33a1ad35 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -4,6 +4,7 @@
import datetime
import collections
import warnings
+from numbers import Real
from pandas.compat import(
zip, builtins, range, long, lzip,
@@ -296,6 +297,62 @@ def groups(self):
return self.grouper.groups
+class Partitioner(Grouper):
+ '''
+
+ '''
+
+ def __init__(self, proportions=(1,1), axis=None):
+ self._proportions = proportions
+ self._axis = axis
+ self.key = None
+ # check weight type
+ if len(self._proportions) < 2:
+ raise ValueError("must split into more than 1 partition")
+ for w in self._proportions:
+ if not (com.is_float(w) or com.is_integer(w)) or w <=0:
+ raise ValueError("weights must be strictly positive real numbers")
+
+ # compute proportions as fractions
+ self._proportions = np.asarray(self._proportions, dtype="float64")
+ self._proportions = self._proportions/self._proportions.sum()
+ super(Partitioner, self).__init__()
+
+ def _get_grouper(self, obj):
+ if self._axis is None:
+ self._axis = obj._stat_axis_number
+ self._axis = obj._get_axis_number(self._axis)
+ axis_length = obj.shape[self._axis]
+
+ numbers = np.rint(self._proportions * axis_length).astype("int32")
+
+ newcol = reduce(lambda x, y: x + y, [[x]*numbers[x] for x in range(len(numbers))])
+ while len(newcol) < axis_length:
+ newcol.append(newcol[-1])
+
+ self._transform(newcol)
+
+ grouping = Grouping(obj._get_axis(self._axis), grouper=Series(newcol), obj=obj, sort=True, in_axis=True)
+
+ return None, BaseGrouper(self._axis, [grouping]), obj
+
+ def _transform(self, newcol):
+ pass
+
+class RandomPartitioner(Partitioner):
+ '''
+ TODO
+ '''
+
+ def __init__(self, proportions=(1,1), axis=None, random=None):
+ # Process random_state argument
+ self.rs = com._random_state(random)
+ super(RandomPartitioner, self).__init__(proportions, axis)
+
+ def _transform(self, newcol):
+ self.rs.shuffle(newcol)
+
+
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects
@@ -658,6 +715,10 @@ def __iter__(self):
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
+ def split(self):
+ acc = [x for _, x in self]
+ return tuple(acc)
+
def apply(self, func, *args, **kwargs):
"""
Apply function and combine results together in an intelligent way. The
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 3a26be2ca1032..d8860b51d7c6e 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -354,6 +354,12 @@ def test_head_tail(self):
self._compare(o.head(-3), o.head(7))
self._compare(o.tail(-3), o.tail(7))
+ def test_split(self):
+ o = self._construct(shape=10)
+ a, b = o.split((1, 1), axis=0, random=True)
+ self.assertTrue(a.shape[0] == 5)
+ self.assertTrue(b.shape[0] == 5)
+
def test_sample(self):
# Fixes issue: 2419
diff --git a/pandas/tests/test_groupby.py b/pandas/tests/test_groupby.py
index 8eb641ce8f494..7a66da080ddf8 100644
--- a/pandas/tests/test_groupby.py
+++ b/pandas/tests/test_groupby.py
@@ -399,8 +399,13 @@ def test_grouper_multilevel_freq(self):
pd.Grouper(level=1, freq='W')]).sum()
assert_frame_equal(result, expected)
- def test_grouper_creation_bug(self):
+ def test_grouper_random(self):
+ df = DataFrame({"A": [0,1,2,3,4,5], "b": [10,11,12,13,14,15]})
+ g = df.groupby(pd.RandomPartitioner((1,2)))
+ a, b = g.split()
+ assert_frame_equal(df, df)
+ def test_grouper_creation_bug(self):
# GH 8795
df = DataFrame({'A':[0,0,1,1,2,2], 'B':[1,2,3,4,5,6]})
g = df.groupby('A')
| Added a method random_split() for NDFrames, to split the Frame into several frames according to one axis.
Basic use: for train/test or train/validation/test splitting of a dataframe.
Note: I'm not sure this feature fits well in NDFrame. If you think this feature can be added, I'll add more tests and docs.
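For comparison, a minimal sketch of the train/test use case using methods that already exist (``sample`` plus ``drop``); the fraction and seed are illustrative, and this is not the API proposed in this PR.

``` python
import numpy as np
import pandas as pd

df = pd.DataFrame({'x': np.arange(10), 'y': np.arange(10) * 2})

# 70/30 split along the row axis
train = df.sample(frac=0.7, random_state=42)
test = df.drop(train.index)

assert len(train) + len(test) == len(df)
```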
| https://api.github.com/repos/pandas-dev/pandas/pulls/11253 | 2015-10-06T15:58:42Z | 2015-11-23T00:11:47Z | null | 2015-11-23T00:11:48Z |
COMPAT/PERF: lib.ismember_int64 on older numpies/cython not comparing correctly #11232 | diff --git a/asv_bench/benchmarks/series_methods.py b/asv_bench/benchmarks/series_methods.py
index d2167a8b6e9e1..37969a6949157 100644
--- a/asv_bench/benchmarks/series_methods.py
+++ b/asv_bench/benchmarks/series_methods.py
@@ -5,22 +5,21 @@ class series_isin_int64(object):
goal_time = 0.2
def setup(self):
- self.s1 = Series(np.random.randn(10000))
- self.s2 = Series(np.random.randint(1, 10, 10000))
self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64')
+ self.s4 = Series(np.random.randint(1, 100, 10000000)).astype('int64')
self.values = [1, 2]
- self.s4 = self.s3.astype('object')
def time_series_isin_int64(self):
self.s3.isin(self.values)
+ def time_series_isin_int64_large(self):
+ self.s4.isin(self.values)
+
class series_isin_object(object):
goal_time = 0.2
def setup(self):
- self.s1 = Series(np.random.randn(10000))
- self.s2 = Series(np.random.randint(1, 10, 10000))
self.s3 = Series(np.random.randint(1, 10, 100000)).astype('int64')
self.values = [1, 2]
self.s4 = self.s3.astype('object')
@@ -71,4 +70,4 @@ def setup(self):
def time_series_nsmallest2(self):
self.s2.nsmallest(3, take_last=True)
- self.s2.nsmallest(3, take_last=False)
\ No newline at end of file
+ self.s2.nsmallest(3, take_last=False)
diff --git a/ci/requirements-2.6.build b/ci/requirements-2.6.build
index f8cbd8cef3fef..85148069a9e6a 100644
--- a/ci/requirements-2.6.build
+++ b/ci/requirements-2.6.build
@@ -1,4 +1,4 @@
-numpy=1.7.0
+numpy=1.7.1
cython=0.19.1
dateutil=1.5
pytz=2013b
diff --git a/ci/requirements-2.6.run b/ci/requirements-2.6.run
index 6521ca4122ef3..5f8a2fde1409f 100644
--- a/ci/requirements-2.6.run
+++ b/ci/requirements-2.6.run
@@ -1,14 +1,16 @@
-numpy=1.7.0
+numpy=1.7.1
dateutil=1.5
pytz=2013b
scipy=0.11.0
xlwt=0.7.5
xlrd=0.9.2
statsmodels=0.4.3
+bottleneck=0.8.0
+numexpr=2.2.2
+pytables=3.0.0
html5lib=1.0b2
beautiful-soup=4.2.0
psycopg2=2.5.1
-numexpr=1.4.2
pymysql=0.6.0
sqlalchemy=0.7.8
xlsxwriter=0.4.6
diff --git a/ci/requirements-2.7.build b/ci/requirements-2.7.build
index df543aaf40f69..3fe6f60aee98b 100644
--- a/ci/requirements-2.7.build
+++ b/ci/requirements-2.7.build
@@ -1,4 +1,4 @@
dateutil=2.1
pytz=2013b
-numpy=1.7.1
+numpy
cython=0.19.1
diff --git a/ci/requirements-2.7.run b/ci/requirements-2.7.run
index a740966684ab2..86e5934539ebf 100644
--- a/ci/requirements-2.7.run
+++ b/ci/requirements-2.7.run
@@ -1,10 +1,10 @@
dateutil=2.1
pytz=2013b
-numpy=1.7.1
+numpy
xlwt=0.7.5
-numexpr=2.2.2
-pytables=3.0.0
-matplotlib=1.3.1
+numexpr
+pytables
+matplotlib
openpyxl=1.6.2
xlrd=0.9.2
sqlalchemy=0.9.6
@@ -12,7 +12,7 @@ lxml=3.2.1
scipy
xlsxwriter=0.4.6
boto=2.36.0
-bottleneck=0.8.0
+bottleneck
psycopg2=2.5.2
patsy
pymysql=0.6.3
diff --git a/ci/requirements-2.7_SLOW.build b/ci/requirements-2.7_SLOW.build
index 9558cf00ddf5c..664e8b418def7 100644
--- a/ci/requirements-2.7_SLOW.build
+++ b/ci/requirements-2.7_SLOW.build
@@ -1,4 +1,4 @@
python-dateutil
pytz
-numpy
+numpy=1.8.2
cython
diff --git a/ci/requirements-2.7_SLOW.run b/ci/requirements-2.7_SLOW.run
index b6c9250dd775e..f02a7cb8a309a 100644
--- a/ci/requirements-2.7_SLOW.run
+++ b/ci/requirements-2.7_SLOW.run
@@ -1,7 +1,7 @@
python-dateutil
pytz
-numpy
-matplotlib
+numpy=1.8.2
+matplotlib=1.3.1
scipy
patsy
statsmodels
diff --git a/doc/source/install.rst b/doc/source/install.rst
index 3c624a9d25a0c..54e7b2d4df350 100644
--- a/doc/source/install.rst
+++ b/doc/source/install.rst
@@ -214,7 +214,7 @@ Dependencies
------------
* `setuptools <http://pythonhosted.org/setuptools>`__
-* `NumPy <http://www.numpy.org>`__: 1.7.0 or higher
+* `NumPy <http://www.numpy.org>`__: 1.7.1 or higher
* `python-dateutil <http://labix.org/python-dateutil>`__ 1.5 or higher
* `pytz <http://pytz.sourceforge.net/>`__
* Needed for time zone support
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index ec074dfa28bf5..1e240d0786082 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -1034,6 +1034,7 @@ Bug Fixes
~~~~~~~~~
- Bug in incorrection computation of ``.mean()`` on ``timedelta64[ns]`` because of overflow (:issue:`9442`)
+- Bug in ``.isin`` on older numpies (:issue: `11232`)
- Bug in ``DataFrame.to_html(index=False)`` renders unnecessary ``name`` row (:issue:`10344`)
- Bug in ``DataFrame.to_latex()`` the ``column_format`` argument could not be passed (:issue:`9402`)
- Bug in ``DatetimeIndex`` when localizing with ``NaT`` (:issue:`10477`)
diff --git a/pandas/core/algorithms.py b/pandas/core/algorithms.py
index 34bf173d63860..e5347f03b5462 100644
--- a/pandas/core/algorithms.py
+++ b/pandas/core/algorithms.py
@@ -6,6 +6,7 @@
from warnings import warn
import numpy as np
+from pandas import compat, lib, _np_version_under1p8
import pandas.core.common as com
import pandas.algos as algos
import pandas.hashtable as htable
@@ -66,6 +67,54 @@ def unique(values):
return _hashtable_algo(f, values.dtype)
+def isin(comps, values):
+ """
+ Compute the isin boolean array
+
+ Parameters
+ ----------
+ comps: array-like
+ values: array-like
+
+ Returns
+ -------
+ boolean array same length as comps
+ """
+
+ if not com.is_list_like(comps):
+ raise TypeError("only list-like objects are allowed to be passed"
+ " to isin(), you passed a "
+ "[{0}]".format(type(comps).__name__))
+ comps = np.asarray(comps)
+ if not com.is_list_like(values):
+ raise TypeError("only list-like objects are allowed to be passed"
+ " to isin(), you passed a "
+ "[{0}]".format(type(values).__name__))
+
+ # GH11232
+ # work-around for numpy < 1.8 and comparisions on py3
+ # faster for larger cases to use np.in1d
+ if (_np_version_under1p8 and compat.PY3) or len(comps) > 1000000:
+ f = lambda x, y: np.in1d(x,np.asarray(list(y)))
+ else:
+ f = lambda x, y: lib.ismember_int64(x,set(y))
+
+ # may need i8 conversion for proper membership testing
+ if com.is_datetime64_dtype(comps):
+ from pandas.tseries.tools import to_datetime
+ values = to_datetime(values)._values.view('i8')
+ comps = comps.view('i8')
+ elif com.is_timedelta64_dtype(comps):
+ from pandas.tseries.timedeltas import to_timedelta
+ values = to_timedelta(values)._values.view('i8')
+ comps = comps.view('i8')
+ elif com.is_int64_dtype(comps):
+ pass
+ else:
+ f = lambda x, y: lib.ismember(x, set(values))
+
+ return f(comps, values)
+
def _hashtable_algo(f, dtype, return_dtype=None):
"""
f(HashTable, type_caster) -> result
diff --git a/pandas/core/index.py b/pandas/core/index.py
index 1daa0e1b52d02..256ece6539b6f 100644
--- a/pandas/core/index.py
+++ b/pandas/core/index.py
@@ -14,6 +14,7 @@
from pandas.compat import range, zip, lrange, lzip, u, map
from pandas import compat
+from pandas.core import algorithms
from pandas.core.base import PandasObject, FrozenList, FrozenNDArray, IndexOpsMixin, _shared_docs, PandasDelegate
from pandas.util.decorators import (Appender, Substitution, cache_readonly,
deprecate, deprecate_kwarg)
@@ -108,7 +109,6 @@ class Index(IndexOpsMixin, PandasObject):
_is_numeric_dtype = False
_engine_type = _index.ObjectEngine
- _isin_type = lib.ismember
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False,
tupleize_cols=True, **kwargs):
@@ -1443,7 +1443,7 @@ def __add__(self, other):
return Index(np.array(self) + other)
def __radd__(self, other):
- if com.is_list_like(other):
+ if is_list_like(other):
warnings.warn("using '+' to provide set union with Indexes is deprecated, "
"use '|' or .union()", FutureWarning, stacklevel=2)
return Index(other + np.array(self))
@@ -1995,10 +1995,9 @@ def isin(self, values, level=None):
is_contained : ndarray (boolean dtype)
"""
- value_set = set(values)
if level is not None:
self._validate_index_level(level)
- return self._isin_type(np.array(self), value_set)
+ return algorithms.isin(np.array(self), values)
def _can_reindex(self, indexer):
"""
@@ -3097,6 +3096,8 @@ def _is_dtype_compat(self, other):
raise TypeError("categories must match existing categories when appending")
else:
values = other
+ if not is_list_like(values):
+ values = [ values ]
other = CategoricalIndex(self._create_categorical(self, other, categories=self.categories, ordered=self.ordered))
if not other.isin(values).all():
raise TypeError("cannot append a non-category item to a CategoricalIndex")
@@ -3580,7 +3581,6 @@ class Int64Index(NumericIndex):
_outer_indexer = _algos.outer_join_indexer_int64
_engine_type = _index.Int64Engine
- _isin_type = lib.ismember_int64
def __new__(cls, data=None, dtype=None, copy=False, name=None, fastpath=False, **kwargs):
diff --git a/pandas/core/series.py b/pandas/core/series.py
index 11645311467d5..f4e3374626011 100644
--- a/pandas/core/series.py
+++ b/pandas/core/series.py
@@ -42,7 +42,7 @@
from pandas.compat import zip, u, OrderedDict, StringIO
import pandas.core.ops as ops
-from pandas.core.algorithms import select_n
+from pandas.core import algorithms
import pandas.core.common as com
import pandas.core.datetools as datetools
@@ -1156,8 +1156,7 @@ def mode(self):
modes : Series (sorted)
"""
# TODO: Add option for bins like value_counts()
- from pandas.core.algorithms import mode
- return mode(self)
+ return algorithms.mode(self)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@Appender(base._shared_docs['drop_duplicates'] % _shared_doc_kwargs)
@@ -1812,9 +1811,8 @@ def rank(self, method='average', na_option='keep', ascending=True,
-------
ranks : Series
"""
- from pandas.core.algorithms import rank
- ranks = rank(self._values, method=method, na_option=na_option,
- ascending=ascending, pct=pct)
+ ranks = algorithms.rank(self._values, method=method, na_option=na_option,
+ ascending=ascending, pct=pct)
return self._constructor(ranks, index=self.index).__finalize__(self)
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
@@ -1852,7 +1850,7 @@ def nlargest(self, n=5, keep='first'):
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nlargest(10) # only sorts up to the N requested
"""
- return select_n(self, n=n, keep=keep, method='nlargest')
+ return algorithms.select_n(self, n=n, keep=keep, method='nlargest')
@deprecate_kwarg('take_last', 'keep', mapping={True: 'last', False: 'first'})
def nsmallest(self, n=5, keep='first'):
@@ -1889,7 +1887,7 @@ def nsmallest(self, n=5, keep='first'):
>>> s = pd.Series(np.random.randn(1e6))
>>> s.nsmallest(10) # only sorts up to the N requested
"""
- return select_n(self, n=n, keep=keep, method='nsmallest')
+ return algorithms.select_n(self, n=n, keep=keep, method='nsmallest')
def sortlevel(self, level=0, ascending=True, sort_remaining=True):
"""
@@ -2353,29 +2351,7 @@ def isin(self, values):
dtype: bool
"""
- if not com.is_list_like(values):
- raise TypeError("only list-like objects are allowed to be passed"
- " to Series.isin(), you passed a "
- "{0!r}".format(type(values).__name__))
-
- # may need i8 conversion for proper membership testing
- comps = _values_from_object(self)
- f = lib.ismember
- if com.is_datetime64_dtype(self):
- from pandas.tseries.tools import to_datetime
- values = Series(to_datetime(values))._values.view('i8')
- comps = comps.view('i8')
- f = lib.ismember_int64
- elif com.is_timedelta64_dtype(self):
- from pandas.tseries.timedeltas import to_timedelta
- values = Series(to_timedelta(values))._values.view('i8')
- comps = comps.view('i8')
- f = lib.ismember_int64
- elif is_int64_dtype(self):
- f = lib.ismember_int64
-
- value_set = set(values)
- result = f(comps, value_set)
+ result = algorithms.isin(_values_from_object(self), values)
return self._constructor(result, index=self.index).__finalize__(self)
def between(self, left, right, inclusive=True):
diff --git a/pandas/tests/test_algos.py b/pandas/tests/test_algos.py
index 3c9931b93f9f9..b18bd7b2b3978 100644
--- a/pandas/tests/test_algos.py
+++ b/pandas/tests/test_algos.py
@@ -278,7 +278,69 @@ def test_timedelta64_dtype_array_returned(self):
tm.assert_numpy_array_equal(result, expected)
self.assertEqual(result.dtype, expected.dtype)
+class TestIsin(tm.TestCase):
+ _multiprocess_can_split_ = True
+
+ def test_invalid(self):
+
+ self.assertRaises(TypeError, lambda : algos.isin(1,1))
+ self.assertRaises(TypeError, lambda : algos.isin(1,[1]))
+ self.assertRaises(TypeError, lambda : algos.isin([1],1))
+
+ def test_basic(self):
+
+ result = algos.isin([1,2],[1])
+ expected = np.array([True,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = algos.isin(np.array([1,2]),[1])
+ expected = np.array([True,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = algos.isin(pd.Series([1,2]),[1])
+ expected = np.array([True,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = algos.isin(pd.Series([1,2]),pd.Series([1]))
+ expected = np.array([True,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = algos.isin(['a','b'],['a'])
+ expected = np.array([True,False])
+ tm.assert_numpy_array_equal(result, expected)
+ result = algos.isin(pd.Series(['a','b']),pd.Series(['a']))
+ expected = np.array([True,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = algos.isin(['a','b'],[1])
+ expected = np.array([False,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ arr = pd.date_range('20130101',periods=3).values
+ result = algos.isin(arr,[arr[0]])
+ expected = np.array([True,False,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ result = algos.isin(arr,arr[0:2])
+ expected = np.array([True,True,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+ arr = pd.timedelta_range('1 day',periods=3).values
+ result = algos.isin(arr,[arr[0]])
+ expected = np.array([True,False,False])
+ tm.assert_numpy_array_equal(result, expected)
+
+
+
+ def test_large(self):
+
+ s = pd.date_range('20000101',periods=2000000,freq='s').values
+ result = algos.isin(s,s[0:2])
+ expected = np.zeros(len(s),dtype=bool)
+ expected[0] = True
+ expected[1] = True
+ tm.assert_numpy_array_equal(result, expected)
class TestValueCounts(tm.TestCase):
_multiprocess_can_split_ = True
diff --git a/pandas/tseries/base.py b/pandas/tseries/base.py
index ebf3af5f46c47..2f4858300293e 100644
--- a/pandas/tseries/base.py
+++ b/pandas/tseries/base.py
@@ -7,7 +7,7 @@
from pandas import compat
import numpy as np
-from pandas.core import common as com
+from pandas.core import common as com, algorithms
from pandas.core.common import is_integer, is_float, AbstractMethodError
import pandas.tslib as tslib
import pandas.lib as lib
@@ -486,8 +486,7 @@ def isin(self, values):
except ValueError:
return self.asobject.isin(values)
- value_set = set(values.asi8)
- return lib.ismember_int64(self.asi8, value_set)
+ return algorithms.isin(self.asi8, values.asi8)
def shift(self, n, freq=None):
"""
| closes #11232
PERF: use np.in1d on larger isin sizes
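A rough illustration (not the pandas internals themselves) of the two strategies being switched between - set-based membership versus ``np.in1d`` - which produce the same mask; the array sizes are made up.

``` python
import numpy as np

comps = np.random.randint(0, 100, size=100000).astype('int64')
values = [1, 2]

# hash-set membership, similar in spirit to lib.ismember_int64
vset = set(values)
mask_set = np.fromiter((x in vset for x in comps), dtype=bool, count=len(comps))

# np.in1d, the path used for large inputs / older numpy on py3
mask_in1d = np.in1d(comps, np.asarray(values))

assert (mask_set == mask_in1d).all()
```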
| https://api.github.com/repos/pandas-dev/pandas/pulls/11252 | 2015-10-06T11:07:25Z | 2015-10-06T18:04:08Z | 2015-10-06T18:04:08Z | 2018-01-22T18:34:53Z |
DOC: Fix typo in Grouper private method documentation. | diff --git a/pandas/core/groupby.py b/pandas/core/groupby.py
index 40f078a1bbcfe..add5080a69ee4 100644
--- a/pandas/core/groupby.py
+++ b/pandas/core/groupby.py
@@ -242,7 +242,7 @@ def _get_grouper(self, obj):
def _set_grouper(self, obj, sort=False):
"""
- given an object and the specifcations, setup the internal grouper for this particular specification
+ given an object and the specifications, setup the internal grouper for this particular specification
Parameters
----------
@@ -1962,7 +1962,7 @@ def __init__(self, index, grouper=None, obj=None, name=None, level=None,
# technically we cannot group on an unordered Categorical
# but this a user convenience to do so; the ordering
- # is preserved and if its a reduction is doesnt't make any difference
+ # is preserved and if it's a reduction it doesn't make any difference
pass
# fix bug #GH8868 sort=False being ignored in categorical groupby
@@ -2069,7 +2069,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
Groupers enable local references to axis,level,sort, while
the passed in axis, level, and sort are 'global'.
- This routine tries to figure of what the passing in references
+ This routine tries to figure out what the passing in references
are and then creates a Grouping for each one, combined into
a BaseGrouper.
@@ -2077,7 +2077,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
group_axis = obj._get_axis(axis)
- # validate thatthe passed level is compatible with the passed
+ # validate that the passed level is compatible with the passed
# axis of the object
if level is not None:
if not isinstance(group_axis, MultiIndex):
@@ -2091,7 +2091,7 @@ def _get_grouper(obj, key=None, axis=0, level=None, sort=True):
level = None
key = group_axis
- # a passed in Grouper, directly convert
+ # a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj)
if key.key is None:
@@ -2568,7 +2568,7 @@ def nunique(self, dropna=True):
ids, val = ids[sorter], val[sorter]
- # group boundries are where group ids change
+ # group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
@@ -2591,7 +2591,7 @@ def nunique(self, dropna=True):
@Appender(Series.nlargest.__doc__)
def nlargest(self, n=5, keep='first'):
# ToDo: When we remove deprecate_kwargs, we can remote these methods
- # and inlucde nlargest and nsmallest to _series_apply_whitelist
+ # and include nlargest and nsmallest to _series_apply_whitelist
return self.apply(lambda x: x.nlargest(n=n, keep=keep))
@@ -2634,12 +2634,12 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
- # group boundries are where group ids change
+ # group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
inc = np.r_[True, lab[1:] != lab[:-1]]
- inc[idx] = True # group boundries are also new values
+ inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
@@ -2919,8 +2919,6 @@ def _aggregate_generic(self, func, *args, **kwargs):
if axis != obj._info_axis_number:
try:
for name, data in self:
- # for name in self.indices:
- # data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
@@ -3742,7 +3740,7 @@ def get_group_index(labels, shape, sort, xnull):
sort: boolean
If the ranks of returned ids should match lexical ranks of labels
xnull: boolean
- If true nulls are eXcluded. i.e. -1 values in the labels are
+ If true nulls are excluded. i.e. -1 values in the labels are
passed through
Returns
-------
| https://api.github.com/repos/pandas-dev/pandas/pulls/11249 | 2015-10-06T00:09:11Z | 2015-10-06T11:06:41Z | null | 2015-10-06T11:06:41Z |
|
DOC: to_timedelta in convert_objects warning | diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3473dd0f7cd88..2def8180a43e4 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -2595,7 +2595,7 @@ def convert_objects(self, convert_dates=True, convert_numeric=False,
"""
from warnings import warn
warn("convert_objects is deprecated. Use the data-type specific "
- "converters pd.to_datetime, pd.to_timestamp and pd.to_numeric.",
+ "converters pd.to_datetime, pd.to_timedelta and pd.to_numeric.",
FutureWarning, stacklevel=2)
return self._constructor(
| closes #11246
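For reference, a quick sketch of the per-type converters the corrected warning points to; the input data is made up.

``` python
import pandas as pd

print(pd.to_datetime(pd.Series(['2015-10-05', '2015-10-06'])))
print(pd.to_timedelta(pd.Series(['1 days', '2 days'])))
print(pd.to_numeric(pd.Series(['1', '2.5'])))
```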
| https://api.github.com/repos/pandas-dev/pandas/pulls/11248 | 2015-10-05T23:24:45Z | 2015-10-06T07:55:46Z | 2015-10-06T07:55:46Z | 2015-10-06T23:58:39Z |
BUG: datetime64 series reduces to nan when empty instead of nat | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 668873e838597..60c8b58768f83 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -28,6 +28,9 @@ Other Enhancements
API changes
~~~~~~~~~~~
+- min and max reductions on ``datetime64`` and ``timedelta64`` dtyped series now
+ result in ``NaT`` and not ``nan`` (:issue:`11245`).
+
.. _whatsnew_0171.deprecations:
Deprecations
@@ -74,3 +77,5 @@ Bug Fixes
- Bugs in ``to_excel`` with duplicate columns (:issue:`11007`, :issue:`10982`, :issue:`10970`)
+- Fixed a bug that prevented the construction of an empty series of dtype
+ ``datetime64[ns, tz]`` (:issue:`11245`).
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 2411925207696..da30ca0538f0e 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -1927,12 +1927,11 @@ def _possibly_cast_to_datetime(value, dtype, errors='raise'):
value = tslib.iNaT
# we have an array of datetime or timedeltas & nulls
- elif np.prod(value.shape) and not is_dtype_equal(value.dtype, dtype):
+ elif np.prod(value.shape) or not is_dtype_equal(value.dtype, dtype):
try:
if is_datetime64:
value = to_datetime(value, errors=errors)._values
elif is_datetime64tz:
-
# input has to be UTC at this point, so just localize
value = to_datetime(value, errors=errors).tz_localize(dtype.tz)
elif is_timedelta64:
diff --git a/pandas/core/dtypes.py b/pandas/core/dtypes.py
index bf6d068a58d73..0b13471aadcfb 100644
--- a/pandas/core/dtypes.py
+++ b/pandas/core/dtypes.py
@@ -138,7 +138,7 @@ class DatetimeTZDtype(ExtensionDtype):
num = 101
base = np.dtype('M8[ns]')
_metadata = ['unit','tz']
- _match = re.compile("datetime64\[(?P<unit>.+), (?P<tz>.+)\]")
+ _match = re.compile("(datetime64|M8)\[(?P<unit>.+), (?P<tz>.+)\]")
def __init__(self, unit, tz=None):
"""
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 08dfe315c4cb2..013bd1c230662 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4624,7 +4624,7 @@ def _reduce(self, op, name, axis=0, skipna=True, numeric_only=None,
values = self.values
result = f(values)
- if is_object_dtype(result.dtype):
+ if hasattr(result, 'dtype') and is_object_dtype(result.dtype):
try:
if filter_type is None or filter_type == 'numeric':
result = result.astype(np.float64)
diff --git a/pandas/core/nanops.py b/pandas/core/nanops.py
index 1561c0aefbb9b..43533b67b5441 100644
--- a/pandas/core/nanops.py
+++ b/pandas/core/nanops.py
@@ -425,65 +425,34 @@ def nansem(values, axis=None, skipna=True, ddof=1):
return np.sqrt(var) / np.sqrt(count)
-@bottleneck_switch()
-def nanmin(values, axis=None, skipna=True):
- values, mask, dtype, dtype_max = _get_values(values, skipna,
- fill_value_typ='+inf')
-
- # numpy 1.6.1 workaround in Python 3.x
- if is_object_dtype(values) and compat.PY3:
- if values.ndim > 1:
- apply_ax = axis if axis is not None else 0
- result = np.apply_along_axis(builtins.min, apply_ax, values)
- else:
- try:
- result = builtins.min(values)
- except:
- result = np.nan
- else:
+def _nanminmax(meth, fill_value_typ):
+ @bottleneck_switch()
+ def reduction(values, axis=None, skipna=True):
+ values, mask, dtype, dtype_max = _get_values(
+ values,
+ skipna,
+ fill_value_typ=fill_value_typ,
+ )
+
if ((axis is not None and values.shape[axis] == 0)
or values.size == 0):
try:
- result = ensure_float(values.sum(axis, dtype=dtype_max))
+ result = getattr(values, meth)(axis, dtype=dtype_max)
result.fill(np.nan)
except:
result = np.nan
else:
- result = values.min(axis)
+ result = getattr(values, meth)(axis)
- result = _wrap_results(result, dtype)
- return _maybe_null_out(result, axis, mask)
+ result = _wrap_results(result, dtype)
+ return _maybe_null_out(result, axis, mask)
+ reduction.__name__ = 'nan' + meth
+ return reduction
-@bottleneck_switch()
-def nanmax(values, axis=None, skipna=True):
- values, mask, dtype, dtype_max = _get_values(values, skipna,
- fill_value_typ='-inf')
- # numpy 1.6.1 workaround in Python 3.x
- if is_object_dtype(values) and compat.PY3:
-
- if values.ndim > 1:
- apply_ax = axis if axis is not None else 0
- result = np.apply_along_axis(builtins.max, apply_ax, values)
- else:
- try:
- result = builtins.max(values)
- except:
- result = np.nan
- else:
- if ((axis is not None and values.shape[axis] == 0)
- or values.size == 0):
- try:
- result = ensure_float(values.sum(axis, dtype=dtype_max))
- result.fill(np.nan)
- except:
- result = np.nan
- else:
- result = values.max(axis)
-
- result = _wrap_results(result, dtype)
- return _maybe_null_out(result, axis, mask)
+nanmin = _nanminmax('min', fill_value_typ='+inf')
+nanmax = _nanminmax('max', fill_value_typ='-inf')
def nanargmax(values, axis=None, skipna=True):
@@ -637,7 +606,7 @@ def _maybe_null_out(result, axis, mask):
else:
result = result.astype('f8')
result[null_mask] = np.nan
- else:
+ elif result is not tslib.NaT:
null_mask = mask.size - mask.sum()
if null_mask == 0:
result = np.nan
diff --git a/pandas/tests/test_dtypes.py b/pandas/tests/test_dtypes.py
index e6df9c894c219..4403465576848 100644
--- a/pandas/tests/test_dtypes.py
+++ b/pandas/tests/test_dtypes.py
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+from itertools import product
import nose
import numpy as np
@@ -148,6 +149,15 @@ def test_dst(self):
self.assertTrue(is_datetimetz(s2))
self.assertEqual(s1.dtype, s2.dtype)
+ def test_parser(self):
+ # pr #11245
+ for tz, constructor in product(('UTC', 'US/Eastern'),
+ ('M8', 'datetime64')):
+ self.assertEqual(
+ DatetimeTZDtype('%s[ns, %s]' % (constructor, tz)),
+ DatetimeTZDtype('ns', tz),
+ )
+
diff --git a/pandas/tests/test_series.py b/pandas/tests/test_series.py
index 79de22b507e2a..3a13af60ae86f 100644
--- a/pandas/tests/test_series.py
+++ b/pandas/tests/test_series.py
@@ -7960,6 +7960,12 @@ def test_datetime_timedelta_quantiles(self):
self.assertTrue(pd.isnull(Series([],dtype='M8[ns]').quantile(.5)))
self.assertTrue(pd.isnull(Series([],dtype='m8[ns]').quantile(.5)))
+ def test_empty_timeseries_redections_return_nat(self):
+ # covers #11245
+ for dtype in ('m8[ns]', 'm8[ns]', 'M8[ns]', 'M8[ns, UTC]'):
+ self.assertIs(Series([], dtype=dtype).min(), pd.NaT)
+ self.assertIs(Series([], dtype=dtype).max(), pd.NaT)
+
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| I ran into some strange behavior with a series of dtype datetime64[ns] where I called max and got back a `nan`. I think the correct behavior here is to return `NaT`. I looked through test_nanops but I am not sure where the correct place to put the test for this is.
The new behavior is:
``` python
In [1]: pd.Series(dtype='datetime64[ns]').max()
Out[1]: NaT
```
where the old behavior was:
``` python
In [1]: pd.Series(dtype='datetime64[ns]').max()
Out[1]: nan
```
| https://api.github.com/repos/pandas-dev/pandas/pulls/11245 | 2015-10-05T18:41:09Z | 2015-10-11T15:17:51Z | 2015-10-11T15:17:51Z | 2015-10-11T15:17:56Z |
DOC: clarify requirements for read_stata's args | diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 5afbc2671e3a7..fd155d62bd5d8 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -74,7 +74,7 @@
Parameters
----------
filepath_or_buffer : string or file-like object
- Path to .dta file or object implementing a binary read() functions
+ Path to .dta file or object implementing a binary read() and seek() functions
%s
%s
%s
| read_stata(buf) needs a buf which implements both read() and seek(). read() is not enough. For example, the object returned from zipfile.ZipFile.open('filename') only provides read() and not seek() and cannot be used.
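One possible workaround sketch for the zipfile case: read the member fully and wrap the bytes in a seekable ``BytesIO`` buffer before handing it to ``read_stata``. The archive and member names here are hypothetical.

``` python
import io
import zipfile

import pandas as pd

with zipfile.ZipFile('data.zip') as zf:   # hypothetical archive
    raw = zf.read('survey.dta')           # plain bytes; zf.open() alone is not seekable

buf = io.BytesIO(raw)                     # supports both read() and seek()
df = pd.read_stata(buf)
```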
| https://api.github.com/repos/pandas-dev/pandas/pulls/11243 | 2015-10-05T15:37:20Z | 2015-10-06T11:18:06Z | null | 2015-10-06T11:18:26Z |
Enhance timeseries documentation | diff --git a/doc/source/timeseries.rst b/doc/source/timeseries.rst
index 7ca4ff0529b4e..65fb2a6133806 100644
--- a/doc/source/timeseries.rst
+++ b/doc/source/timeseries.rst
@@ -1050,6 +1050,29 @@ previous versions, resampling had to be done using a combination of
function on the grouped object. This was not nearly convenient or performant as
the new pandas timeseries API.
+Sparse timeseries
+~~~~~~~~~~~~~~~~~
+
+If your timeseries are sparse, be aware that upsampling will generate a lot of
+intermediate points filled with whatever passed as ``fill_method``. What
+``resample`` does is basically a group by and then applying an aggregation
+method on each of its groups, which can also be achieve with something like the
+following.
+
+.. ipython:: python
+
+ def round(t, freq):
+ # round a Timestamp to a specified freq
+ return Timestamp((t.value // freq.delta.value) * freq.delta.value)
+
+ from functools import partial
+
+ rng = date_range('1/1/2012', periods=100, freq='S')
+
+ ts = Series(randint(0, 500, len(rng)), index=rng)
+
+ ts.groupby(partial(round, freq=offsets.Minute(3))).sum()
+
.. _timeseries.periods:
Time Span Representation
| As discussed in #11217.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11242 | 2015-10-05T14:18:58Z | 2015-10-08T12:25:34Z | null | 2015-10-08T12:25:38Z |
test regarding fix about non propagating along axis on pct_change | diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 3a26be2ca1032..8a1e41b2915ec 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -1850,6 +1850,15 @@ def test_pipe_panel(self):
with tm.assertRaises(ValueError):
result = wp.pipe((f, 'y'), x=1, y=1)
+
+ def test_pct_change(self):
+ pnl = Panel(np.random.rand(10, 10, 10))
+ pnl.iat[1,1,0] = np.nan
+
+ expected = pnl.ffill(axis=1).pct_change(axis=1, fill_method=None)
+ result = pnl.pct_change(axis=1, fill_method='pad')
+
+ assert_panel_equal(result, expected)
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
| https://api.github.com/repos/pandas-dev/pandas/pulls/11241 | 2015-10-05T12:14:20Z | 2015-10-05T12:24:28Z | null | 2015-10-05T12:24:28Z |
|
BUG: HDFStore.append with encoded string itemsize | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 736554672a089..f9ae7c32e956c 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -42,3 +42,10 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+
+
+- Bug in ``HDFStore.append`` with strings whose encoded length exceded the max unencoded length (:issue:`11234`)
+
+
+
+
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index ac2358cb3d231..4de641bb67926 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -1860,7 +1860,8 @@ def set_atom_string(self, block, block_items, existing_col, min_itemsize,
)
# itemsize is the maximum length of a string (along any dimension)
- itemsize = lib.max_len_string_array(com._ensure_object(data.ravel()))
+ data_converted = _convert_string_array(data, encoding)
+ itemsize = data_converted.itemsize
# specified min_itemsize?
if isinstance(min_itemsize, dict):
@@ -1877,10 +1878,7 @@ def set_atom_string(self, block, block_items, existing_col, min_itemsize,
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
- self.set_data(self.convert_string_data(data, itemsize, encoding))
-
- def convert_string_data(self, data, itemsize, encoding):
- return _convert_string_array(data, encoding, itemsize)
+ self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))
def get_atom_coltype(self, kind=None):
""" return the PyTables column class for this column """
diff --git a/pandas/io/tests/test_pytables.py b/pandas/io/tests/test_pytables.py
index df2a659100305..167170f7cd7c5 100644
--- a/pandas/io/tests/test_pytables.py
+++ b/pandas/io/tests/test_pytables.py
@@ -4292,6 +4292,22 @@ def f():
compat_assert_produces_warning(PerformanceWarning, f)
+
+ def test_unicode_longer_encoded(self):
+ # GH 11234
+ char = '\u0394'
+ df = pd.DataFrame({'A': [char]})
+ with ensure_clean_store(self.path) as store:
+ store.put('df', df, format='table', encoding='utf-8')
+ result = store.get('df')
+ tm.assert_frame_equal(result, df)
+
+ df = pd.DataFrame({'A': ['a', char], 'B': ['b', 'b']})
+ with ensure_clean_store(self.path) as store:
+ store.put('df', df, format='table', encoding='utf-8')
+ result = store.get('df')
+ tm.assert_frame_equal(result, df)
+
def test_store_datetime_mixed(self):
df = DataFrame(
| Closes https://github.com/pydata/pandas/issues/11234
The failure came when the maximum length of the unencoded string was smaller than the maximum encoded length.
Need to run a perf check still. We end up having to call `_convert_string_array` twice, once before we know the min_itemsize, and a second time just before appending once we do know the min itemsize.
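A tiny illustration of why the two lengths can differ, using the character from the new test: a one-character string can need more than one byte once encoded, so sizing the column from the unencoded length is not enough.

``` python
s = u'\u0394'                     # GREEK CAPITAL LETTER DELTA, as in the new test
print(len(s))                     # 1 character
print(len(s.encode('utf-8')))     # 2 bytes once encoded
```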
| https://api.github.com/repos/pandas-dev/pandas/pulls/11240 | 2015-10-05T00:15:36Z | 2015-10-09T13:33:02Z | null | 2016-11-03T12:38:27Z |
BUG: to_excel duplicate columns | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 543eea399f447..668873e838597 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -70,3 +70,7 @@ Bug Fixes
- Bug in ``DataFrame.to_latex()`` produces an extra rule when ``header=False`` (:issue:`7124`)
+
+
+
+- Bugs in ``to_excel`` with duplicate columns (:issue:`11007`, :issue:`10982`, :issue:`10970`)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index bc25d14be3960..22e8d6502b358 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1683,12 +1683,12 @@ class ExcelFormatter(object):
def __init__(self, df, na_rep='', float_format=None, cols=None,
header=True, index=True, index_label=None, merge_cells=False,
inf_rep='inf'):
- self.df = df
self.rowcounter = 0
self.na_rep = na_rep
- self.columns = cols
- if cols is None:
- self.columns = df.columns
+ self.df = df
+ if cols is not None:
+ self.df = df.loc[:, cols]
+ self.columns = self.df.columns
self.float_format = float_format
self.index = index
self.index_label = index_label
@@ -1843,12 +1843,9 @@ def _format_regular_rows(self):
for idx, idxval in enumerate(index_values):
yield ExcelCell(self.rowcounter + idx, 0, idxval, header_style)
- # Get a frame that will account for any duplicates in the column names.
- col_mapped_frame = self.df.loc[:, self.columns]
-
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
- series = col_mapped_frame.iloc[:, colidx]
+ series = self.df.iloc[:, colidx]
for i, val in enumerate(series):
yield ExcelCell(self.rowcounter + i, colidx + coloffset, val)
@@ -1917,12 +1914,9 @@ def _format_hierarchical_rows(self):
header_style)
gcolidx += 1
- # Get a frame that will account for any duplicates in the column names.
- col_mapped_frame = self.df.loc[:, self.columns]
-
# Write the body of the frame data series by series.
for colidx in range(len(self.columns)):
- series = col_mapped_frame.iloc[:, colidx]
+ series = self.df.iloc[:, colidx]
for i, val in enumerate(series):
yield ExcelCell(self.rowcounter + i, gcolidx + colidx, val)
diff --git a/pandas/io/tests/test_excel.py b/pandas/io/tests/test_excel.py
index 13bb116638b98..40cbd97ea539f 100644
--- a/pandas/io/tests/test_excel.py
+++ b/pandas/io/tests/test_excel.py
@@ -1346,7 +1346,7 @@ def roundtrip2(df, header=True, parser_hdr=0, index=True):
def test_duplicated_columns(self):
- # Test for issue #5235.
+ # Test for issue #5235
_skip_if_no_xlrd()
with ensure_clean(self.ext) as path:
@@ -1358,7 +1358,20 @@ def test_duplicated_columns(self):
read_frame = read_excel(path, 'test1')
read_frame.columns = colnames
+ tm.assert_frame_equal(write_frame, read_frame)
+
+ # 11007 / #10970
+ write_frame = DataFrame([[1,2,3,4],[5,6,7,8]],
+ columns=['A','B','A','B'])
+ write_frame.to_excel(path, 'test1')
+ read_frame = read_excel(path, 'test1')
+ read_frame.columns = ['A','B','A','B']
+ tm.assert_frame_equal(write_frame, read_frame)
+ # 10982
+ write_frame.to_excel(path, 'test1', index=False, header=False)
+ read_frame = read_excel(path, 'test1', header=None)
+ write_frame.columns = [0, 1, 2, 3]
tm.assert_frame_equal(write_frame, read_frame)
def test_swapped_columns(self):
@@ -1375,6 +1388,23 @@ def test_swapped_columns(self):
tm.assert_series_equal(write_frame['A'], read_frame['A'])
tm.assert_series_equal(write_frame['B'], read_frame['B'])
+ def test_invalid_columns(self):
+ # 10982
+ _skip_if_no_xlrd()
+
+ with ensure_clean(self.ext) as path:
+ write_frame = DataFrame({'A': [1, 1, 1],
+ 'B': [2, 2, 2]})
+
+ write_frame.to_excel(path, 'test1', columns=['B', 'C'])
+ expected = write_frame.loc[:, ['B','C']]
+ read_frame = read_excel(path, 'test1')
+ tm.assert_frame_equal(expected, read_frame)
+
+ with tm.assertRaises(KeyError):
+ write_frame.to_excel(path, 'test1', columns=['C', 'D'])
+
+
def test_datetimes(self):
# Test writing and reading datetimes. For issue #9139. (xref #9185)
| closes #11007
closes #10970 (data in wrong order)
closes #10982 (columns lost).
Using the approach suggested [here](https://github.com/pydata/pandas/issues/11007#issuecomment-141019207)
All three occurred when using `to_excel` with duplicate columns in the `DataFrame`; a minimal round-trip sketch is below.
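A sketch based on the tests above (the file path is a placeholder, and an Excel writer engine such as openpyxl is assumed to be installed):

```python
import pandas as pd

# DataFrame with duplicate column labels; previously the duplicated columns
# could come back with the wrong data or get lost on the way to Excel.
df = pd.DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]],
                  columns=['A', 'B', 'A', 'B'])

df.to_excel('dup_cols.xlsx', 'test1')                # placeholder path
roundtrip = pd.read_excel('dup_cols.xlsx', 'test1')
# read_excel de-duplicates the labels on the way back in (e.g. A.1, B.1),
# so restore them before comparing against the original frame
roundtrip.columns = ['A', 'B', 'A', 'B']
```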
| https://api.github.com/repos/pandas-dev/pandas/pulls/11237 | 2015-10-04T18:21:15Z | 2015-10-10T00:23:39Z | 2015-10-10T00:23:39Z | 2015-10-11T23:36:11Z |
BUG: to_latex() output broken when the index has a name (GH10660) | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 736554672a089..e4a3183983fa9 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -42,3 +42,4 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+Bug in ``.to_latex()`` output broken when the index has a name (:issue: `10660`)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 5f12abb543513..1ad6f250187ee 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -636,11 +636,13 @@ def get_col_type(dtype):
if self.index and isinstance(self.frame.index, MultiIndex):
clevels = self.frame.columns.nlevels
strcols.pop(0)
- name = any(self.frame.columns.names)
+ name = any(self.frame.index.names)
for i, lev in enumerate(self.frame.index.levels):
- lev2 = lev.format(name=name)
+ lev2 = lev.format()
blank = ' ' * len(lev2[0])
lev3 = [blank] * clevels
+ if name:
+ lev3.append(lev.name)
for level_idx, group in itertools.groupby(
self.frame.index.labels[i]):
count = len(list(group))
@@ -667,6 +669,8 @@ def write(buf, frame, column_format, strcols, longtable=False):
buf.write('\\toprule\n')
nlevels = frame.columns.nlevels
+ if any(frame.index.names):
+ nlevels += 1
for i, row in enumerate(zip(*strcols)):
if i == nlevels:
buf.write('\\midrule\n') # End of header
diff --git a/pandas/tests/test_format.py b/pandas/tests/test_format.py
index b5220c8cb2706..b96a6d09983b5 100644
--- a/pandas/tests/test_format.py
+++ b/pandas/tests/test_format.py
@@ -2645,6 +2645,50 @@ def test_to_latex_multiindex(self):
c3 & 0 & 0 & 1 & 2 & 3 \\
\bottomrule
\end{tabular}
+"""
+ self.assertEqual(result, expected)
+
+ # GH 10660
+ df = pd.DataFrame({'a':[0,0,1,1], 'b':list('abab'), 'c':[1,2,3,4]})
+ result = df.set_index(['a', 'b']).to_latex()
+ expected = r"""\begin{tabular}{llr}
+\toprule
+ & & c \\
+a & b & \\
+\midrule
+0 & a & 1 \\
+ & b & 2 \\
+1 & a & 3 \\
+ & b & 4 \\
+\bottomrule
+\end{tabular}
+"""
+ self.assertEqual(result, expected)
+
+ result = df.groupby('a').describe().to_latex()
+ expected = r"""\begin{tabular}{llr}
+\toprule
+ & & c \\
+a & {} & \\
+\midrule
+0 & count & 2.000000 \\
+ & mean & 1.500000 \\
+ & std & 0.707107 \\
+ & min & 1.000000 \\
+ & 25\% & 1.250000 \\
+ & 50\% & 1.500000 \\
+ & 75\% & 1.750000 \\
+ & max & 2.000000 \\
+1 & count & 2.000000 \\
+ & mean & 3.500000 \\
+ & std & 0.707107 \\
+ & min & 3.000000 \\
+ & 25\% & 3.250000 \\
+ & 50\% & 3.500000 \\
+ & 75\% & 3.750000 \\
+ & max & 4.000000 \\
+\bottomrule
+\end{tabular}
"""
self.assertEqual(result, expected)
| closes #10660
First try at contributing to pandas; feedback will be greatly appreciated.
| https://api.github.com/repos/pandas-dev/pandas/pulls/11233 | 2015-10-04T06:55:29Z | 2015-10-09T13:03:57Z | 2015-10-09T13:03:57Z | 2015-10-09T13:04:03Z |
DOC: Add note about unicode layout | diff --git a/doc/source/_static/option_unicode01.png b/doc/source/_static/option_unicode01.png
new file mode 100644
index 0000000000000..d7168de126c5b
Binary files /dev/null and b/doc/source/_static/option_unicode01.png differ
diff --git a/doc/source/_static/option_unicode02.png b/doc/source/_static/option_unicode02.png
new file mode 100644
index 0000000000000..89e81e4f5f0ed
Binary files /dev/null and b/doc/source/_static/option_unicode02.png differ
diff --git a/doc/source/_static/option_unicode03.png b/doc/source/_static/option_unicode03.png
new file mode 100644
index 0000000000000..0b4ee876e17fe
Binary files /dev/null and b/doc/source/_static/option_unicode03.png differ
diff --git a/doc/source/_static/option_unicode04.png b/doc/source/_static/option_unicode04.png
new file mode 100644
index 0000000000000..1b839a44422b3
Binary files /dev/null and b/doc/source/_static/option_unicode04.png differ
diff --git a/doc/source/options.rst b/doc/source/options.rst
index 46ff2b6e5c343..bb78b29d7f205 100644
--- a/doc/source/options.rst
+++ b/doc/source/options.rst
@@ -454,10 +454,14 @@ Unicode Formatting
Some East Asian countries use Unicode characters its width is corresponding to 2 alphabets.
If DataFrame or Series contains these characters, default output cannot be aligned properly.
+.. note:: Screen captures are attached for each outputs to show the actual results.
+
.. ipython:: python
df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']})
- df
+ df;
+
+.. image:: _static/option_unicode01.png
Enable ``display.unicode.east_asian_width`` allows pandas to check each character's "East Asian Width" property.
These characters can be aligned properly by checking this property, but it takes longer time than standard ``len`` function.
@@ -465,31 +469,32 @@ These characters can be aligned properly by checking this property, but it takes
.. ipython:: python
pd.set_option('display.unicode.east_asian_width', True)
- df
+ df;
+
+.. image:: _static/option_unicode02.png
In addition, Unicode contains characters which width is "Ambiguous". These character's width should be either 1 or 2 depending on terminal setting or encoding. Because this cannot be distinguished from Python, ``display.unicode.ambiguous_as_wide`` option is added to handle this.
By default, "Ambiguous" character's width, "¡" (inverted exclamation) in below example, is regarded as 1.
-.. note::
-
- This should be aligned properly in terminal which uses monospaced font.
-
.. ipython:: python
df = pd.DataFrame({'a': ['xxx', u'¡¡'], 'b': ['yyy', u'¡¡']})
- df
+ df;
+
+.. image:: _static/option_unicode03.png
Enabling ``display.unicode.ambiguous_as_wide`` lets pandas to regard these character's width as 2. Note that this option will be effective only when ``display.unicode.east_asian_width`` is enabled. Confirm starting position has been changed, but not aligned properly because the setting is mismatched with this environment.
.. ipython:: python
pd.set_option('display.unicode.ambiguous_as_wide', True)
- df
+ df;
+
+.. image:: _static/option_unicode04.png
.. ipython:: python
:suppress:
pd.set_option('display.unicode.east_asian_width', False)
pd.set_option('display.unicode.ambiguous_as_wide', False)
-
diff --git a/doc/source/whatsnew/v0.17.0.txt b/doc/source/whatsnew/v0.17.0.txt
index 1e240d0786082..ab9cc17a3f990 100644
--- a/doc/source/whatsnew/v0.17.0.txt
+++ b/doc/source/whatsnew/v0.17.0.txt
@@ -353,10 +353,16 @@ Some East Asian countries use Unicode characters its width is corresponding to 2
.. ipython:: python
df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']})
- df
+ df;
+
+.. image:: _static/option_unicode01.png
+
+.. ipython:: python
pd.set_option('display.unicode.east_asian_width', True)
- df
+ df;
+
+.. image:: _static/option_unicode02.png
For further details, see :ref:`here <options.east_asian_width>`
| Follow-up for #11102. Added note because doc output may look incorrect depending on font.
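For reference, a short sketch of the option the new screenshots illustrate (how the repr actually lines up still depends on the terminal font, which is the point of the note):

```python
# -*- coding: utf-8 -*-
import pandas as pd

df = pd.DataFrame({u'国籍': ['UK', u'日本'], u'名前': ['Alice', u'しのぶ']})

# Count East Asian wide characters as width 2 when formatting the repr;
# slower than the default len-based width calculation.
pd.set_option('display.unicode.east_asian_width', True)
print(df)

pd.set_option('display.unicode.east_asian_width', False)  # restore the default
```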
| https://api.github.com/repos/pandas-dev/pandas/pulls/11231 | 2015-10-03T23:23:35Z | 2015-10-07T15:52:05Z | 2015-10-07T15:52:05Z | 2015-10-08T05:09:42Z |
squeeze works on 0 length arrays | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 736554672a089..b4ed00189c710 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -42,3 +42,5 @@ Performance Improvements
Bug Fixes
~~~~~~~~~
+
+- Bug in ``squeeze()`` with zero length arrays (:issue:`11230`, :issue:`8999`)
diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index 3473dd0f7cd88..53ce5c6b4b751 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -513,7 +513,7 @@ def pop(self, item):
def squeeze(self):
""" squeeze length 1 dimensions """
try:
- return self.ix[tuple([slice(None) if len(a) > 1 else a[0]
+ return self.iloc[tuple([0 if len(a) == 1 else slice(None)
for a in self.axes])]
except:
return self
diff --git a/pandas/tests/test_generic.py b/pandas/tests/test_generic.py
index 3a26be2ca1032..061382e0e16de 100644
--- a/pandas/tests/test_generic.py
+++ b/pandas/tests/test_generic.py
@@ -1717,6 +1717,15 @@ def test_squeeze(self):
p4d = tm.makePanel4D().reindex(labels=['label1'],items=['ItemA'])
tm.assert_frame_equal(p4d.squeeze(),p4d.ix['label1','ItemA'])
+ # don't fail with 0 length dimensions GH11229 & GH8999
+ empty_series=pd.Series([], name='five')
+ empty_frame=pd.DataFrame([empty_series])
+ empty_panel=pd.Panel({'six':empty_frame})
+
+ [tm.assert_series_equal(empty_series, higher_dim.squeeze())
+ for higher_dim in [empty_series, empty_frame, empty_panel]]
+
+
def test_equals(self):
s1 = pd.Series([1, 2, 3], index=[0, 2, 1])
s2 = s1.copy()
| fixes: https://github.com/pydata/pandas/issues/11229.
fixes: https://github.com/pydata/pandas/issues/8999.
Also a better implementation that avoids `ix`; a small example of the fixed behaviour is sketched below.
Should I add to What's new for 0.17? Or is that closed now?
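A sketch taken from the test above:

```python
import pandas as pd

# squeeze() now collapses only length-1 axes and leaves zero-length axes
# alone, instead of the old ix-based lookup failing on an empty axis and
# silently falling back to returning the object unchanged.
empty_series = pd.Series([], name='five')
empty_frame = pd.DataFrame([empty_series])   # one row, zero columns

print(empty_frame.squeeze())     # an empty Series, equivalent to empty_series
print(empty_series.squeeze())    # no length-1 axes, so this is a no-op
```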
| https://api.github.com/repos/pandas-dev/pandas/pulls/11230 | 2015-10-03T18:05:48Z | 2015-10-09T13:15:07Z | null | 2015-12-08T03:52:34Z |
defer or remove loading of openhub in docs | diff --git a/doc/source/themes/nature_with_gtoc/layout.html b/doc/source/themes/nature_with_gtoc/layout.html
index 3fae6ef500bdf..fd0755e096023 100644
--- a/doc/source/themes/nature_with_gtoc/layout.html
+++ b/doc/source/themes/nature_with_gtoc/layout.html
@@ -34,12 +34,6 @@ <h3 style="margin-top: 1.5em;">{{ _('Search') }}</h3>
{{ _('Enter search terms or a module, class or function name.') }}
</p>
- <p>
-
- <p>
- <script type="text/javascript"
- src="http://www.ohloh.net/p/482908/widgets/project_partner_badge.js"></script>
- </p>
</div>
{%- endblock %}
{# possible location for sidebar #} {% endblock %}
| Their service was slowing down the loading of our docs. I've added a defer tag so that our docs load first.
Or we can delete that entirely; does anyone know why it's in there to begin with?
| https://api.github.com/repos/pandas-dev/pandas/pulls/11225 | 2015-10-02T19:43:34Z | 2015-10-02T21:26:58Z | 2015-10-02T21:26:58Z | 2016-11-03T12:38:29Z |
ENH: added compression kw to to_csv GH7615 | diff --git a/doc/source/whatsnew/v0.17.1.txt b/doc/source/whatsnew/v0.17.1.txt
index 74ace42eb7e22..94f66f8cfc672 100755
--- a/doc/source/whatsnew/v0.17.1.txt
+++ b/doc/source/whatsnew/v0.17.1.txt
@@ -18,6 +18,8 @@ Highlights include:
Enhancements
~~~~~~~~~~~~
+- Support for ``compression`` (gzip/bz2) in :method:`DataFrame.to_csv` (:issue:`7615`)
+
.. _whatsnew_0171.enhancements.other:
- Improve the error message in :func:`pandas.io.gbq.to_gbq` when a streaming insert fails (:issue:`11285`)
diff --git a/pandas/core/common.py b/pandas/core/common.py
index 0de1f8ca5f7ae..724843d379f64 100644
--- a/pandas/core/common.py
+++ b/pandas/core/common.py
@@ -2846,11 +2846,10 @@ def _get_handle(path, mode, encoding=None, compression=None):
if compression == 'gzip':
import gzip
- f = gzip.GzipFile(path, 'rb')
+ f = gzip.GzipFile(path, mode)
elif compression == 'bz2':
import bz2
-
- f = bz2.BZ2File(path, 'rb')
+ f = bz2.BZ2File(path, mode)
else:
raise ValueError('Unrecognized compression type: %s' %
compression)
diff --git a/pandas/core/format.py b/pandas/core/format.py
index 22e8d6502b358..bf9b3bc8040de 100644
--- a/pandas/core/format.py
+++ b/pandas/core/format.py
@@ -1259,7 +1259,7 @@ class CSVFormatter(object):
def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None,
cols=None, header=True, index=True, index_label=None,
- mode='w', nanRep=None, encoding=None, quoting=None,
+ mode='w', nanRep=None, encoding=None, compression=None, quoting=None,
line_terminator='\n', chunksize=None, engine=None,
tupleize_cols=False, quotechar='"', date_format=None,
doublequote=True, escapechar=None, decimal='.'):
@@ -1281,6 +1281,7 @@ def __init__(self, obj, path_or_buf=None, sep=",", na_rep='', float_format=None,
self.index_label = index_label
self.mode = mode
self.encoding = encoding
+ self.compression = compression
if quoting is None:
quoting = csv.QUOTE_MINIMAL
@@ -1470,7 +1471,8 @@ def save(self):
close = False
else:
f = com._get_handle(self.path_or_buf, self.mode,
- encoding=self.encoding)
+ encoding=self.encoding,
+ compression=self.compression)
close = True
try:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 013bd1c230662..2cdb6d9b04341 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1210,7 +1210,7 @@ def to_panel(self):
def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
- mode='w', encoding=None, quoting=None,
+ mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=False, date_format=None, doublequote=True,
escapechar=None, decimal='.', **kwds):
@@ -1247,6 +1247,10 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
encoding : string, optional
A string representing the encoding to use in the output file,
defaults to 'ascii' on Python 2 and 'utf-8' on Python 3.
+ compression : string, optional
+ a string representing the compression to use in the output file,
+ allowed values are 'gzip', 'bz2',
+ only used when the first argument is a filename
line_terminator : string, default '\\n'
The newline character or character sequence to use in the output
file
@@ -1275,6 +1279,7 @@ def to_csv(self, path_or_buf=None, sep=",", na_rep='', float_format=None,
formatter = fmt.CSVFormatter(self, path_or_buf,
line_terminator=line_terminator,
sep=sep, encoding=encoding,
+ compression=compression,
quoting=quoting, na_rep=na_rep,
float_format=float_format, cols=columns,
header=header, index=index,
diff --git a/pandas/tests/test_frame.py b/pandas/tests/test_frame.py
index 5a9b90f93bb0c..eb88fec716627 100644
--- a/pandas/tests/test_frame.py
+++ b/pandas/tests/test_frame.py
@@ -7328,6 +7328,63 @@ def test_to_csv_path_is_none(self):
recons = pd.read_csv(StringIO(csv_str), index_col=0)
assert_frame_equal(self.frame, recons)
+ def test_to_csv_compression_gzip(self):
+ ## GH7615
+ ## use the compression kw in to_csv
+ df = DataFrame([[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+
+ with ensure_clean() as filename:
+
+ df.to_csv(filename, compression="gzip")
+
+ # test the round trip - to_csv -> read_csv
+ rs = read_csv(filename, compression="gzip", index_col=0)
+ assert_frame_equal(df, rs)
+
+ # explicitly make sure file is gziped
+ import gzip
+ f = gzip.open(filename, 'rb')
+ text = f.read().decode('utf8')
+ f.close()
+ for col in df.columns:
+ self.assertIn(col, text)
+
+ def test_to_csv_compression_bz2(self):
+ ## GH7615
+ ## use the compression kw in to_csv
+ df = DataFrame([[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+
+ with ensure_clean() as filename:
+
+ df.to_csv(filename, compression="bz2")
+
+ # test the round trip - to_csv -> read_csv
+ rs = read_csv(filename, compression="bz2", index_col=0)
+ assert_frame_equal(df, rs)
+
+ # explicitly make sure file is bz2ed
+ import bz2
+ f = bz2.BZ2File(filename, 'rb')
+ text = f.read().decode('utf8')
+ f.close()
+ for col in df.columns:
+ self.assertIn(col, text)
+
+ def test_to_csv_compression_value_error(self):
+ ## GH7615
+ ## use the compression kw in to_csv
+ df = DataFrame([[0.123456, 0.234567, 0.567567],
+ [12.32112, 123123.2, 321321.2]],
+ index=['A', 'B'], columns=['X', 'Y', 'Z'])
+
+ with ensure_clean() as filename:
+ # zip compression is not supported and should raise ValueError
+ self.assertRaises(ValueError, df.to_csv, filename, compression="zip")
+
def test_info(self):
io = StringIO()
self.frame.info(buf=io)
| This closes #7615 and represents work that started in #2636.
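A quick usage sketch of the new keyword (the file name is a placeholder):

```python
import pandas as pd

df = pd.DataFrame({'A': [1, 2, 3], 'B': [4.5, 5.5, 6.5]})

# Write a gzip-compressed csv and read it back; 'bz2' works the same way,
# and any other value (e.g. 'zip') raises ValueError.
df.to_csv('frame.csv.gz', compression='gzip')
roundtrip = pd.read_csv('frame.csv.gz', compression='gzip', index_col=0)
assert roundtrip.equals(df)
```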
| https://api.github.com/repos/pandas-dev/pandas/pulls/11219 | 2015-10-02T12:34:30Z | 2015-10-12T15:49:37Z | 2015-10-12T15:49:37Z | 2016-04-23T07:37:46Z |
BUG: edge case when reading from postgresql with read_sql_query and datetime with tz and chunksize | diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 34f28e2fbfacb..721a2c1f350ee 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -18,6 +18,7 @@
from pandas.core.api import DataFrame, Series
from pandas.core.common import isnull
from pandas.core.base import PandasObject
+from pandas.core.dtypes import DatetimeTZDtype
from pandas.tseries.tools import to_datetime
from pandas.util.decorators import Appender
@@ -89,6 +90,10 @@ def _handle_date_column(col, format=None):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=True)
+ elif com.is_datetime64tz_dtype(col):
+ # coerce to UTC timezone
+ # GH11216
+ return to_datetime(col,errors='coerce').astype('datetime64[ns, UTC]')
else:
return to_datetime(col, errors='coerce', format=format, utc=True)
@@ -113,6 +118,14 @@ def _parse_date_columns(data_frame, parse_dates):
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
+
+ # we want to coerce datetime64_tz dtypes for now
+ # we could in theory do a 'nice' conversion from a FixedOffset tz
+ # GH11216
+ for col_name, df_col in data_frame.iteritems():
+ if com.is_datetime64tz_dtype(df_col):
+ data_frame[col_name] = _handle_date_column(df_col)
+
return data_frame
@@ -366,7 +379,7 @@ def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed.
- con : SQLAlchemy connectable(engine/connection) or database string URI
+ con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
@@ -898,11 +911,10 @@ def _harmonize_columns(self, parse_dates=None):
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
- col_type = self._numpy_type(sql_col.type)
+ col_type = self._get_dtype(sql_col.type)
- if col_type is datetime or col_type is date:
- if not issubclass(df_col.dtype.type, np.datetime64):
- self.frame[col_name] = _handle_date_column(df_col)
+ if col_type is datetime or col_type is date or col_type is DatetimeTZDtype:
+ self.frame[col_name] = _handle_date_column(df_col)
elif col_type is float:
# floats support NA, can always convert!
@@ -982,20 +994,25 @@ def _sqlalchemy_type(self, col):
return Text
- def _numpy_type(self, sqltype):
- from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date
+ def _get_dtype(self, sqltype):
+ from sqlalchemy.types import Integer, Float, Boolean, DateTime, Date, TIMESTAMP
if isinstance(sqltype, Float):
return float
- if isinstance(sqltype, Integer):
+ elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
- if isinstance(sqltype, DateTime):
+ elif isinstance(sqltype, TIMESTAMP):
+ # we have a timezone capable type
+ if not sqltype.timezone:
+ return datetime
+ return DatetimeTZDtype
+ elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
- if isinstance(sqltype, Date):
+ elif isinstance(sqltype, Date):
return date
- if isinstance(sqltype, Boolean):
+ elif isinstance(sqltype, Boolean):
return bool
return object
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index 15e241dae895e..aced92ec8abd0 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -26,6 +26,7 @@
import nose
import warnings
import numpy as np
+import pandas as pd
from datetime import datetime, date, time
@@ -33,6 +34,7 @@
from pandas import date_range, to_datetime, to_timedelta, Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, lrange, string_types
+from pandas.core import common as com
from pandas.core.datetools import format as date_format
import pandas.io.sql as sql
@@ -1248,6 +1250,66 @@ def test_default_date_load(self):
self.assertTrue(issubclass(df.DateCol.dtype.type, np.datetime64),
"DateCol loaded with incorrect type")
+ def test_datetime_with_timezone(self):
+ # edge case that converts postgresql datetime with time zone types
+ # to datetime64[ns,psycopg2.tz.FixedOffsetTimezone..], which is ok
+ # but should be more natural, so coerce to datetime64[ns] for now
+
+ def check(col):
+ # check that a column is either datetime64[ns]
+ # or datetime64[ns, UTC]
+ if com.is_datetime64_dtype(col.dtype):
+
+ # "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
+ self.assertEqual(col[0], Timestamp('2000-01-01 08:00:00'))
+
+ # "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
+ self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00'))
+
+ elif com.is_datetime64tz_dtype(col.dtype):
+ self.assertTrue(str(col.dt.tz) == 'UTC')
+
+ # "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
+ self.assertEqual(col[0], Timestamp('2000-01-01 08:00:00', tz='UTC'))
+
+ # "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
+ self.assertEqual(col[1], Timestamp('2000-06-01 07:00:00', tz='UTC'))
+
+ else:
+ raise AssertionError("DateCol loaded with incorrect type -> {0}".format(col.dtype))
+
+ # GH11216
+ df = pd.read_sql_query("select * from types_test_data", self.conn)
+ if not hasattr(df,'DateColWithTz'):
+ raise nose.SkipTest("no column with datetime with time zone")
+
+ # this is parsed on Travis (linux), but not on macosx for some reason
+ # even with the same versions of psycopg2 & sqlalchemy, possibly a Postgrsql server
+ # version difference
+ col = df.DateColWithTz
+ self.assertTrue(com.is_object_dtype(col.dtype) or com.is_datetime64_dtype(col.dtype) \
+ or com.is_datetime64tz_dtype(col.dtype),
+ "DateCol loaded with incorrect type -> {0}".format(col.dtype))
+
+ df = pd.read_sql_query("select * from types_test_data", self.conn, parse_dates=['DateColWithTz'])
+ if not hasattr(df,'DateColWithTz'):
+ raise nose.SkipTest("no column with datetime with time zone")
+ check(df.DateColWithTz)
+
+ df = pd.concat(list(pd.read_sql_query("select * from types_test_data",
+ self.conn,chunksize=1)),ignore_index=True)
+ col = df.DateColWithTz
+ self.assertTrue(com.is_datetime64tz_dtype(col.dtype),
+ "DateCol loaded with incorrect type -> {0}".format(col.dtype))
+ self.assertTrue(str(col.dt.tz) == 'UTC')
+ expected = sql.read_sql_table("types_test_data", self.conn)
+ tm.assert_series_equal(df.DateColWithTz, expected.DateColWithTz.astype('datetime64[ns, UTC]'))
+
+ # xref #7139
+ # this might or might not be converted depending on the postgres driver
+ df = sql.read_sql_table("types_test_data", self.conn)
+ check(df.DateColWithTz)
+
def test_date_parsing(self):
# No Parsing
df = sql.read_sql_table("types_test_data", self.conn)
@@ -1746,23 +1808,6 @@ def test_schema_support(self):
res2 = pdsql.read_table('test_schema_other2')
tm.assert_frame_equal(res1, res2)
- def test_datetime_with_time_zone(self):
-
- # Test to see if we read the date column with timezones that
- # the timezone information is converted to utc and into a
- # np.datetime64 (GH #7139)
-
- df = sql.read_sql_table("types_test_data", self.conn)
- self.assertTrue(issubclass(df.DateColWithTz.dtype.type, np.datetime64),
- "DateColWithTz loaded with incorrect type -> {0}".format(df.DateColWithTz.dtype))
-
- # "2000-01-01 00:00:00-08:00" should convert to "2000-01-01 08:00:00"
- self.assertEqual(df.DateColWithTz[0], Timestamp('2000-01-01 08:00:00'))
-
- # "2000-06-01 00:00:00-07:00" should convert to "2000-06-01 07:00:00"
- self.assertEqual(df.DateColWithTz[1], Timestamp('2000-06-01 07:00:00'))
-
-
class TestMySQLAlchemy(_TestMySQLAlchemy, _TestSQLAlchemy):
pass
| - When we don't specify a chunksize we get an object dtype, which is ok
- We create a proper datetime64[ns, tz] type, but it's a pytz.FixedOffset(....),
 which ATM is not really a useful/palatable type and is mostly confusing for now.
 In the future we could attempt to coerce this to a nice tz, e.g. US/Eastern; not sure if
 this is possible. A standalone sketch of the UTC coercion is below.
- Note that this is w/o parse_dates specified
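A standalone sketch of the coercion the patch applies (no database needed; the column just mimics what psycopg2 hands back for a "timestamp with time zone"):

```python
import pandas as pd
import pytz

# tz-aware values carrying a fixed utc offset rather than a named zone,
# as returned by the postgresql driver
tz = pytz.FixedOffset(-480)                       # UTC-08:00
col = pd.Series([pd.Timestamp('2000-01-01 00:00:00', tz=tz),
                 pd.Timestamp('2000-06-01 00:00:00', tz=tz)])

# the coercion used in the patch: parse and force the column to UTC, so
# chunked and non-chunked reads agree on datetime64[ns, UTC]
coerced = pd.to_datetime(col, errors='coerce').astype('datetime64[ns, UTC]')
print(coerced.dtype)                              # datetime64[ns, UTC]
print(coerced[0])                                 # 2000-01-01 08:00:00+00:00
```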
| https://api.github.com/repos/pandas-dev/pandas/pulls/11216 | 2015-10-02T01:55:57Z | 2015-10-03T15:48:50Z | 2015-10-03T15:48:50Z | 2017-01-16T10:56:11Z |