Dataset Viewer

| title | diff | body | url | created_at | closed_at | merged_at | updated_at |
|---|---|---|---|---|---|---|---|
	clarified the documentation for DF.drop_duplicates 
 | 
	diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 78c9f2aa96472..ade05ab27093e 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -4625,7 +4625,8 @@ def dropna(self, axis=0, how='any', thresh=None, subset=None,
     def drop_duplicates(self, subset=None, keep='first', inplace=False):
         """
         Return DataFrame with duplicate rows removed, optionally only
-        considering certain columns.
+        considering certain columns. Indexes, including time indexes,
+        are ignored.
 
         Parameters
         ----------
 
 | 
	I hit an issue with a time-series index: I wanted to keep duplicate data that had different time values and delete only rows where both the time and the column values matched. This documentation change would have saved me a lot of time.
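A minimal sketch of the behavior the new wording describes (my own illustration, not part of the PR): `drop_duplicates` compares column values only, so rows that differ solely in their time index still count as duplicates; resetting the index first makes the time part of the comparison.

```python
import pandas as pd

idx = pd.to_datetime(['2019-01-01', '2019-01-02'])
df = pd.DataFrame({'a': [1, 1]}, index=idx)

# The time index is ignored, so the two rows are duplicates:
df.drop_duplicates()                # keeps only the first row

# To consider the time as well, turn the index into a column first:
df.reset_index().drop_duplicates()  # keeps both rows (times differ)
```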
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25056 
 | 
	2019-01-31T17:14:09Z 
 | 
	2019-02-01T18:24:37Z 
 | 
	2019-02-01T18:24:37Z 
 | 
	2019-02-02T14:50:36Z 
 | 
					
	Clarification of docstring for value_counts 
 | 
	diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst
index 58e1b2370c7c8..b23a0f10e9e2b 100644
--- a/doc/source/user_guide/io.rst
+++ b/doc/source/user_guide/io.rst
@@ -989,6 +989,36 @@ a single date rather than the entire array.
 
    os.remove('tmp.csv')
 
+
+.. _io.csv.mixed_timezones:
+
+Parsing a CSV with mixed Timezones
+++++++++++++++++++++++++++++++++++
+
+Pandas cannot natively represent a column or index with mixed timezones. If your CSV
+file contains columns with a mixture of timezones, the default result will be
+an object-dtype column with strings, even with ``parse_dates``.
+
+
+.. ipython:: python
+
+   content = """\
+   a
+   2000-01-01T00:00:00+05:00
+   2000-01-01T00:00:00+06:00"""
+   df = pd.read_csv(StringIO(content), parse_dates=['a'])
+   df['a']
+
+To parse the mixed-timezone values as a datetime column, pass a partially-applied
+:func:`to_datetime` with ``utc=True`` as the ``date_parser``.
+
+.. ipython:: python
+
+   df = pd.read_csv(StringIO(content), parse_dates=['a'],
+                    date_parser=lambda col: pd.to_datetime(col, utc=True))
+   df['a']
+
+
 .. _io.dayfirst:
 
 
diff --git a/doc/source/whatsnew/v0.24.0.rst b/doc/source/whatsnew/v0.24.0.rst
index fc963fce37a5b..a49ea2cf493a6 100644
--- a/doc/source/whatsnew/v0.24.0.rst
+++ b/doc/source/whatsnew/v0.24.0.rst
@@ -6,7 +6,8 @@ What's New in 0.24.0 (January 25, 2019)
 .. warning::
 
    The 0.24.x series of releases will be the last to support Python 2. Future feature
-   releases will support Python 3 only. See :ref:`install.dropping-27` for more.
+   releases will support Python 3 only. See :ref:`install.dropping-27` for more
+   details.
 
 {{ header }}
 
@@ -244,7 +245,7 @@ the new extension arrays that back interval and period data.
 Joining with two multi-indexes
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
-:func:`DataFrame.merge` and :func:`DataFrame.join` can now be used to join multi-indexed ``Dataframe`` instances on the overlaping index levels (:issue:`6360`)
+:func:`DataFrame.merge` and :func:`DataFrame.join` can now be used to join multi-indexed ``Dataframe`` instances on the overlapping index levels (:issue:`6360`)
 
 See the :ref:`Merge, join, and concatenate
 <merging.Join_with_two_multi_indexes>` documentation section.
@@ -647,6 +648,52 @@ that the dates have been converted to UTC
     pd.to_datetime(["2015-11-18 15:30:00+05:30",
                     "2015-11-18 16:30:00+06:30"], utc=True)
 
+
+.. _whatsnew_0240.api_breaking.read_csv_mixed_tz:
+
+Parsing mixed-timezones with :func:`read_csv`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+:func:`read_csv` no longer silently converts mixed-timezone columns to UTC (:issue:`24987`).
+
+*Previous Behavior*
+
+.. code-block:: python
+
+   >>> import io
+   >>> content = """\
+   ... a
+   ... 2000-01-01T00:00:00+05:00
+   ... 2000-01-01T00:00:00+06:00"""
+   >>> df = pd.read_csv(io.StringIO(content), parse_dates=['a'])
+   >>> df.a
+   0   1999-12-31 19:00:00
+   1   1999-12-31 18:00:00
+   Name: a, dtype: datetime64[ns]
+
+*New Behavior*
+
+.. ipython:: python
+
+   import io
+   content = """\
+   a
+   2000-01-01T00:00:00+05:00
+   2000-01-01T00:00:00+06:00"""
+   df = pd.read_csv(io.StringIO(content), parse_dates=['a'])
+   df.a
+
+As can be seen, the ``dtype`` is object; each value in the column is a string.
+To convert the strings to an array of datetimes, pass :func:`to_datetime` with ``utc=True`` as the ``date_parser`` argument.
+
+.. ipython:: python
+
+   df = pd.read_csv(io.StringIO(content), parse_dates=['a'],
+                    date_parser=lambda col: pd.to_datetime(col, utc=True))
+   df.a
+
+See :ref:`whatsnew_0240.api.timezone_offset_parsing` for more.
+
 .. _whatsnew_0240.api_breaking.period_end_time:
 
 Time values in ``dt.end_time`` and ``to_timestamp(how='end')``
diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index ee4b7ab62b31a..047404e93914b 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -15,6 +15,15 @@ Whats New in 0.24.1 (February XX, 2019)
 These are the changes in pandas 0.24.1. See :ref:`release` for a full changelog
 including other versions of pandas.
 
+.. _whatsnew_0241.regressions:
+
+Fixed Regressions
+^^^^^^^^^^^^^^^^^
+
+- Fixed regression in :meth:`DataFrame.to_dict` with ``records`` orient raising an ``AttributeError`` when the ``DataFrame`` contained more than 255 columns (:issue:`24939`)
+- Fixed regression in :meth:`DataFrame.to_dict` with ``records`` orient converting integer column names to strings prepended with an underscore (:issue:`24940`)
+- Fixed regression in :meth:`Index.intersection` incorrectly sorting the values by default (:issue:`24959`).
+- Fixed regression in :func:`merge` when merging an empty ``DataFrame`` with multiple timezone-aware columns on one of the timezone-aware columns (:issue:`25014`).
 
 .. _whatsnew_0241.enhancements:
 
@@ -58,11 +67,19 @@ Bug Fixes
 -
 
 **Timedelta**
-
+- Bug in :func:`to_timedelta` with ``box=False`` incorrectly returning a ``datetime64`` object instead of a ``timedelta64`` object (:issue:`24961`)
 -
 -
 -
 
+**Reshaping**
+
+- Bug in :meth:`DataFrame.groupby` with :class:`Grouper` when there is a time change (DST) and grouping frequency is ``'1d'`` (:issue:`24972`)
+
+**Visualization**
+
+- Fixed the warning for implicitly registered matplotlib converters not showing. See :ref:`whatsnew_0211.converters` for more (:issue:`24963`).
+
 
 **Other**
 
diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 47517782e2bbf..791ff44303e96 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -222,7 +222,7 @@ def __getitem__(self, item):
             item = item._ndarray
 
         result = self._ndarray[item]
-        if not lib.is_scalar(result):
+        if not lib.is_scalar(item):
             result = type(self)(result)
         return result
 
diff --git a/pandas/core/base.py b/pandas/core/base.py
index c02ba88ea7fda..7b3152595e4b2 100644
--- a/pandas/core/base.py
+++ b/pandas/core/base.py
@@ -1234,7 +1234,7 @@ def value_counts(self, normalize=False, sort=True, ascending=False,
             If True then the object returned will contain the relative
             frequencies of the unique values.
         sort : boolean, default True
-            Sort by values.
+            Sort by frequencies.
         ascending : boolean, default False
             Sort in ascending order.
         bins : integer, optional
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index b4f79bda25517..28c6f3c23a3ce 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -847,7 +847,7 @@ def itertuples(self, index=True, name="Pandas"):
         ----------
         index : bool, default True
             If True, return the index as the first element of the tuple.
-        name : str, default "Pandas"
+        name : str or None, default "Pandas"
             The name of the returned namedtuples or None to return regular
             tuples.
 
@@ -1290,23 +1290,26 @@ def to_dict(self, orient='dict', into=dict):
                            ('columns', self.columns.tolist()),
                            ('data', [
                                list(map(com.maybe_box_datetimelike, t))
-                               for t in self.itertuples(index=False)]
-                            )))
+                               for t in self.itertuples(index=False, name=None)
+                           ])))
         elif orient.lower().startswith('s'):
             return into_c((k, com.maybe_box_datetimelike(v))
                           for k, v in compat.iteritems(self))
         elif orient.lower().startswith('r'):
+            columns = self.columns.tolist()
+            rows = (dict(zip(columns, row))
+                    for row in self.itertuples(index=False, name=None))
             return [
                 into_c((k, com.maybe_box_datetimelike(v))
-                       for k, v in compat.iteritems(row._asdict()))
-                for row in self.itertuples(index=False)]
+                       for k, v in compat.iteritems(row))
+                for row in rows]
         elif orient.lower().startswith('i'):
             if not self.index.is_unique:
                 raise ValueError(
                     "DataFrame index must be unique for orient='index'."
                 )
             return into_c((t[0], dict(zip(self.columns, t[1:])))
-                          for t in self.itertuples())
+                          for t in self.itertuples(name=None))
         else:
             raise ValueError("orient '{o}' not understood".format(o=orient))
 
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 767da81c5c43a..3d176012df22b 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2333,7 +2333,7 @@ def union(self, other, sort=True):
     def _wrap_setop_result(self, other, result):
         return self._constructor(result, name=get_op_result_name(self, other))
 
-    def intersection(self, other, sort=True):
+    def intersection(self, other, sort=False):
         """
         Form the intersection of two Index objects.
 
@@ -2342,11 +2342,15 @@ def intersection(self, other, sort=True):
         Parameters
         ----------
         other : Index or array-like
-        sort : bool, default True
+        sort : bool, default False
             Sort the resulting index if possible
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Changed the default from ``True`` to ``False``.
+
         Returns
         -------
         intersection : Index
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index cc373c06efcc9..ef941ab87ba12 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -594,7 +594,7 @@ def _wrap_setop_result(self, other, result):
         name = get_op_result_name(self, other)
         return self._shallow_copy(result, name=name, freq=None, tz=self.tz)
 
-    def intersection(self, other, sort=True):
+    def intersection(self, other, sort=False):
         """
         Specialized intersection for DatetimeIndex objects. May be much faster
         than Index.intersection
@@ -602,6 +602,14 @@ def intersection(self, other, sort=True):
         Parameters
         ----------
         other : DatetimeIndex or array-like
+        sort : bool, default False
+            Sort the resulting index if possible.
+
+            .. versionadded:: 0.24.0
+
+            .. versionchanged:: 0.24.1
+
+               Changed the default from ``True`` to ``False``.
 
         Returns
         -------
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 0210560aaa21f..736de94991181 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1093,8 +1093,8 @@ def equals(self, other):
     def overlaps(self, other):
         return self._data.overlaps(other)
 
-    def _setop(op_name):
-        def func(self, other, sort=True):
+    def _setop(op_name, sort=True):
+        def func(self, other, sort=sort):
             other = self._as_like_interval_index(other)
 
             # GH 19016: ensure set op will not return a prohibited dtype
@@ -1128,7 +1128,7 @@ def is_all_dates(self):
         return False
 
     union = _setop('union')
-    intersection = _setop('intersection')
+    intersection = _setop('intersection', sort=False)
     difference = _setop('difference')
     symmetric_difference = _setop('symmetric_difference')
 
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index e4d01a40bd181..16af3fe8eef26 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2910,7 +2910,7 @@ def union(self, other, sort=True):
         return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
                                       names=result_names)
 
-    def intersection(self, other, sort=True):
+    def intersection(self, other, sort=False):
         """
         Form the intersection of two MultiIndex objects.
 
@@ -2922,6 +2922,10 @@ def intersection(self, other, sort=True):
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Changed the default from ``True`` to ``False``.
+
         Returns
         -------
         Index
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index ebf5b279563cf..e17a6a682af40 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -343,7 +343,7 @@ def equals(self, other):
 
         return super(RangeIndex, self).equals(other)
 
-    def intersection(self, other, sort=True):
+    def intersection(self, other, sort=False):
         """
         Form the intersection of two Index objects.
 
@@ -355,6 +355,10 @@ def intersection(self, other, sort=True):
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Changed the default from ``True`` to ``False``.
+
         Returns
         -------
         intersection : Index
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 4a16707a376e9..640587b7f9f31 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -183,7 +183,7 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
                         is_datetime64tz_dtype(empty_dtype)):
                     if self.block is None:
                         array = empty_dtype.construct_array_type()
-                        return array(np.full(self.shape[1], fill_value),
+                        return array(np.full(self.shape[1], fill_value.value),
                                      dtype=empty_dtype)
                     pass
                 elif getattr(self.block, 'is_categorical', False):
@@ -335,8 +335,10 @@ def get_empty_dtype_and_na(join_units):
     elif 'category' in upcast_classes:
         return np.dtype(np.object_), np.nan
     elif 'datetimetz' in upcast_classes:
+        # GH-25014. We use NaT instead of iNaT, since this eventually
+        # ends up in DatetimeArray.take, which does not allow iNaT.
         dtype = upcast_classes['datetimetz']
-        return dtype[0], tslibs.iNaT
+        return dtype[0], tslibs.NaT
     elif 'datetime' in upcast_classes:
         return np.dtype('M8[ns]'), tslibs.iNaT
     elif 'timedelta' in upcast_classes:
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 6822225273906..7723827ff478a 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -30,8 +30,7 @@
 from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
 
 from pandas.tseries.frequencies import to_offset
-from pandas.tseries.offsets import (
-    DateOffset, Day, Nano, Tick, delta_to_nanoseconds)
+from pandas.tseries.offsets import DateOffset, Day, Nano, Tick
 
 _shared_docs_kwargs = dict()
 
@@ -1613,20 +1612,20 @@ def _get_timestamp_range_edges(first, last, offset, closed='left', base=0):
     A tuple of length 2, containing the adjusted pd.Timestamp objects.
     """
     if isinstance(offset, Tick):
-        is_day = isinstance(offset, Day)
-        day_nanos = delta_to_nanoseconds(timedelta(1))
-
-        # #1165 and #24127
-        if (is_day and not offset.nanos % day_nanos) or not is_day:
-            first, last = _adjust_dates_anchored(first, last, offset,
-                                                 closed=closed, base=base)
-            if is_day and first.tz is not None:
-                # _adjust_dates_anchored assumes 'D' means 24H, but first/last
-                # might contain a DST transition (23H, 24H, or 25H).
-                # Ensure first/last snap to midnight.
-                first = first.normalize()
-                last = last.normalize()
-            return first, last
+        if isinstance(offset, Day):
+            # _adjust_dates_anchored assumes 'D' means 24H, but first/last
+            # might contain a DST transition (23H, 24H, or 25H).
+            # So "pretend" the dates are naive when adjusting the endpoints
+            tz = first.tz
+            first = first.tz_localize(None)
+            last = last.tz_localize(None)
+
+        first, last = _adjust_dates_anchored(first, last, offset,
+                                             closed=closed, base=base)
+        if isinstance(offset, Day):
+            first = first.tz_localize(tz)
+            last = last.tz_localize(tz)
+        return first, last
 
     else:
         first = first.normalize()
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index e3428146b91d8..ddd21d0f62d08 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -120,7 +120,8 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
     try:
         result = Timedelta(r, unit)
         if not box:
-            result = result.asm8
+            # explicitly view as timedelta64 for case when result is pd.NaT
+            result = result.asm8.view('timedelta64[ns]')
     except ValueError:
         if errors == 'raise':
             raise
diff --git a/pandas/io/parsers.py b/pandas/io/parsers.py
index b31d3f665f47f..4163a571df800 100755
--- a/pandas/io/parsers.py
+++ b/pandas/io/parsers.py
@@ -203,9 +203,14 @@
     * dict, e.g. {{'foo' : [1, 3]}} -> parse columns 1, 3 as date and call
       result 'foo'
 
-    If a column or index contains an unparseable date, the entire column or
-    index will be returned unaltered as an object data type. For non-standard
-    datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
+    If a column or index cannot be represented as an array of datetimes,
+    say because of an unparseable value or a mixture of timezones, the column
+    or index will be returned unaltered as an object data type. For
+    non-standard datetime parsing, use ``pd.to_datetime`` after
+    ``pd.read_csv``. To parse an index or column with a mixture of timezones,
+    specify ``date_parser`` to be a partially-applied
+    :func:`pandas.to_datetime` with ``utc=True``. See
+    :ref:`io.csv.mixed_timezones` for more.
 
     Note: A fast-path exists for iso8601-formatted dates.
 infer_datetime_format : bool, default False
diff --git a/pandas/plotting/_core.py b/pandas/plotting/_core.py
index e543ab88f53b2..85549bafa8dc0 100644
--- a/pandas/plotting/_core.py
+++ b/pandas/plotting/_core.py
@@ -39,7 +39,7 @@
 else:
     _HAS_MPL = True
     if get_option('plotting.matplotlib.register_converters'):
-        _converter.register(explicit=True)
+        _converter.register(explicit=False)
 
 
 def _raise_if_no_mpl():
diff --git a/pandas/tests/extension/numpy_/__init__.py b/pandas/tests/extension/numpy_/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/extension/numpy_/conftest.py b/pandas/tests/extension/numpy_/conftest.py
new file mode 100644
index 0000000000000..daa93571c2957
--- /dev/null
+++ b/pandas/tests/extension/numpy_/conftest.py
@@ -0,0 +1,38 @@
+import numpy as np
+import pytest
+
+from pandas.core.arrays.numpy_ import PandasArray
+
+
+@pytest.fixture
+def allow_in_pandas(monkeypatch):
+    """
+    A monkeypatch to tell pandas to let us in.
+
+    By default, passing a PandasArray to an index / series / frame
+    constructor will unbox that PandasArray to an ndarray, and treat
+    it as a non-EA column. We don't want people using EAs without
+    reason.
+
+    The mechanism for this is a check against ABCPandasArray
+    in each constructor.
+
+    But, for testing, we need to allow them in pandas. So we patch
+    the _typ of PandasArray, so that we evade the ABCPandasArray
+    check.
+    """
+    with monkeypatch.context() as m:
+        m.setattr(PandasArray, '_typ', 'extension')
+        yield
+
+
+@pytest.fixture
+def na_value():
+    return np.nan
+
+
+@pytest.fixture
+def na_cmp():
+    def cmp(a, b):
+        return np.isnan(a) and np.isnan(b)
+    return cmp
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/numpy_/test_numpy.py
similarity index 84%
rename from pandas/tests/extension/test_numpy.py
rename to pandas/tests/extension/numpy_/test_numpy.py
index 7ca6882c7441b..4c93d5ee0b9d7 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/numpy_/test_numpy.py
@@ -6,7 +6,7 @@
 from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
 import pandas.util.testing as tm
 
-from . import base
+from .. import base
 
 
 @pytest.fixture
@@ -14,28 +14,6 @@ def dtype():
     return PandasDtype(np.dtype('float'))
 
 
[email protected]
-def allow_in_pandas(monkeypatch):
-    """
-    A monkeypatch to tells pandas to let us in.
-
-    By default, passing a PandasArray to an index / series / frame
-    constructor will unbox that PandasArray to an ndarray, and treat
-    it as a non-EA column. We don't want people using EAs without
-    reason.
-
-    The mechanism for this is a check against ABCPandasArray
-    in each constructor.
-
-    But, for testing, we need to allow them in pandas. So we patch
-    the _typ of PandasArray, so that we evade the ABCPandasArray
-    check.
-    """
-    with monkeypatch.context() as m:
-        m.setattr(PandasArray, '_typ', 'extension')
-        yield
-
-
 @pytest.fixture
 def data(allow_in_pandas, dtype):
     return PandasArray(np.arange(1, 101, dtype=dtype._dtype))
@@ -46,18 +24,6 @@ def data_missing(allow_in_pandas):
     return PandasArray(np.array([np.nan, 1.0]))
 
 
[email protected]
-def na_value():
-    return np.nan
-
-
[email protected]
-def na_cmp():
-    def cmp(a, b):
-        return np.isnan(a) and np.isnan(b)
-    return cmp
-
-
 @pytest.fixture
 def data_for_sorting(allow_in_pandas):
     """Length-3 array with a known sort order.
diff --git a/pandas/tests/extension/numpy_/test_numpy_nested.py b/pandas/tests/extension/numpy_/test_numpy_nested.py
new file mode 100644
index 0000000000000..cf9b34dd08798
--- /dev/null
+++ b/pandas/tests/extension/numpy_/test_numpy_nested.py
@@ -0,0 +1,286 @@
+"""
+Tests for PandasArray with nested data. Users typically won't create
+these objects via `pd.array`, but they can show up through `.array`
+on a Series with nested data.
+
+We partition these tests into their own file, as many of the base
+tests fail, as they aren't appropriate for nested data. It is easier
+to have a separate file with its own data-generating fixtures than
+trying to skip based upon the value of a fixture.
+"""
+import pytest
+
+import pandas as pd
+from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
+
+from .. import base
+
+# For NumPy <1.16, np.array([np.nan, (1,)]) raises
+# ValueError: setting an array element with a sequence.
+np = pytest.importorskip('numpy', minversion='1.16.0')
+
+
+@pytest.fixture
+def dtype():
+    return PandasDtype(np.dtype('object'))
+
+
+@pytest.fixture
+def data(allow_in_pandas, dtype):
+    return pd.Series([(i,) for i in range(100)]).array
+
+
+@pytest.fixture
+def data_missing(allow_in_pandas):
+    return PandasArray(np.array([np.nan, (1,)]))
+
+
+@pytest.fixture
+def data_for_sorting(allow_in_pandas):
+    """Length-3 array with a known sort order.
+
+    This should be three items [B, C, A] with
+    A < B < C
+    """
+    # Use an empty tuple for first element, then remove,
+    # to disable np.array's shape inference.
+    return PandasArray(
+        np.array([(), (2,), (3,), (1,)])[1:]
+    )
+
+
+@pytest.fixture
+def data_missing_for_sorting(allow_in_pandas):
+    """Length-3 array with a known sort order.
+
+    This should be three items [B, NA, A] with
+    A < B and NA missing.
+    """
+    return PandasArray(
+        np.array([(1,), np.nan, (0,)])
+    )
+
+
+@pytest.fixture
+def data_for_grouping(allow_in_pandas):
+    """Data for factorization, grouping, and unique tests.
+
+    Expected to be like [B, B, NA, NA, A, A, B, C]
+
+    Where A < B < C and NA is missing
+    """
+    a, b, c = (1,), (2,), (3,)
+    return PandasArray(np.array(
+        [b, b, np.nan, np.nan, a, a, b, c]
+    ))
+
+
+skip_nested = pytest.mark.skip(reason="Skipping for nested PandasArray")
+
+
+class BaseNumPyTests(object):
+    pass
+
+
+class TestCasting(BaseNumPyTests, base.BaseCastingTests):
+
+    @skip_nested
+    def test_astype_str(self, data):
+        pass
+
+
+class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests):
+    @pytest.mark.skip(reason="We don't register our dtype")
+    # We don't want to register. This test should probably be split in two.
+    def test_from_dtype(self, data):
+        pass
+
+    @skip_nested
+    def test_array_from_scalars(self, data):
+        pass
+
+
+class TestDtype(BaseNumPyTests, base.BaseDtypeTests):
+
+    @pytest.mark.skip(reason="Incorrect expected.")
+    # we unsurprisingly clash with a NumPy name.
+    def test_check_dtype(self, data):
+        pass
+
+
+class TestGetitem(BaseNumPyTests, base.BaseGetitemTests):
+
+    @skip_nested
+    def test_getitem_scalar(self, data):
+        pass
+
+    @skip_nested
+    def test_take_series(self, data):
+        pass
+
+
+class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
+    @skip_nested
+    def test_groupby_extension_apply(self, data_for_grouping, op):
+        pass
+
+
+class TestInterface(BaseNumPyTests, base.BaseInterfaceTests):
+    @skip_nested
+    def test_array_interface(self, data):
+        # NumPy array shape inference
+        pass
+
+
+class TestMethods(BaseNumPyTests, base.BaseMethodsTests):
+
+    @pytest.mark.skip(reason="TODO: remove?")
+    def test_value_counts(self, all_data, dropna):
+        pass
+
+    @pytest.mark.skip(reason="Incorrect expected")
+    # We have a bool dtype, so the result is an ExtensionArray
+    # but expected is not
+    def test_combine_le(self, data_repeated):
+        super(TestMethods, self).test_combine_le(data_repeated)
+
+    @skip_nested
+    def test_combine_add(self, data_repeated):
+        # Not numeric
+        pass
+
+    @skip_nested
+    def test_shift_fill_value(self, data):
+        # np.array shape inference. Shift implementation fails.
+        super(TestMethods, self).test_shift_fill_value(data)
+
+    @skip_nested
+    def test_unique(self, data, box, method):
+        # Fails creating expected
+        pass
+
+    @skip_nested
+    def test_fillna_copy_frame(self, data_missing):
+        # The "scalar" for this array isn't a scalar.
+        pass
+
+    @skip_nested
+    def test_fillna_copy_series(self, data_missing):
+        # The "scalar" for this array isn't a scalar.
+        pass
+
+    @skip_nested
+    def test_hash_pandas_object_works(self, data, as_frame):
+        # ndarray of tuples not hashable
+        pass
+
+    @skip_nested
+    def test_searchsorted(self, data_for_sorting, as_series):
+        # Test setup fails.
+        pass
+
+    @skip_nested
+    def test_where_series(self, data, na_value, as_frame):
+        # Test setup fails.
+        pass
+
+    @skip_nested
+    def test_repeat(self, data, repeats, as_series, use_numpy):
+        # Fails creating expected
+        pass
+
+
+class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
+    pass
+
+
+class TestMissing(BaseNumPyTests, base.BaseMissingTests):
+
+    @skip_nested
+    def test_fillna_scalar(self, data_missing):
+        # Non-scalar "scalar" values.
+        pass
+
+    @skip_nested
+    def test_fillna_series_method(self, data_missing, method):
+        # Non-scalar "scalar" values.
+        pass
+
+    @skip_nested
+    def test_fillna_series(self, data_missing):
+        # Non-scalar "scalar" values.
+        pass
+
+    @skip_nested
+    def test_fillna_frame(self, data_missing):
+        # Non-scalar "scalar" values.
+        pass
+
+
+class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
+
+    @pytest.mark.skip("Incorrect parent test")
+    # not actually a mixed concat, since we concat int and int.
+    def test_concat_mixed_dtypes(self, data):
+        super(TestReshaping, self).test_concat_mixed_dtypes(data)
+
+    @skip_nested
+    def test_merge(self, data, na_value):
+        # Fails creating expected
+        pass
+
+    @skip_nested
+    def test_merge_on_extension_array(self, data):
+        # Fails creating expected
+        pass
+
+    @skip_nested
+    def test_merge_on_extension_array_duplicates(self, data):
+        # Fails creating expected
+        pass
+
+
+class TestSetitem(BaseNumPyTests, base.BaseSetitemTests):
+
+    @skip_nested
+    def test_setitem_scalar_series(self, data, box_in_series):
+        pass
+
+    @skip_nested
+    def test_setitem_sequence(self, data, box_in_series):
+        pass
+
+    @skip_nested
+    def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
+        pass
+
+    @skip_nested
+    def test_setitem_sequence_broadcasts(self, data, box_in_series):
+        pass
+
+    @skip_nested
+    def test_setitem_loc_scalar_mixed(self, data):
+        pass
+
+    @skip_nested
+    def test_setitem_loc_scalar_multiple_homogoneous(self, data):
+        pass
+
+    @skip_nested
+    def test_setitem_iloc_scalar_mixed(self, data):
+        pass
+
+    @skip_nested
+    def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
+        pass
+
+    @skip_nested
+    def test_setitem_mask_broadcast(self, data, setter):
+        pass
+
+    @skip_nested
+    def test_setitem_scalar_key_sequence_raise(self, data):
+        pass
+
+
+# Skip Arithmetics, NumericReduce, BooleanReduce, Parsing
diff --git a/pandas/tests/frame/test_convert_to.py b/pandas/tests/frame/test_convert_to.py
index ddf85136126a1..7b98395dd6dec 100644
--- a/pandas/tests/frame/test_convert_to.py
+++ b/pandas/tests/frame/test_convert_to.py
@@ -488,3 +488,17 @@ def test_to_dict_index_dtypes(self, into, expected):
         result = DataFrame.from_dict(result, orient='index')[cols]
         expected = DataFrame.from_dict(expected, orient='index')[cols]
         tm.assert_frame_equal(result, expected)
+
+    def test_to_dict_numeric_names(self):
+        # https://github.com/pandas-dev/pandas/issues/24940
+        df = DataFrame({str(i): [i] for i in range(5)})
+        result = set(df.to_dict('records')[0].keys())
+        expected = set(df.columns)
+        assert result == expected
+
+    def test_to_dict_wide(self):
+        # https://github.com/pandas-dev/pandas/issues/24939
+        df = DataFrame({('A_{:d}'.format(i)): [i] for i in range(256)})
+        result = df.to_dict('records')[0]
+        expected = {'A_{:d}'.format(i): i for i in range(256)}
+        assert result == expected
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index f3e9d835c7391..20e439de46bde 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -765,6 +765,11 @@ def test_intersect_str_dates(self, sort):
 
         assert len(result) == 0
 
+    def test_intersect_nosort(self):
+        result = pd.Index(['c', 'b', 'a']).intersection(['b', 'a'])
+        expected = pd.Index(['b', 'a'])
+        tm.assert_index_equal(result, expected)
+
     @pytest.mark.parametrize("sort", [True, False])
     def test_chained_union(self, sort):
         # Chained unions handles names correctly
@@ -1595,20 +1600,27 @@ def test_drop_tuple(self, values, to_drop):
         for drop_me in to_drop[1], [to_drop[1]]:
             pytest.raises(KeyError, removed.drop, drop_me)
 
-    @pytest.mark.parametrize("method,expected", [
+    @pytest.mark.parametrize("method,expected,sort", [
+        ('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
+                                  dtype=[('num', int), ('let', 'a1')]),
+         False),
+
         ('intersection', np.array([(1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')],
-                                  dtype=[('num', int), ('let', 'a1')])),
+                                  dtype=[('num', int), ('let', 'a1')]),
+         True),
+
         ('union', np.array([(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 'B'),
-                            (2, 'C')], dtype=[('num', int), ('let', 'a1')]))
+                            (2, 'C')], dtype=[('num', int), ('let', 'a1')]),
+         True)
     ])
-    def test_tuple_union_bug(self, method, expected):
+    def test_tuple_union_bug(self, method, expected, sort):
         index1 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
                                 dtype=[('num', int), ('let', 'a1')]))
         index2 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'),
                                  (2, 'B'), (1, 'C'), (2, 'C')],
                                 dtype=[('num', int), ('let', 'a1')]))
 
-        result = getattr(index1, method)(index2)
+        result = getattr(index1, method)(index2, sort=sort)
         assert result.ndim == 1
 
         expected = Index(expected)
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 73995cbe79ecd..b743aeecdc756 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1276,6 +1276,21 @@ def test_resample_across_dst():
     assert_frame_equal(result, expected)
 
 
+def test_groupby_with_dst_time_change():
+    # GH 24972
+    index = pd.DatetimeIndex([1478064900001000000, 1480037118776792000],
+                             tz='UTC').tz_convert('America/Chicago')
+
+    df = pd.DataFrame([1, 2], index=index)
+    result = df.groupby(pd.Grouper(freq='1d')).last()
+    expected_index_values = pd.date_range('2016-11-02', '2016-11-24',
+                                          freq='d', tz='America/Chicago')
+
+    index = pd.DatetimeIndex(expected_index_values)
+    expected = pd.DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index)
+    assert_frame_equal(result, expected)
+
+
 def test_resample_dst_anchor():
     # 5172
     dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index f0a3ddc8ce8a4..1e60fdbebfeb3 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -616,6 +616,24 @@ def test_merge_on_datetime64tz(self):
         assert result['value_x'].dtype == 'datetime64[ns, US/Eastern]'
         assert result['value_y'].dtype == 'datetime64[ns, US/Eastern]'
 
+    def test_merge_on_datetime64tz_empty(self):
+        # https://github.com/pandas-dev/pandas/issues/25014
+        dtz = pd.DatetimeTZDtype(tz='UTC')
+        right = pd.DataFrame({'date': [pd.Timestamp('2018', tz=dtz.tz)],
+                              'value': [4.0],
+                              'date2': [pd.Timestamp('2019', tz=dtz.tz)]},
+                             columns=['date', 'value', 'date2'])
+        left = right[:0]
+        result = left.merge(right, on='date')
+        expected = pd.DataFrame({
+            'value_x': pd.Series(dtype=float),
+            'date2_x': pd.Series(dtype=dtz),
+            'date': pd.Series(dtype=dtz),
+            'value_y': pd.Series(dtype=float),
+            'date2_y': pd.Series(dtype=dtz),
+        }, columns=['value_x', 'date2_x', 'date', 'value_y', 'date2_y'])
+        tm.assert_frame_equal(result, expected)
+
     def test_merge_datetime64tz_with_dst_transition(self):
         # GH 18885
         df1 = pd.DataFrame(pd.date_range(
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 9b5fdfb06a9fa..e1838e0160fec 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -309,8 +309,13 @@ def test_iso_conversion(self):
         assert to_timedelta('P0DT0H0M1S') == expected
 
     def test_nat_converters(self):
-        assert to_timedelta('nat', box=False).astype('int64') == iNaT
-        assert to_timedelta('nan', box=False).astype('int64') == iNaT
+        result = to_timedelta('nat', box=False)
+        assert result.dtype.kind == 'm'
+        assert result.astype('int64') == iNaT
+
+        result = to_timedelta('nan', box=False)
+        assert result.dtype.kind == 'm'
+        assert result.astype('int64') == iNaT
 
     @pytest.mark.parametrize('units, np_unit',
                              [(['Y', 'y'], 'Y'),
 
 | 
	- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
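
For context (an editor's note, not part of the PR body): the most visible change in this diff flips the default of `Index.intersection` from `sort=True` to `sort=False`. A minimal sketch mirroring the new `test_intersect_nosort` test above:

```python
import pandas as pd

left = pd.Index(['c', 'b', 'a'])
left.intersection(['b', 'a'])              # Index(['b', 'a']): order preserved
left.intersection(['b', 'a'], sort=True)   # Index(['a', 'b']): old behavior
```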
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25055 
 | 
	2019-01-31T17:05:55Z 
 | 
	2019-01-31T20:30:20Z 
 | null  | 
	2019-01-31T20:30:20Z 
 | 
					
	Backport PR #25039 on branch 0.24.x (BUG: avoid usage in_qtconsole for recent IPython versions) 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 047404e93914b..521319c55a503 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -83,7 +83,7 @@ Bug Fixes
 
 **Other**
 
--
+- Fixed AttributeError when printing a DataFrame's HTML repr after accessing the IPython config object (:issue:`25036`)
 -
 
 .. _whatsnew_0.241.contributors:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 28c6f3c23a3ce..5b462b949abf9 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -17,6 +17,7 @@
 import itertools
 import sys
 import warnings
+from distutils.version import LooseVersion
 from textwrap import dedent
 
 import numpy as np
@@ -646,9 +647,15 @@ def _repr_html_(self):
         # XXX: In IPython 3.x and above, the Qt console will not attempt to
         # display HTML, so this check can be removed when support for
         # IPython 2.x is no longer needed.
-        if console.in_qtconsole():
-            # 'HTML output is disabled in QtConsole'
-            return None
+        try:
+            import IPython
+        except ImportError:
+            pass
+        else:
+            if LooseVersion(IPython.__version__) < LooseVersion('3.0'):
+                if console.in_qtconsole():
+                    # 'HTML output is disabled in QtConsole'
+                    return None
 
         if self._info_repr():
             buf = StringIO(u(""))
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 5d922ccaf1fd5..b0cf5a2f17609 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -12,6 +12,7 @@
 import os
 import re
 import sys
+import textwrap
 import warnings
 
 import dateutil
@@ -2777,3 +2778,17 @@ def test_format_percentiles():
         fmt.format_percentiles([2, 0.1, 0.5])
     with pytest.raises(ValueError, match=msg):
         fmt.format_percentiles([0.1, 0.5, 'a'])
+
+
+def test_repr_html_ipython_config(ip):
+    code = textwrap.dedent("""\
+    import pandas as pd
+    df = pd.DataFrame({"A": [1, 2]})
+    df._repr_html_()
+
+    cfg = get_ipython().config
+    cfg['IPKernelApp']['parent_appname']
+    df._repr_html_()
+    """)
+    result = ip.run_cell(code)
+    assert not result.error_in_exec
 
 | 
	Backport PR #25039: BUG: avoid usage in_qtconsole for recent IPython versions 
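
A sketch of the guard this backport adds to `DataFrame._repr_html_` (simplified from the diff above; I am assuming `console` is the 0.24-era `pandas.io.formats.console` module that `frame.py` already uses):

```python
from distutils.version import LooseVersion

def should_suppress_html_repr():
    # Only IPython < 3.0 needs the Qt console check, since its Qt console
    # cannot display HTML. Skipping console.in_qtconsole() on newer IPython
    # avoids the AttributeError triggered after accessing the config object.
    try:
        import IPython
    except ImportError:
        return False
    if LooseVersion(IPython.__version__) < LooseVersion('3.0'):
        from pandas.io.formats import console
        return console.in_qtconsole()
    return False
```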
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25054 
 | 
	2019-01-31T16:03:27Z 
 | 
	2019-01-31T20:17:47Z 
 | 
	2019-01-31T20:17:47Z 
 | 
	2019-01-31T20:17:47Z 
 | 
					
	DEPR: remove PanelGroupBy, disable DataFrame.to_panel 
 | 
	diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 09626be713c4f..a3fb1c575e7f1 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -51,7 +51,7 @@ Deprecations
 
 Removal of prior version deprecations/changes
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
+- Removed (parts of) :class:`Panel` (:issue:`25047`)
 -
 -
 -
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index afc4194e71eb1..ad4709fb3b870 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -1974,45 +1974,7 @@ def to_panel(self):
         -------
         panel : Panel
         """
-        # only support this kind for now
-        if (not isinstance(self.index, MultiIndex) or  # pragma: no cover
-                len(self.index.levels) != 2):
-            raise NotImplementedError('Only 2-level MultiIndex are supported.')
-
-        if not self.index.is_unique:
-            raise ValueError("Can't convert non-uniquely indexed "
-                             "DataFrame to Panel")
-
-        self._consolidate_inplace()
-
-        # minor axis must be sorted
-        if self.index.lexsort_depth < 2:
-            selfsorted = self.sort_index(level=0)
-        else:
-            selfsorted = self
-
-        major_axis, minor_axis = selfsorted.index.levels
-        major_codes, minor_codes = selfsorted.index.codes
-        shape = len(major_axis), len(minor_axis)
-
-        # preserve names, if any
-        major_axis = major_axis.copy()
-        major_axis.name = self.index.names[0]
-
-        minor_axis = minor_axis.copy()
-        minor_axis.name = self.index.names[1]
-
-        # create new axes
-        new_axes = [selfsorted.columns, major_axis, minor_axis]
-
-        # create new manager
-        new_mgr = selfsorted._data.reshape_nd(axes=new_axes,
-                                              labels=[major_codes,
-                                                      minor_codes],
-                                              shape=shape,
-                                              ref_items=selfsorted.columns)
-
-        return self._constructor_expanddim(new_mgr)
+        raise NotImplementedError("Panel is being removed in pandas 0.25.0.")
 
     @deprecate_kwarg(old_arg_name='encoding', new_arg_name=None)
     def to_stata(self, fname, convert_dates=None, write_index=True,
diff --git a/pandas/core/groupby/__init__.py b/pandas/core/groupby/__init__.py
index 9c15a5ebfe0f2..ac35f3825e5e8 100644
--- a/pandas/core/groupby/__init__.py
+++ b/pandas/core/groupby/__init__.py
@@ -1,4 +1,4 @@
 from pandas.core.groupby.groupby import GroupBy  # noqa: F401
 from pandas.core.groupby.generic import (  # noqa: F401
-    SeriesGroupBy, DataFrameGroupBy, PanelGroupBy)
+    SeriesGroupBy, DataFrameGroupBy)
 from pandas.core.groupby.grouper import Grouper  # noqa: F401
diff --git a/pandas/core/groupby/generic.py b/pandas/core/groupby/generic.py
index 78aa6d13a9e02..c8ea9ce689871 100644
--- a/pandas/core/groupby/generic.py
+++ b/pandas/core/groupby/generic.py
@@ -1,5 +1,5 @@
 """
-Define the SeriesGroupBy, DataFrameGroupBy, and PanelGroupBy
+Define the SeriesGroupBy and DataFrameGroupBy
 classes that hold the groupby interfaces (and some implementations).
 
 These are user facing as the result of the ``df.groupby(...)`` operations,
@@ -39,7 +39,6 @@
 from pandas.core.index import CategoricalIndex, Index, MultiIndex
 import pandas.core.indexes.base as ibase
 from pandas.core.internals import BlockManager, make_block
-from pandas.core.panel import Panel
 from pandas.core.series import Series
 
 from pandas.plotting._core import boxplot_frame_groupby
@@ -1586,90 +1585,3 @@ def groupby_series(obj, col=None):
         return results
 
     boxplot = boxplot_frame_groupby
-
-
-class PanelGroupBy(NDFrameGroupBy):
-
-    def aggregate(self, arg, *args, **kwargs):
-        return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)
-
-    agg = aggregate
-
-    def _iterate_slices(self):
-        if self.axis == 0:
-            # kludge
-            if self._selection is None:
-                slice_axis = self._selected_obj.items
-            else:
-                slice_axis = self._selection_list
-            slicer = lambda x: self._selected_obj[x]
-        else:
-            raise NotImplementedError("axis other than 0 is not supported")
-
-        for val in slice_axis:
-            if val in self.exclusions:
-                continue
-
-            yield val, slicer(val)
-
-    def aggregate(self, arg, *args, **kwargs):
-        """
-        Aggregate using input function or dict of {column -> function}
-
-        Parameters
-        ----------
-        arg : function or dict
-            Function to use for aggregating groups. If a function, must either
-            work when passed a Panel or when passed to Panel.apply. If
-            pass a dict, the keys must be DataFrame column names
-
-        Returns
-        -------
-        aggregated : Panel
-        """
-        if isinstance(arg, compat.string_types):
-            return getattr(self, arg)(*args, **kwargs)
-
-        return self._aggregate_generic(arg, *args, **kwargs)
-
-    def _wrap_generic_output(self, result, obj):
-        if self.axis == 0:
-            new_axes = list(obj.axes)
-            new_axes[0] = self.grouper.result_index
-        elif self.axis == 1:
-            x, y, z = obj.axes
-            new_axes = [self.grouper.result_index, z, x]
-        else:
-            x, y, z = obj.axes
-            new_axes = [self.grouper.result_index, y, x]
-
-        result = Panel._from_axes(result, new_axes)
-
-        if self.axis == 1:
-            result = result.swapaxes(0, 1).swapaxes(0, 2)
-        elif self.axis == 2:
-            result = result.swapaxes(0, 2)
-
-        return result
-
-    def _aggregate_item_by_item(self, func, *args, **kwargs):
-        obj = self._obj_with_exclusions
-        result = {}
-
-        if self.axis > 0:
-            for item in obj:
-                try:
-                    itemg = DataFrameGroupBy(obj[item],
-                                             axis=self.axis - 1,
-                                             grouper=self.grouper)
-                    result[item] = itemg.aggregate(func, *args, **kwargs)
-                except (ValueError, TypeError):
-                    raise
-            new_axes = list(obj.axes)
-            new_axes[self.axis] = self.grouper.result_index
-            return Panel._from_axes(result, new_axes)
-        else:
-            raise ValueError("axis value must be greater than 0")
-
-    def _wrap_aggregated_output(self, output, names=None):
-        raise AbstractMethodError(self)
diff --git a/pandas/core/panel.py b/pandas/core/panel.py
index c8afafde48ac2..de535eeea4b5e 100644
--- a/pandas/core/panel.py
+++ b/pandas/core/panel.py
@@ -917,9 +917,7 @@ def groupby(self, function, axis='major'):
         -------
         grouped : PanelGroupBy
         """
-        from pandas.core.groupby import PanelGroupBy
-        axis = self._get_axis_number(axis)
-        return PanelGroupBy(self, function, axis=axis)
+        raise NotImplementedError("Panel is removed in pandas 0.25.0")
 
     def to_frame(self, filter_observations=True):
         """
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index a7204fcd9dd20..fbddc9ff29ce9 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -20,7 +20,7 @@
 import pandas.core.algorithms as algos
 from pandas.core.generic import _shared_docs
 from pandas.core.groupby.base import GroupByMixin
-from pandas.core.groupby.generic import PanelGroupBy, SeriesGroupBy
+from pandas.core.groupby.generic import SeriesGroupBy
 from pandas.core.groupby.groupby import (
     GroupBy, _GroupBy, _pipe_template, groupby)
 from pandas.core.groupby.grouper import Grouper
@@ -340,12 +340,7 @@ def _groupby_and_aggregate(self, how, grouper=None, *args, **kwargs):
 
         obj = self._selected_obj
 
-        try:
-            grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)
-        except TypeError:
-
-            # panel grouper
-            grouped = PanelGroupBy(obj, grouper=grouper, axis=self.axis)
+        grouped = groupby(obj, by=None, grouper=grouper, axis=self.axis)
 
         try:
             if isinstance(obj, ABCDataFrame) and compat.callable(how):
diff --git a/pandas/io/pytables.py b/pandas/io/pytables.py
index 2ab6ddb5b25c7..00fa01bb23c8c 100644
--- a/pandas/io/pytables.py
+++ b/pandas/io/pytables.py
@@ -31,7 +31,7 @@
     PeriodIndex, Series, SparseDataFrame, SparseSeries, TimedeltaIndex, compat,
     concat, isna, to_datetime)
 from pandas.core import config
-from pandas.core.algorithms import match, unique
+from pandas.core.algorithms import unique
 from pandas.core.arrays.categorical import (
     Categorical, _factorize_from_iterables)
 from pandas.core.arrays.sparse import BlockIndex, IntIndex
@@ -3944,29 +3944,7 @@ def read(self, where=None, columns=None, **kwargs):
                 objs.append(obj)
 
         else:
-            warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)
-
-            # reconstruct
-            long_index = MultiIndex.from_arrays(
-                [i.values for i in self.index_axes])
-
-            for c in self.values_axes:
-                lp = DataFrame(c.data, index=long_index, columns=c.values)
-
-                # need a better algorithm
-                tuple_index = long_index.values
-
-                unique_tuples = unique(tuple_index)
-                unique_tuples = com.asarray_tuplesafe(unique_tuples)
-
-                indexer = match(unique_tuples, tuple_index)
-                indexer = ensure_platform_int(indexer)
-
-                new_index = long_index.take(indexer)
-                new_values = lp.values.take(indexer, axis=0)
-
-                lp = DataFrame(new_values, index=new_index, columns=lp.columns)
-                objs.append(lp.to_panel())
+            raise NotImplementedError("Panel is removed in pandas 0.25.0")
 
         # create the composite object
         if len(objs) == 1:
@@ -4875,16 +4853,3 @@ def select_coords(self):
             return self.coordinates
 
         return np.arange(start, stop)
-
-# utilities ###
-
-
-def timeit(key, df, fn=None, remove=True, **kwargs):
-    if fn is None:
-        fn = 'timeit.h5'
-    store = HDFStore(fn, mode='w')
-    store.append(key, df, **kwargs)
-    store.close()
-
-    if remove:
-        os.remove(fn)
diff --git a/pandas/tests/dtypes/test_generic.py b/pandas/tests/dtypes/test_generic.py
index 1622088d05f4d..2bb3559d56d61 100644
--- a/pandas/tests/dtypes/test_generic.py
+++ b/pandas/tests/dtypes/test_generic.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 
-from warnings import catch_warnings, simplefilter
+from warnings import catch_warnings
 
 import numpy as np
 
@@ -39,9 +39,6 @@ def test_abc_types(self):
         assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
         assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
         assert isinstance(self.df, gt.ABCDataFrame)
-        with catch_warnings(record=True):
-            simplefilter('ignore', FutureWarning)
-            assert isinstance(self.df.to_panel(), gt.ABCPanel)
         assert isinstance(self.sparse_series, gt.ABCSparseSeries)
         assert isinstance(self.sparse_array, gt.ABCSparseArray)
         assert isinstance(self.sparse_frame, gt.ABCSparseDataFrame)
diff --git a/pandas/tests/frame/test_subclass.py b/pandas/tests/frame/test_subclass.py
index 4f0747c0d6945..2e3696e7e04cc 100644
--- a/pandas/tests/frame/test_subclass.py
+++ b/pandas/tests/frame/test_subclass.py
@@ -6,7 +6,7 @@
 import pytest
 
 import pandas as pd
-from pandas import DataFrame, Index, MultiIndex, Panel, Series
+from pandas import DataFrame, Index, MultiIndex, Series
 from pandas.tests.frame.common import TestData
 import pandas.util.testing as tm
 
@@ -125,29 +125,6 @@ def test_indexing_sliced(self):
         tm.assert_series_equal(res, exp)
         assert isinstance(res, tm.SubclassedSeries)
 
-    @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-    def test_to_panel_expanddim(self):
-        # GH 9762
-
-        class SubclassedFrame(DataFrame):
-
-            @property
-            def _constructor_expanddim(self):
-                return SubclassedPanel
-
-        class SubclassedPanel(Panel):
-            pass
-
-        index = MultiIndex.from_tuples([(0, 0), (0, 1), (0, 2)])
-        df = SubclassedFrame({'X': [1, 2, 3], 'Y': [4, 5, 6]}, index=index)
-        result = df.to_panel()
-        assert isinstance(result, SubclassedPanel)
-        expected = SubclassedPanel([[[1, 2, 3]], [[4, 5, 6]]],
-                                   items=['X', 'Y'], major_axis=[0],
-                                   minor_axis=[0, 1, 2],
-                                   dtype='int64')
-        tm.assert_panel_equal(result, expected)
-
     def test_subclass_attr_err_propagation(self):
         # GH 11808
         class A(DataFrame):
diff --git a/pandas/tests/groupby/test_groupby.py b/pandas/tests/groupby/test_groupby.py
index 98c917a6eca3c..0bfc7ababd18a 100644
--- a/pandas/tests/groupby/test_groupby.py
+++ b/pandas/tests/groupby/test_groupby.py
@@ -1239,31 +1239,6 @@ def _check_work(gp):
     # _check_work(panel.groupby(lambda x: x.month, axis=1))
 
 
[email protected]("ignore:\\nPanel:FutureWarning")
-def test_panel_groupby():
-    panel = tm.makePanel()
-    tm.add_nans(panel)
-    grouped = panel.groupby({'ItemA': 0, 'ItemB': 0, 'ItemC': 1},
-                            axis='items')
-    agged = grouped.mean()
-    agged2 = grouped.agg(lambda x: x.mean('items'))
-
-    tm.assert_panel_equal(agged, agged2)
-
-    tm.assert_index_equal(agged.items, Index([0, 1]))
-
-    grouped = panel.groupby(lambda x: x.month, axis='major')
-    agged = grouped.mean()
-
-    exp = Index(sorted(list(set(panel.major_axis.month))))
-    tm.assert_index_equal(agged.major_axis, exp)
-
-    grouped = panel.groupby({'A': 0, 'B': 0, 'C': 1, 'D': 1},
-                            axis='minor')
-    agged = grouped.mean()
-    tm.assert_index_equal(agged.minor_axis, Index([0, 1]))
-
-
 def test_groupby_2d_malformed():
     d = DataFrame(index=lrange(2))
     d['group'] = ['g1', 'g2']
diff --git a/pandas/tests/groupby/test_grouping.py b/pandas/tests/groupby/test_grouping.py
index a509a7cb57c97..44b5bd5f13992 100644
--- a/pandas/tests/groupby/test_grouping.py
+++ b/pandas/tests/groupby/test_grouping.py
@@ -14,8 +14,7 @@
 from pandas.core.groupby.grouper import Grouping
 import pandas.util.testing as tm
 from pandas.util.testing import (
-    assert_almost_equal, assert_frame_equal, assert_panel_equal,
-    assert_series_equal)
+    assert_almost_equal, assert_frame_equal, assert_series_equal)
 
 # selection
 # --------------------------------
@@ -563,17 +562,7 @@ def test_list_grouper_with_nat(self):
 # --------------------------------
 
 class TestGetGroup():
-
-    @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
     def test_get_group(self):
-        wp = tm.makePanel()
-        grouped = wp.groupby(lambda x: x.month, axis='major')
-
-        gp = grouped.get_group(1)
-        expected = wp.reindex(
-            major=[x for x in wp.major_axis if x.month == 1])
-        assert_panel_equal(gp, expected)
-
         # GH 5267
         # be datelike friendly
         df = DataFrame({'DATE': pd.to_datetime(
@@ -755,19 +744,6 @@ def test_multi_iter_frame(self, three_group):
         for key, group in grouped:
             pass
 
-    @pytest.mark.filterwarnings("ignore:\\nPanel:FutureWarning")
-    def test_multi_iter_panel(self):
-        wp = tm.makePanel()
-        grouped = wp.groupby([lambda x: x.month, lambda x: x.weekday()],
-                             axis=1)
-
-        for (month, wd), group in grouped:
-            exp_axis = [x
-                        for x in wp.major_axis
-                        if x.month == month and x.weekday() == wd]
-            expected = wp.reindex(major=exp_axis)
-            assert_panel_equal(group, expected)
-
     def test_dictify(self, df):
         dict(iter(df.groupby('A')))
         dict(iter(df.groupby(['A', 'B'])))
diff --git a/pandas/tests/io/test_pytables.py b/pandas/tests/io/test_pytables.py
index 9430011288f27..c339c33751b5f 100644
--- a/pandas/tests/io/test_pytables.py
+++ b/pandas/tests/io/test_pytables.py
@@ -3050,29 +3050,6 @@ def test_select_with_dups(self):
             result = store.select('df', columns=['B', 'A'])
             assert_frame_equal(result, expected, by_blocks=True)
 
-    @pytest.mark.filterwarnings(
-        "ignore:\\nduplicate:pandas.io.pytables.DuplicateWarning"
-    )
-    def test_wide_table_dups(self):
-        with ensure_clean_store(self.path) as store:
-            with catch_warnings(record=True):
-
-                wp = tm.makePanel()
-                store.put('panel', wp, format='table')
-                store.put('panel', wp, format='table', append=True)
-
-                recons = store['panel']
-
-                assert_panel_equal(recons, wp)
-
-    def test_long(self):
-        def _check(left, right):
-            assert_panel_equal(left.to_panel(), right.to_panel())
-
-        with catch_warnings(record=True):
-            wp = tm.makePanel()
-            self._check_roundtrip(wp.to_frame(), _check)
-
     def test_overwrite_node(self):
 
         with ensure_clean_store(self.path) as store:
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 856c4df5380e5..ceccb48194f85 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1,6 +1,5 @@
 from datetime import datetime, timedelta
 from functools import partial
-from warnings import catch_warnings, simplefilter
 
 import numpy as np
 import pytest
@@ -10,7 +9,7 @@
 from pandas.errors import UnsupportedFunctionCall
 
 import pandas as pd
-from pandas import DataFrame, Panel, Series, Timedelta, Timestamp, isna, notna
+from pandas import DataFrame, Series, Timedelta, Timestamp, isna, notna
 from pandas.core.indexes.datetimes import date_range
 from pandas.core.indexes.period import Period, period_range
 from pandas.core.resample import (
@@ -692,56 +691,6 @@ def test_resample_axis1():
     tm.assert_frame_equal(result, expected)
 
 
-def test_resample_panel():
-    rng = date_range('1/1/2000', '6/30/2000')
-    n = len(rng)
-
-    with catch_warnings(record=True):
-        simplefilter("ignore", FutureWarning)
-        panel = Panel(np.random.randn(3, n, 5),
-                      items=['one', 'two', 'three'],
-                      major_axis=rng,
-                      minor_axis=['a', 'b', 'c', 'd', 'e'])
-
-        result = panel.resample('M', axis=1).mean()
-
-        def p_apply(panel, f):
-            result = {}
-            for item in panel.items:
-                result[item] = f(panel[item])
-            return Panel(result, items=panel.items)
-
-        expected = p_apply(panel, lambda x: x.resample('M').mean())
-        tm.assert_panel_equal(result, expected)
-
-        panel2 = panel.swapaxes(1, 2)
-        result = panel2.resample('M', axis=2).mean()
-        expected = p_apply(panel2,
-                           lambda x: x.resample('M', axis=1).mean())
-        tm.assert_panel_equal(result, expected)
-
-
[email protected]("ignore:\\nPanel:FutureWarning")
-def test_resample_panel_numpy():
-    rng = date_range('1/1/2000', '6/30/2000')
-    n = len(rng)
-
-    with catch_warnings(record=True):
-        panel = Panel(np.random.randn(3, n, 5),
-                      items=['one', 'two', 'three'],
-                      major_axis=rng,
-                      minor_axis=['a', 'b', 'c', 'd', 'e'])
-
-        result = panel.resample('M', axis=1).apply(lambda x: x.mean(1))
-        expected = panel.resample('M', axis=1).mean()
-        tm.assert_panel_equal(result, expected)
-
-        panel = panel.swapaxes(1, 2)
-        result = panel.resample('M', axis=2).apply(lambda x: x.mean(2))
-        expected = panel.resample('M', axis=2).mean()
-        tm.assert_panel_equal(result, expected)
-
-
 def test_resample_anchored_ticks():
     # If a fixed delta (5 minute, 4 hour) evenly divides a day, we should
     # "anchor" the origin at midnight so we get regular intervals rather
diff --git a/pandas/tests/resample/test_time_grouper.py b/pandas/tests/resample/test_time_grouper.py
index a4eb7933738c0..2f330d1f2484b 100644
--- a/pandas/tests/resample/test_time_grouper.py
+++ b/pandas/tests/resample/test_time_grouper.py
@@ -5,7 +5,7 @@
 import pytest
 
 import pandas as pd
-from pandas import DataFrame, Panel, Series
+from pandas import DataFrame, Series
 from pandas.core.indexes.datetimes import date_range
 from pandas.core.resample import TimeGrouper
 import pandas.util.testing as tm
@@ -79,27 +79,6 @@ def f(df):
     tm.assert_index_equal(result.index, df.index)
 
 
[email protected]("ignore:\\nPanel:FutureWarning")
-def test_panel_aggregation():
-    ind = pd.date_range('1/1/2000', periods=100)
-    data = np.random.randn(2, len(ind), 4)
-
-    wp = Panel(data, items=['Item1', 'Item2'], major_axis=ind,
-               minor_axis=['A', 'B', 'C', 'D'])
-
-    tg = TimeGrouper('M', axis=1)
-    _, grouper, _ = tg._get_grouper(wp)
-    bingrouped = wp.groupby(grouper)
-    binagg = bingrouped.mean()
-
-    def f(x):
-        assert (isinstance(x, Panel))
-        return x.mean(1)
-
-    result = bingrouped.agg(f)
-    tm.assert_panel_equal(result, binagg)
-
-
 @pytest.mark.parametrize('name, func', [
     ('Int64Index', tm.makeIntIndex),
     ('Index', tm.makeUnicodeIndex),
diff --git a/pandas/tests/test_panel.py b/pandas/tests/test_panel.py
index ba0ad72e624f7..6b20acc844829 100644
--- a/pandas/tests/test_panel.py
+++ b/pandas/tests/test_panel.py
@@ -1653,61 +1653,6 @@ def test_transpose_copy(self):
         panel.values[0, 1, 1] = np.nan
         assert notna(result.values[1, 0, 1])
 
-    def test_to_frame(self):
-        # filtered
-        filtered = self.panel.to_frame()
-        expected = self.panel.to_frame().dropna(how='any')
-        assert_frame_equal(filtered, expected)
-
-        # unfiltered
-        unfiltered = self.panel.to_frame(filter_observations=False)
-        assert_panel_equal(unfiltered.to_panel(), self.panel)
-
-        # names
-        assert unfiltered.index.names == ('major', 'minor')
-
-        # unsorted, round trip
-        df = self.panel.to_frame(filter_observations=False)
-        unsorted = df.take(np.random.permutation(len(df)))
-        pan = unsorted.to_panel()
-        assert_panel_equal(pan, self.panel)
-
-        # preserve original index names
-        df = DataFrame(np.random.randn(6, 2),
-                       index=[['a', 'a', 'b', 'b', 'c', 'c'],
-                              [0, 1, 0, 1, 0, 1]],
-                       columns=['one', 'two'])
-        df.index.names = ['foo', 'bar']
-        df.columns.name = 'baz'
-
-        rdf = df.to_panel().to_frame()
-        assert rdf.index.names == df.index.names
-        assert rdf.columns.names == df.columns.names
-
-    def test_to_frame_mixed(self):
-        panel = self.panel.fillna(0)
-        panel['str'] = 'foo'
-        panel['bool'] = panel['ItemA'] > 0
-
-        lp = panel.to_frame()
-        wp = lp.to_panel()
-        assert wp['bool'].values.dtype == np.bool_
-        # Previously, this was mutating the underlying
-        # index and changing its name
-        assert_frame_equal(wp['bool'], panel['bool'], check_names=False)
-
-        # GH 8704
-        # with categorical
-        df = panel.to_frame()
-        df['category'] = df['str'].astype('category')
-
-        # to_panel
-        # TODO: this converts back to object
-        p = df.to_panel()
-        expected = panel.copy()
-        expected['category'] = 'foo'
-        assert_panel_equal(p, expected)
-
     def test_to_frame_multi_major(self):
         idx = MultiIndex.from_tuples(
             [(1, 'one'), (1, 'two'), (2, 'one'), (2, 'two')])
@@ -1808,22 +1753,6 @@ def test_to_frame_multi_drop_level(self):
         expected = DataFrame({'i1': [1., 2], 'i2': [1., 2]}, index=exp_idx)
         assert_frame_equal(result, expected)
 
-    def test_to_panel_na_handling(self):
-        df = DataFrame(np.random.randint(0, 10, size=20).reshape((10, 2)),
-                       index=[[0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
-                              [0, 1, 2, 3, 4, 5, 2, 3, 4, 5]])
-
-        panel = df.to_panel()
-        assert isna(panel[0].loc[1, [0, 1]]).all()
-
-    def test_to_panel_duplicates(self):
-        # #2441
-        df = DataFrame({'a': [0, 0, 1], 'b': [1, 1, 1], 'c': [1, 2, 3]})
-        idf = df.set_index(['a', 'b'])
-
-        with pytest.raises(ValueError, match='non-uniquely indexed'):
-            idf.to_panel()
-
     def test_panel_dups(self):
 
         # GH 4960
@@ -2121,14 +2050,6 @@ def test_get_attr(self):
         self.panel['i'] = self.panel['ItemA']
         assert_frame_equal(self.panel['i'], self.panel.i)
 
-    def test_from_frame_level1_unsorted(self):
-        tuples = [('MSFT', 3), ('MSFT', 2), ('AAPL', 2), ('AAPL', 1),
-                  ('MSFT', 1)]
-        midx = MultiIndex.from_tuples(tuples)
-        df = DataFrame(np.random.rand(5, 4), index=midx)
-        p = df.to_panel()
-        assert_frame_equal(p.minor_xs(2), df.xs(2, level=1).sort_index())
-
     def test_to_excel(self):
         try:
             import xlwt  # noqa
@@ -2404,40 +2325,11 @@ def setup_method(self, method):
         self.panel = panel.to_frame()
         self.unfiltered_panel = panel.to_frame(filter_observations=False)
 
-    def test_ops_differently_indexed(self):
-        # trying to set non-identically indexed panel
-        wp = self.panel.to_panel()
-        wp2 = wp.reindex(major=wp.major_axis[:-1])
-        lp2 = wp2.to_frame()
-
-        result = self.panel + lp2
-        assert_frame_equal(result.reindex(lp2.index), lp2 * 2)
-
-        # careful, mutation
-        self.panel['foo'] = lp2['ItemA']
-        assert_series_equal(self.panel['foo'].reindex(lp2.index),
-                            lp2['ItemA'],
-                            check_names=False)
-
     def test_ops_scalar(self):
         result = self.panel.mul(2)
         expected = DataFrame.__mul__(self.panel, 2)
         assert_frame_equal(result, expected)
 
-    def test_combineFrame(self):
-        wp = self.panel.to_panel()
-        result = self.panel.add(wp['ItemA'].stack(), axis=0)
-        assert_frame_equal(result.to_panel()['ItemA'], wp['ItemA'] * 2)
-
-    def test_combinePanel(self):
-        wp = self.panel.to_panel()
-        result = self.panel.add(self.panel)
-        wide_result = result.to_panel()
-        assert_frame_equal(wp['ItemA'] * 2, wide_result['ItemA'])
-
-        # one item
-        result = self.panel.add(self.panel.filter(['ItemA']))
-
     def test_combine_scalar(self):
         result = self.panel.mul(2)
         expected = DataFrame(self.panel._data) * 2
@@ -2454,34 +2346,6 @@ def test_combine_series(self):
         expected = DataFrame.add(self.panel, s, axis=1)
         assert_frame_equal(result, expected)
 
-    def test_operators(self):
-        wp = self.panel.to_panel()
-        result = (self.panel + 1).to_panel()
-        assert_frame_equal(wp['ItemA'] + 1, result['ItemA'])
-
-    def test_arith_flex_panel(self):
-        ops = ['add', 'sub', 'mul', 'div',
-               'truediv', 'pow', 'floordiv', 'mod']
-        if not compat.PY3:
-            aliases = {}
-        else:
-            aliases = {'div': 'truediv'}
-        self.panel = self.panel.to_panel()
-
-        for n in [np.random.randint(-50, -1), np.random.randint(1, 50), 0]:
-            for op in ops:
-                alias = aliases.get(op, op)
-                f = getattr(operator, alias)
-                exp = f(self.panel, n)
-                result = getattr(self.panel, op)(n)
-                assert_panel_equal(result, exp, check_panel_type=True)
-
-                # rops
-                r_f = lambda x, y: f(y, x)
-                exp = r_f(self.panel, n)
-                result = getattr(self.panel, 'r' + op)(n)
-                assert_panel_equal(result, exp)
-
     def test_sort(self):
         def is_sorted(arr):
             return (arr[1:] > arr[:-1]).any()
@@ -2502,45 +2366,6 @@ def test_to_sparse(self):
             with pytest.raises(NotImplementedError, match=msg):
                 self.panel.to_sparse
 
-    def test_truncate(self):
-        dates = self.panel.index.levels[0]
-        start, end = dates[1], dates[5]
-
-        trunced = self.panel.truncate(start, end).to_panel()
-        expected = self.panel.to_panel()['ItemA'].truncate(start, end)
-
-        # TODO truncate drops index.names
-        assert_frame_equal(trunced['ItemA'], expected, check_names=False)
-
-        trunced = self.panel.truncate(before=start).to_panel()
-        expected = self.panel.to_panel()['ItemA'].truncate(before=start)
-
-        # TODO truncate drops index.names
-        assert_frame_equal(trunced['ItemA'], expected, check_names=False)
-
-        trunced = self.panel.truncate(after=end).to_panel()
-        expected = self.panel.to_panel()['ItemA'].truncate(after=end)
-
-        # TODO truncate drops index.names
-        assert_frame_equal(trunced['ItemA'], expected, check_names=False)
-
-        # truncate on dates that aren't in there
-        wp = self.panel.to_panel()
-        new_index = wp.major_axis[::5]
-
-        wp2 = wp.reindex(major=new_index)
-
-        lp2 = wp2.to_frame()
-        lp_trunc = lp2.truncate(wp.major_axis[2], wp.major_axis[-2])
-
-        wp_trunc = wp2.truncate(wp.major_axis[2], wp.major_axis[-2])
-
-        assert_panel_equal(wp_trunc, lp_trunc.to_panel())
-
-        # throw proper exception
-        pytest.raises(Exception, lp2.truncate, wp.major_axis[-2],
-                      wp.major_axis[2])
-
     def test_axis_dummies(self):
         from pandas.core.reshape.reshape import make_axis_dummies
 
@@ -2567,20 +2392,6 @@ def test_get_dummies(self):
         dummies = get_dummies(self.panel['Label'])
         tm.assert_numpy_array_equal(dummies.values, minor_dummies.values)
 
-    def test_mean(self):
-        means = self.panel.mean(level='minor')
-
-        # test versus Panel version
-        wide_means = self.panel.to_panel().mean('major')
-        assert_frame_equal(means, wide_means)
-
-    def test_sum(self):
-        sums = self.panel.sum(level='minor')
-
-        # test versus Panel version
-        wide_sums = self.panel.to_panel().sum('major')
-        assert_frame_equal(sums, wide_sums)
-
     def test_count(self):
         index = self.panel.index
 
 
 | 
	My understanding is that we're removing Panel in 0.25.0.  A local attempt to do this all at once got messy quickly (largely due to io.pytables and io.msgpack).  This gets the ball rolling by removing only PanelGroupBy and DataFrame.to_panel, followed by all of the code and tests that rely on either of these. 
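For anyone landing on the removed `to_panel()`: the long-recommended replacement for Panel-shaped data is xarray. A sketch (assumes the optional xarray dependency is installed):
```python
import pandas as pd

index = pd.MultiIndex.from_product([[0, 1], ['a', 'b']],
                                   names=['major', 'minor'])
df = pd.DataFrame({'X': [1, 2, 3, 4], 'Y': [5, 6, 7, 8]}, index=index)

# Instead of df.to_panel(), build an N-dimensional labelled structure:
ds = df.to_xarray()  # xarray.Dataset with dims ('major', 'minor')
```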
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25047 
 | 
	2019-01-31T03:50:28Z 
 | 
	2019-02-06T03:47:26Z 
 | 
	2019-02-06T03:47:26Z 
 | 
	2019-02-09T08:53:29Z 
 | 
					
	ENH: Support fold argument in Timestamp.replace 
 | 
	diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index a9fa8b2174dd0..8e1fc352ba4f7 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -19,7 +19,7 @@ including other versions of pandas.
 Other Enhancements
 ^^^^^^^^^^^^^^^^^^
 
--
+- :meth:`Timestamp.replace` now supports the ``fold`` argument to disambiguate DST transition times (:issue:`25017`)
 -
 -
 
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index a55d15a7c4e85..c719bcb2ef135 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -669,7 +669,6 @@ class NaTType(_NaT):
         nanosecond : int, optional
         tzinfo : tz-convertible, optional
         fold : int, optional, default is 0
-            added in 3.6, NotImplemented
 
         Returns
         -------
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index fe0564cb62c30..85d94f822056b 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -1,4 +1,5 @@
 # -*- coding: utf-8 -*-
+import sys
 import warnings
 
 from cpython cimport (PyObject_RichCompareBool, PyObject_RichCompare,
@@ -43,10 +44,11 @@ from pandas._libs.tslibs.timezones import UTC
 # Constants
 _zero_time = datetime_time(0, 0)
 _no_input = object()
-
+PY36 = sys.version_info >= (3, 6)
 
 # ----------------------------------------------------------------------
 
+
 def maybe_integer_op_deprecated(obj):
     # GH#22535 add/sub of integers and int-arrays is deprecated
     if obj.freq is not None:
@@ -1195,7 +1197,6 @@ class Timestamp(_Timestamp):
         nanosecond : int, optional
         tzinfo : tz-convertible, optional
         fold : int, optional, default is 0
-            added in 3.6, NotImplemented
 
         Returns
         -------
@@ -1252,12 +1253,16 @@ class Timestamp(_Timestamp):
             # see GH#18319
             ts_input = _tzinfo.localize(datetime(dts.year, dts.month, dts.day,
                                                  dts.hour, dts.min, dts.sec,
-                                                 dts.us))
+                                                 dts.us),
+                                        is_dst=not bool(fold))
             _tzinfo = ts_input.tzinfo
         else:
-            ts_input = datetime(dts.year, dts.month, dts.day,
-                                dts.hour, dts.min, dts.sec, dts.us,
-                                tzinfo=_tzinfo)
+            kwargs = {'year': dts.year, 'month': dts.month, 'day': dts.day,
+                      'hour': dts.hour, 'minute': dts.min, 'second': dts.sec,
+                      'microsecond': dts.us, 'tzinfo': _tzinfo}
+            if PY36:
+                kwargs['fold'] = fold
+            ts_input = datetime(**kwargs)
 
         ts = convert_datetime_to_tsobject(ts_input, _tzinfo)
         value = ts.value + (dts.ps // 1000)
diff --git a/pandas/tests/scalar/timestamp/test_unary_ops.py b/pandas/tests/scalar/timestamp/test_unary_ops.py
index 3f9a30d254126..adcf66200a672 100644
--- a/pandas/tests/scalar/timestamp/test_unary_ops.py
+++ b/pandas/tests/scalar/timestamp/test_unary_ops.py
@@ -8,7 +8,7 @@
 
 from pandas._libs.tslibs import conversion
 from pandas._libs.tslibs.frequencies import INVALID_FREQ_ERR_MSG
-from pandas.compat import PY3
+from pandas.compat import PY3, PY36
 import pandas.util._test_decorators as td
 
 from pandas import NaT, Timestamp
@@ -329,6 +329,19 @@ def test_replace_dst_border(self):
         expected = Timestamp('2013-11-3 03:00:00', tz='America/Chicago')
         assert result == expected
 
+    @pytest.mark.skipif(not PY36, reason='Fold not available until PY3.6')
+    @pytest.mark.parametrize('fold', [0, 1])
+    @pytest.mark.parametrize('tz', ['dateutil/Europe/London', 'Europe/London'])
+    def test_replace_dst_fold(self, fold, tz):
+        # GH 25017
+        d = datetime(2019, 10, 27, 2, 30)
+        ts = Timestamp(d, tz=tz)
+        result = ts.replace(hour=1, fold=fold)
+        expected = Timestamp(datetime(2019, 10, 27, 1, 30)).tz_localize(
+            tz, ambiguous=not fold
+        )
+        assert result == expected
+
     # --------------------------------------------------------------
     # Timestamp.normalize
 
 
 | 
	- [x] closes #25017
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
Since `Timestamp` has its own `replace` method, I think we can introduce this while still supporting PY3.5 (`datetime.replace` only gained the `fold` argument in 3.6), since the method can mimic the PY3.6 behaviour itself. 
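For illustration, here is roughly what this enables, based on the test added above (Europe/London leaves DST at 02:00 on 2019-10-27, so 01:30 local time occurs twice):
```python
from datetime import datetime

import pandas as pd

ts = pd.Timestamp(datetime(2019, 10, 27, 2, 30), tz='Europe/London')

# fold=0 resolves the ambiguous wall time to its first occurrence (BST),
# fold=1 to its second occurrence (GMT), mirroring PEP 495 semantics.
ts.replace(hour=1, fold=0)  # 2019-10-27 01:30:00+01:00
ts.replace(hour=1, fold=1)  # 2019-10-27 01:30:00+00:00
```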
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25046 
 | 
	2019-01-31T01:54:09Z 
 | 
	2019-02-01T18:40:56Z 
 | 
	2019-02-01T18:40:56Z 
 | 
	2019-02-01T18:51:05Z 
 | 
					
	PERF: use new to_records() argument in to_stata() 
 | 
	diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 939fb8b9415bd..130477f588c26 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -23,14 +23,6 @@ Other Enhancements
 -
 -
 
-.. _whatsnew_0250.performance:
-
-Performance Improvements
-~~~~~~~~~~~~~~~~~~~~~~~~
- - Significant speedup in `SparseArray` initialization that benefits most operations, fixing performance regression introduced in v0.20.0 (:issue:`24985`)
-
-
-
 .. _whatsnew_0250.api_breaking:
 
 Backwards incompatible API changes
@@ -69,8 +61,8 @@ Removal of prior version deprecations/changes
 Performance Improvements
 ~~~~~~~~~~~~~~~~~~~~~~~~
 
--
--
+- Significant speedup in `SparseArray` initialization that benefits most operations, fixing performance regression introduced in v0.20.0 (:issue:`24985`)
+- `DataFrame.to_stata()` is now faster when outputting data with any string or non-native endian columns (:issue:`25045`)
 -
 
 
diff --git a/pandas/io/stata.py b/pandas/io/stata.py
index 1b0660171ecac..0bd084f4e5df7 100644
--- a/pandas/io/stata.py
+++ b/pandas/io/stata.py
@@ -2385,32 +2385,22 @@ def _prepare_data(self):
         data = self._convert_strls(data)
 
         # 3. Convert bad string data to '' and pad to correct length
-        dtypes = []
-        data_cols = []
-        has_strings = False
+        dtypes = {}
         native_byteorder = self._byteorder == _set_endianness(sys.byteorder)
         for i, col in enumerate(data):
             typ = typlist[i]
             if typ <= self._max_string_length:
-                has_strings = True
                 data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
                 stype = 'S{type}'.format(type=typ)
-                dtypes.append(('c' + str(i), stype))
-                string = data[col].str.encode(self._encoding)
-                data_cols.append(string.values.astype(stype))
+                dtypes[col] = stype
+                data[col] = data[col].str.encode(self._encoding).astype(stype)
             else:
-                values = data[col].values
                 dtype = data[col].dtype
                 if not native_byteorder:
                     dtype = dtype.newbyteorder(self._byteorder)
-                dtypes.append(('c' + str(i), dtype))
-                data_cols.append(values)
-        dtypes = np.dtype(dtypes)
+                dtypes[col] = dtype
 
-        if has_strings or not native_byteorder:
-            self.data = np.fromiter(zip(*data_cols), dtype=dtypes)
-        else:
-            self.data = data.to_records(index=False)
+        self.data = data.to_records(index=False, column_dtypes=dtypes)
 
     def _write_data(self):
         data = self.data
 
 | 
	The `to_stata()` function spends ~25-50% of its time massaging string and non-native-endian data and creating a `np.recarray` in a roundabout way. Using `column_dtypes` in `to_records()` allows some cleanup and a decent performance bump:
```
$ asv compare upstream/master HEAD -s --sort ratio
Benchmarks that have improved:
       before           after         ratio
     [4cbee179]       [9bf67cc5]
     <to_stata~1>       <to_stata>
-         709±9ms         552±20ms     0.78  io.stata.Stata.time_write_stata('tw')
-        409±30ms         233±30ms     0.57  io.stata.Stata.time_write_stata('tq')
-        402±20ms         227±30ms     0.56  io.stata.Stata.time_write_stata('tc')
-         398±9ms         222±30ms     0.56  io.stata.Stata.time_write_stata('th')
-        420±20ms         231±30ms     0.55  io.stata.Stata.time_write_stata('tm')
-        396±10ms          214±3ms     0.54  io.stata.Stata.time_write_stata('ty')
-         389±8ms         207±10ms     0.53  io.stata.Stata.time_write_stata('td')
Benchmarks that have stayed the same:
       before           after         ratio
     [4cbee179]       [9bf67cc5]
     <to_stata~1>       <to_stata>
          527±6ms         563±30ms     1.07  io.stata.Stata.time_read_stata('th')
         507±20ms          531±9ms     1.05  io.stata.Stata.time_read_stata('ty')
         519±10ms         543±30ms     1.05  io.stata.Stata.time_read_stata('tm')
         484±10ms         504±10ms     1.04  io.stata.Stata.time_read_stata('tw')
          149±6ms          152±2ms     1.02  io.stata.Stata.time_read_stata('tc')
          152±3ms          153±8ms     1.01  io.stata.Stata.time_read_stata('td')
         533±20ms          533±6ms     1.00  io.stata.Stata.time_read_stata('tq')
```
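For reference, the conversion now reduces to a single `to_records()` call. A minimal sketch with a made-up frame (the column names here are illustrative, not from the stata writer):
```python
import numpy as np
import pandas as pd

df = pd.DataFrame({'name': ['ab', 'cd'], 'value': [1.5, 2.5]})

# Strings become fixed-width bytes columns and numeric columns can be
# given an explicit (possibly byte-swapped) dtype, all in one call.
rec = df.to_records(index=False,
                    column_dtypes={'name': 'S2', 'value': np.float64})
```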
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25045 
 | 
	2019-01-30T23:41:47Z 
 | 
	2019-02-01T20:56:06Z 
 | 
	2019-02-01T20:56:06Z 
 | 
	2019-02-01T20:56:09Z 
 | 
					
	CLN: to_pickle internals 
 | 
	diff --git a/pandas/compat/pickle_compat.py b/pandas/compat/pickle_compat.py
index 61295b8249f58..8f16f8154b952 100644
--- a/pandas/compat/pickle_compat.py
+++ b/pandas/compat/pickle_compat.py
@@ -201,7 +201,7 @@ def load_newobj_ex(self):
     pass
 
 
-def load(fh, encoding=None, compat=False, is_verbose=False):
+def load(fh, encoding=None, is_verbose=False):
     """load a pickle, with a provided encoding
 
     if compat is True:
@@ -212,7 +212,6 @@ def load(fh, encoding=None, compat=False, is_verbose=False):
     ----------
     fh : a filelike object
     encoding : an optional encoding
-    compat : provide Series compatibility mode, boolean, default False
     is_verbose : show exception output
     """
 
diff --git a/pandas/io/pickle.py b/pandas/io/pickle.py
index 789f55a62dc58..ab4a266853a78 100644
--- a/pandas/io/pickle.py
+++ b/pandas/io/pickle.py
@@ -1,8 +1,7 @@
 """ pickle compat """
 import warnings
 
-import numpy as np
-from numpy.lib.format import read_array, write_array
+from numpy.lib.format import read_array
 
 from pandas.compat import PY3, BytesIO, cPickle as pkl, pickle_compat as pc
 
@@ -76,6 +75,7 @@ def to_pickle(obj, path, compression='infer', protocol=pkl.HIGHEST_PROTOCOL):
     try:
         f.write(pkl.dumps(obj, protocol=protocol))
     finally:
+        f.close()
         for _f in fh:
             _f.close()
 
@@ -138,63 +138,32 @@ def read_pickle(path, compression='infer'):
     >>> os.remove("./dummy.pkl")
     """
     path = _stringify_path(path)
+    f, fh = _get_handle(path, 'rb', compression=compression, is_text=False)
+
+    # 1) try with cPickle
+    # 2) try with the compat pickle to handle subclass changes
+    # 3) pass encoding only if it's not None as py2 doesn't handle the param
 
-    def read_wrapper(func):
-        # wrapper file handle open/close operation
-        f, fh = _get_handle(path, 'rb',
-                            compression=compression,
-                            is_text=False)
-        try:
-            return func(f)
-        finally:
-            for _f in fh:
-                _f.close()
-
-    def try_read(path, encoding=None):
-        # try with cPickle
-        # try with current pickle, if we have a Type Error then
-        # try with the compat pickle to handle subclass changes
-        # pass encoding only if its not None as py2 doesn't handle
-        # the param
-
-        # cpickle
-        # GH 6899
-        try:
-            with warnings.catch_warnings(record=True):
-                # We want to silence any warnings about, e.g. moved modules.
-                warnings.simplefilter("ignore", Warning)
-                return read_wrapper(lambda f: pkl.load(f))
-        except Exception:  # noqa: E722
-            # reg/patched pickle
-            # compat not used in pandas/compat/pickle_compat.py::load
-            # TODO: remove except block OR modify pc.load to use compat
-            try:
-                return read_wrapper(
-                    lambda f: pc.load(f, encoding=encoding, compat=False))
-            # compat pickle
-            except Exception:  # noqa: E722
-                return read_wrapper(
-                    lambda f: pc.load(f, encoding=encoding, compat=True))
     try:
-        return try_read(path)
+        with warnings.catch_warnings(record=True):
+            # We want to silence any warnings about, e.g. moved modules.
+            warnings.simplefilter("ignore", Warning)
+            return pkl.load(f)
     except Exception:  # noqa: E722
-        if PY3:
-            return try_read(path, encoding='latin1')
-        raise
-
+        try:
+            return pc.load(f, encoding=None)
+        except Exception:  # noqa: E722
+            if PY3:
+                return pc.load(f, encoding='latin1')
+            raise
+    finally:
+        f.close()
+        for _f in fh:
+            _f.close()
 
 # compat with sparse pickle / unpickle
 
 
-def _pickle_array(arr):
-    arr = arr.view(np.ndarray)
-
-    buf = BytesIO()
-    write_array(buf, arr)
-
-    return buf.getvalue()
-
-
 def _unpickle_array(bytes):
     arr = read_array(BytesIO(bytes))
 
 
 | 
	- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25044 
 | 
	2019-01-30T23:27:07Z 
 | 
	2019-02-01T18:50:36Z 
 | 
	2019-02-01T18:50:36Z 
 | 
	2019-02-01T18:51:56Z 
 | 
					
	Backport PR #24993 on branch 0.24.x (Test nested PandasArray) 
 | 
	diff --git a/pandas/core/arrays/numpy_.py b/pandas/core/arrays/numpy_.py
index 47517782e2bbf..791ff44303e96 100644
--- a/pandas/core/arrays/numpy_.py
+++ b/pandas/core/arrays/numpy_.py
@@ -222,7 +222,7 @@ def __getitem__(self, item):
             item = item._ndarray
 
         result = self._ndarray[item]
-        if not lib.is_scalar(result):
+        if not lib.is_scalar(item):
             result = type(self)(result)
         return result
 
diff --git a/pandas/tests/extension/numpy_/__init__.py b/pandas/tests/extension/numpy_/__init__.py
new file mode 100644
index 0000000000000..e69de29bb2d1d
diff --git a/pandas/tests/extension/numpy_/conftest.py b/pandas/tests/extension/numpy_/conftest.py
new file mode 100644
index 0000000000000..daa93571c2957
--- /dev/null
+++ b/pandas/tests/extension/numpy_/conftest.py
@@ -0,0 +1,38 @@
+import numpy as np
+import pytest
+
+from pandas.core.arrays.numpy_ import PandasArray
+
+
[email protected]
+def allow_in_pandas(monkeypatch):
+    """
+    A monkeypatch to tell pandas to let us in.
+
+    By default, passing a PandasArray to an index / series / frame
+    constructor will unbox that PandasArray to an ndarray, and treat
+    it as a non-EA column. We don't want people using EAs without
+    reason.
+
+    The mechanism for this is a check against ABCPandasArray
+    in each constructor.
+
+    But, for testing, we need to allow them in pandas. So we patch
+    the _typ of PandasArray, so that we evade the ABCPandasArray
+    check.
+    """
+    with monkeypatch.context() as m:
+        m.setattr(PandasArray, '_typ', 'extension')
+        yield
+
+
[email protected]
+def na_value():
+    return np.nan
+
+
[email protected]
+def na_cmp():
+    def cmp(a, b):
+        return np.isnan(a) and np.isnan(b)
+    return cmp
diff --git a/pandas/tests/extension/test_numpy.py b/pandas/tests/extension/numpy_/test_numpy.py
similarity index 84%
rename from pandas/tests/extension/test_numpy.py
rename to pandas/tests/extension/numpy_/test_numpy.py
index 7ca6882c7441b..4c93d5ee0b9d7 100644
--- a/pandas/tests/extension/test_numpy.py
+++ b/pandas/tests/extension/numpy_/test_numpy.py
@@ -6,7 +6,7 @@
 from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
 import pandas.util.testing as tm
 
-from . import base
+from .. import base
 
 
 @pytest.fixture
@@ -14,28 +14,6 @@ def dtype():
     return PandasDtype(np.dtype('float'))
 
 
[email protected]
-def allow_in_pandas(monkeypatch):
-    """
-    A monkeypatch to tells pandas to let us in.
-
-    By default, passing a PandasArray to an index / series / frame
-    constructor will unbox that PandasArray to an ndarray, and treat
-    it as a non-EA column. We don't want people using EAs without
-    reason.
-
-    The mechanism for this is a check against ABCPandasArray
-    in each constructor.
-
-    But, for testing, we need to allow them in pandas. So we patch
-    the _typ of PandasArray, so that we evade the ABCPandasArray
-    check.
-    """
-    with monkeypatch.context() as m:
-        m.setattr(PandasArray, '_typ', 'extension')
-        yield
-
-
 @pytest.fixture
 def data(allow_in_pandas, dtype):
     return PandasArray(np.arange(1, 101, dtype=dtype._dtype))
@@ -46,18 +24,6 @@ def data_missing(allow_in_pandas):
     return PandasArray(np.array([np.nan, 1.0]))
 
 
[email protected]
-def na_value():
-    return np.nan
-
-
[email protected]
-def na_cmp():
-    def cmp(a, b):
-        return np.isnan(a) and np.isnan(b)
-    return cmp
-
-
 @pytest.fixture
 def data_for_sorting(allow_in_pandas):
     """Length-3 array with a known sort order.
diff --git a/pandas/tests/extension/numpy_/test_numpy_nested.py b/pandas/tests/extension/numpy_/test_numpy_nested.py
new file mode 100644
index 0000000000000..cf9b34dd08798
--- /dev/null
+++ b/pandas/tests/extension/numpy_/test_numpy_nested.py
@@ -0,0 +1,286 @@
+"""
+Tests for PandasArray with nested data. Users typically won't create
+these objects via `pd.array`, but they can show up through `.array`
+on a Series with nested data.
+
+We partition these tests into their own file, as many of the base
+tests fail, as they aren't appropriate for nested data. It is easier
+to have a separate file with its own data generating fixtures, than
+trying to skip based upon the value of a fixture.
+"""
+import pytest
+
+import pandas as pd
+from pandas.core.arrays.numpy_ import PandasArray, PandasDtype
+
+from .. import base
+
+# For NumPy <1.16, np.array([np.nan, (1,)]) raises
+# ValueError: setting an array element with a sequence.
+np = pytest.importorskip('numpy', minversion='1.16.0')
+
+
[email protected]
+def dtype():
+    return PandasDtype(np.dtype('object'))
+
+
[email protected]
+def data(allow_in_pandas, dtype):
+    return pd.Series([(i,) for i in range(100)]).array
+
+
[email protected]
+def data_missing(allow_in_pandas):
+    return PandasArray(np.array([np.nan, (1,)]))
+
+
[email protected]
+def data_for_sorting(allow_in_pandas):
+    """Length-3 array with a known sort order.
+
+    This should be three items [B, C, A] with
+    A < B < C
+    """
+    # Use an empty tuple for first element, then remove,
+    # to disable np.array's shape inference.
+    return PandasArray(
+        np.array([(), (2,), (3,), (1,)])[1:]
+    )
+
+
[email protected]
+def data_missing_for_sorting(allow_in_pandas):
+    """Length-3 array with a known sort order.
+
+    This should be three items [B, NA, A] with
+    A < B and NA missing.
+    """
+    return PandasArray(
+        np.array([(1,), np.nan, (0,)])
+    )
+
+
[email protected]
+def data_for_grouping(allow_in_pandas):
+    """Data for factorization, grouping, and unique tests.
+
+    Expected to be like [B, B, NA, NA, A, A, B, C]
+
+    Where A < B < C and NA is missing
+    """
+    a, b, c = (1,), (2,), (3,)
+    return PandasArray(np.array(
+        [b, b, np.nan, np.nan, a, a, b, c]
+    ))
+
+
+skip_nested = pytest.mark.skip(reason="Skipping for nested PandasArray")
+
+
+class BaseNumPyTests(object):
+    pass
+
+
+class TestCasting(BaseNumPyTests, base.BaseCastingTests):
+
+    @skip_nested
+    def test_astype_str(self, data):
+        pass
+
+
+class TestConstructors(BaseNumPyTests, base.BaseConstructorsTests):
+    @pytest.mark.skip(reason="We don't register our dtype")
+    # We don't want to register. This test should probably be split in two.
+    def test_from_dtype(self, data):
+        pass
+
+    @skip_nested
+    def test_array_from_scalars(self, data):
+        pass
+
+
+class TestDtype(BaseNumPyTests, base.BaseDtypeTests):
+
+    @pytest.mark.skip(reason="Incorrect expected.")
+    # we unsurprisingly clash with a NumPy name.
+    def test_check_dtype(self, data):
+        pass
+
+
+class TestGetitem(BaseNumPyTests, base.BaseGetitemTests):
+
+    @skip_nested
+    def test_getitem_scalar(self, data):
+        pass
+
+    @skip_nested
+    def test_take_series(self, data):
+        pass
+
+
+class TestGroupby(BaseNumPyTests, base.BaseGroupbyTests):
+    @skip_nested
+    def test_groupby_extension_apply(self, data_for_grouping, op):
+        pass
+
+
+class TestInterface(BaseNumPyTests, base.BaseInterfaceTests):
+    @skip_nested
+    def test_array_interface(self, data):
+        # NumPy array shape inference
+        pass
+
+
+class TestMethods(BaseNumPyTests, base.BaseMethodsTests):
+
+    @pytest.mark.skip(reason="TODO: remove?")
+    def test_value_counts(self, all_data, dropna):
+        pass
+
+    @pytest.mark.skip(reason="Incorrect expected")
+    # We have a bool dtype, so the result is an ExtensionArray
+    # but expected is not
+    def test_combine_le(self, data_repeated):
+        super(TestMethods, self).test_combine_le(data_repeated)
+
+    @skip_nested
+    def test_combine_add(self, data_repeated):
+        # Not numeric
+        pass
+
+    @skip_nested
+    def test_shift_fill_value(self, data):
+        # np.array shape inference. Shift implementation fails.
+        super().test_shift_fill_value(data)
+
+    @skip_nested
+    def test_unique(self, data, box, method):
+        # Fails creating expected
+        pass
+
+    @skip_nested
+    def test_fillna_copy_frame(self, data_missing):
+        # The "scalar" for this array isn't a scalar.
+        pass
+
+    @skip_nested
+    def test_fillna_copy_series(self, data_missing):
+        # The "scalar" for this array isn't a scalar.
+        pass
+
+    @skip_nested
+    def test_hash_pandas_object_works(self, data, as_frame):
+        # ndarray of tuples not hashable
+        pass
+
+    @skip_nested
+    def test_searchsorted(self, data_for_sorting, as_series):
+        # Test setup fails.
+        pass
+
+    @skip_nested
+    def test_where_series(self, data, na_value, as_frame):
+        # Test setup fails.
+        pass
+
+    @skip_nested
+    def test_repeat(self, data, repeats, as_series, use_numpy):
+        # Fails creating expected
+        pass
+
+
+class TestPrinting(BaseNumPyTests, base.BasePrintingTests):
+    pass
+
+
+class TestMissing(BaseNumPyTests, base.BaseMissingTests):
+
+    @skip_nested
+    def test_fillna_scalar(self, data_missing):
+        # Non-scalar "scalar" values.
+        pass
+
+    @skip_nested
+    def test_fillna_series_method(self, data_missing, method):
+        # Non-scalar "scalar" values.
+        pass
+
+    @skip_nested
+    def test_fillna_series(self, data_missing):
+        # Non-scalar "scalar" values.
+        pass
+
+    @skip_nested
+    def test_fillna_frame(self, data_missing):
+        # Non-scalar "scalar" values.
+        pass
+
+
+class TestReshaping(BaseNumPyTests, base.BaseReshapingTests):
+
+    @pytest.mark.skip("Incorrect parent test")
+    # not actually a mixed concat, since we concat int and int.
+    def test_concat_mixed_dtypes(self, data):
+        super(TestReshaping, self).test_concat_mixed_dtypes(data)
+
+    @skip_nested
+    def test_merge(self, data, na_value):
+        # Fails creating expected
+        pass
+
+    @skip_nested
+    def test_merge_on_extension_array(self, data):
+        # Fails creating expected
+        pass
+
+    @skip_nested
+    def test_merge_on_extension_array_duplicates(self, data):
+        # Fails creating expected
+        pass
+
+
+class TestSetitem(BaseNumPyTests, base.BaseSetitemTests):
+
+    @skip_nested
+    def test_setitem_scalar_series(self, data, box_in_series):
+        pass
+
+    @skip_nested
+    def test_setitem_sequence(self, data, box_in_series):
+        pass
+
+    @skip_nested
+    def test_setitem_sequence_mismatched_length_raises(self, data, as_array):
+        pass
+
+    @skip_nested
+    def test_setitem_sequence_broadcasts(self, data, box_in_series):
+        pass
+
+    @skip_nested
+    def test_setitem_loc_scalar_mixed(self, data):
+        pass
+
+    @skip_nested
+    def test_setitem_loc_scalar_multiple_homogoneous(self, data):
+        pass
+
+    @skip_nested
+    def test_setitem_iloc_scalar_mixed(self, data):
+        pass
+
+    @skip_nested
+    def test_setitem_iloc_scalar_multiple_homogoneous(self, data):
+        pass
+
+    @skip_nested
+    def test_setitem_mask_broadcast(self, data, setter):
+        pass
+
+    @skip_nested
+    def test_setitem_scalar_key_sequence_raise(self, data):
+        pass
+
+
+# Skip Arithmetics, NumericReduce, BooleanReduce, Parsing
 
 | 
	Backport PR #24993: Test nested PandasArray 
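Aside from relocating the tests, note the one-character fix in `__getitem__`: with nested (object-dtype) data, the value returned by scalar indexing (e.g. a tuple) is not itself a scalar, so the wrap-or-not decision must be made on the key rather than the result. A sketch of the distinction (the array is built via `np.empty` to sidestep numpy's shape inference on tuples):
```python
import numpy as np
import pandas as pd

values = np.empty(2, dtype=object)
values[:] = [(1,), (2,)]
arr = pd.arrays.PandasArray(values)

arr[0]       # scalar key -> return the raw tuple, don't re-wrap it
arr[[0, 1]]  # non-scalar key -> wrap the result in a new PandasArray
```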
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25042 
 | 
	2019-01-30T21:18:28Z 
 | 
	2019-01-30T22:28:45Z 
 | 
	2019-01-30T22:28:45Z 
 | 
	2019-01-30T22:28:46Z 
 | 
					
	Backport PR #25033 on branch 0.24.x (BUG: Fixed merging on tz-aware) 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 57fdff041db28..047404e93914b 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -23,6 +23,7 @@ Fixed Regressions
 - Bug in :meth:`DataFrame.itertuples` with ``records`` orient raising an ``AttributeError`` when the ``DataFrame`` contained more than 255 columns (:issue:`24939`)
 - Bug in :meth:`DataFrame.itertuples` orient converting integer column names to strings prepended with an underscore (:issue:`24940`)
 - Fixed regression in :class:`Index.intersection` incorrectly sorting the values by default (:issue:`24959`).
+- Fixed regression in :func:`merge` when merging an empty ``DataFrame`` with multiple timezone-aware columns on one of the timezone-aware columns (:issue:`25014`).
 
 .. _whatsnew_0241.enhancements:
 
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 4a16707a376e9..640587b7f9f31 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -183,7 +183,7 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
                         is_datetime64tz_dtype(empty_dtype)):
                     if self.block is None:
                         array = empty_dtype.construct_array_type()
-                        return array(np.full(self.shape[1], fill_value),
+                        return array(np.full(self.shape[1], fill_value.value),
                                      dtype=empty_dtype)
                     pass
                 elif getattr(self.block, 'is_categorical', False):
@@ -335,8 +335,10 @@ def get_empty_dtype_and_na(join_units):
     elif 'category' in upcast_classes:
         return np.dtype(np.object_), np.nan
     elif 'datetimetz' in upcast_classes:
+        # GH-25014. We use NaT instead of iNaT, since this eventually
+        # ends up in DatetimeArray.take, which does not allow iNaT.
         dtype = upcast_classes['datetimetz']
-        return dtype[0], tslibs.iNaT
+        return dtype[0], tslibs.NaT
     elif 'datetime' in upcast_classes:
         return np.dtype('M8[ns]'), tslibs.iNaT
     elif 'timedelta' in upcast_classes:
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index f0a3ddc8ce8a4..1e60fdbebfeb3 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -616,6 +616,24 @@ def test_merge_on_datetime64tz(self):
         assert result['value_x'].dtype == 'datetime64[ns, US/Eastern]'
         assert result['value_y'].dtype == 'datetime64[ns, US/Eastern]'
 
+    def test_merge_on_datetime64tz_empty(self):
+        # https://github.com/pandas-dev/pandas/issues/25014
+        dtz = pd.DatetimeTZDtype(tz='UTC')
+        right = pd.DataFrame({'date': [pd.Timestamp('2018', tz=dtz.tz)],
+                              'value': [4.0],
+                              'date2': [pd.Timestamp('2019', tz=dtz.tz)]},
+                             columns=['date', 'value', 'date2'])
+        left = right[:0]
+        result = left.merge(right, on='date')
+        expected = pd.DataFrame({
+            'value_x': pd.Series(dtype=float),
+            'date2_x': pd.Series(dtype=dtz),
+            'date': pd.Series(dtype=dtz),
+            'value_y': pd.Series(dtype=float),
+            'date2_y': pd.Series(dtype=dtz),
+        }, columns=['value_x', 'date2_x', 'date', 'value_y', 'date2_y'])
+        tm.assert_frame_equal(result, expected)
+
     def test_merge_datetime64tz_with_dst_transition(self):
         # GH 18885
         df1 = pd.DataFrame(pd.date_range(
 
 | 
	Backport PR #25033: BUG: Fixed merging on tz-aware 
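Per the new test, the regression looked like this: merging an empty frame with multiple tz-aware columns on one of them raised, because the datetimetz upcast used the integer sentinel `iNaT` as the fill value, which eventually reaches `DatetimeArray.take` where only `NaT` is allowed. A minimal reproducer (GH 25014):
```python
import pandas as pd

right = pd.DataFrame({'date': [pd.Timestamp('2018', tz='UTC')],
                      'value': [4.0],
                      'date2': [pd.Timestamp('2019', tz='UTC')]})
left = right[:0]               # empty, same tz-aware dtypes

left.merge(right, on='date')   # raised before this fix; now returns an
                               # empty frame with the expected dtypes
```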
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25041 
 | 
	2019-01-30T21:17:40Z 
 | 
	2019-01-30T22:27:20Z 
 | 
	2019-01-30T22:27:20Z 
 | 
	2019-01-30T22:27:20Z 
 | 
					
	BUG: to_clipboard text truncated for Python 3 on Windows for UTF-16 text 
 | 
	diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index a9fa8b2174dd0..880eaed3b5dfb 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -163,6 +163,7 @@ MultiIndex
 I/O
 ^^^
 
+- Fixed bug in missing text when using :meth:`to_clipboard` if copying utf-16 characters in Python 3 on Windows (:issue:`25040`)
 -
 -
 -
diff --git a/pandas/io/clipboard/windows.py b/pandas/io/clipboard/windows.py
index 3d979a61b5f2d..4f5275af693b7 100644
--- a/pandas/io/clipboard/windows.py
+++ b/pandas/io/clipboard/windows.py
@@ -29,6 +29,7 @@ def init_windows_clipboard():
                                  HINSTANCE, HMENU, BOOL, UINT, HANDLE)
 
     windll = ctypes.windll
+    msvcrt = ctypes.CDLL('msvcrt')
 
     safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA)
     safeCreateWindowExA.argtypes = [DWORD, LPCSTR, LPCSTR, DWORD, INT, INT,
@@ -71,6 +72,10 @@ def init_windows_clipboard():
     safeGlobalUnlock.argtypes = [HGLOBAL]
     safeGlobalUnlock.restype = BOOL
 
+    wcslen = CheckedCall(msvcrt.wcslen)
+    wcslen.argtypes = [c_wchar_p]
+    wcslen.restype = UINT
+
     GMEM_MOVEABLE = 0x0002
     CF_UNICODETEXT = 13
 
@@ -129,13 +134,13 @@ def copy_windows(text):
                     # If the hMem parameter identifies a memory object,
                     # the object must have been allocated using the
                     # function with the GMEM_MOVEABLE flag.
-                    count = len(text) + 1
+                    count = wcslen(text) + 1
                     handle = safeGlobalAlloc(GMEM_MOVEABLE,
                                              count * sizeof(c_wchar))
                     locked_handle = safeGlobalLock(handle)
 
-                    ctypes.memmove(c_wchar_p(locked_handle),
-                                   c_wchar_p(text), count * sizeof(c_wchar))
+                    ctypes.memmove(c_wchar_p(locked_handle), c_wchar_p(text),
+                                   count * sizeof(c_wchar))
 
                     safeGlobalUnlock(handle)
                     safeSetClipboardData(CF_UNICODETEXT, handle)
diff --git a/pandas/tests/io/test_clipboard.py b/pandas/tests/io/test_clipboard.py
index 8eb26d9f3dec5..565db92210b0a 100644
--- a/pandas/tests/io/test_clipboard.py
+++ b/pandas/tests/io/test_clipboard.py
@@ -12,6 +12,7 @@
 from pandas.util import testing as tm
 from pandas.util.testing import makeCustomDataframe as mkdf
 
+from pandas.io.clipboard import clipboard_get, clipboard_set
 from pandas.io.clipboard.exceptions import PyperclipException
 
 try:
@@ -30,8 +31,8 @@ def build_kwargs(sep, excel):
     return kwargs
 
 
[email protected](params=['delims', 'utf8', 'string', 'long', 'nonascii',
-                        'colwidth', 'mixed', 'float', 'int'])
[email protected](params=['delims', 'utf8', 'utf16', 'string', 'long',
+                        'nonascii', 'colwidth', 'mixed', 'float', 'int'])
 def df(request):
     data_type = request.param
 
@@ -41,6 +42,10 @@ def df(request):
     elif data_type == 'utf8':
         return pd.DataFrame({'a': ['µasd', 'Ωœ∑´'],
                              'b': ['øπ∆˚¬', 'œ∑´®']})
+    elif data_type == 'utf16':
+        return pd.DataFrame({'a': ['\U0001f44d\U0001f44d',
+                                   '\U0001f44d\U0001f44d'],
+                             'b': ['abc', 'def']})
     elif data_type == 'string':
         return mkdf(5, 3, c_idx_type='s', r_idx_type='i',
                     c_idx_names=[None], r_idx_names=[None])
@@ -225,3 +230,14 @@ def test_invalid_encoding(self, df):
     @pytest.mark.parametrize('enc', ['UTF-8', 'utf-8', 'utf8'])
     def test_round_trip_valid_encodings(self, enc, df):
         self.check_round_trip_frame(df, encoding=enc)
+
+
[email protected]
[email protected]
[email protected](not _DEPS_INSTALLED,
+                    reason="clipboard primitives not installed")
[email protected]('data', [u'\U0001f44d...', u'Ωœ∑´...', 'abcd...'])
+def test_raw_roundtrip(data):
+    # PR #25040 wide unicode wasn't copied correctly on PY3 on windows
+    clipboard_set(data)
+    assert data == clipboard_get()
 
 | 
	- [ ] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
For Windows users where Python is compiled with UCS-4 (Python 3 primarily), tables copied to the clipboard are missing data from the end whenever the dataframe contains Unicode characters with a 4-byte representation in UTF-16 (i.e. in the U+010000 to U+10FFFF range).  The bug can be reproduced here:
```python
import pandas
obj = pandas.DataFrame([u'\U0001f44d\U0001f44d',
                        u'12345'])
obj.to_clipboard()
```
where the clipboard text comes out as
```
	0
0	👍👍
1	1234
```
One character is chopped from the end of the clipboard string for each 4-byte Unicode character copied.
Or, more to the point:
```python
pandas.io.clipboard.clipboard_set(u'\U0001f44d 12345')
```
produces
```
👍 1234
```
The cause of this issue is that ```len(u'\U0001f44d')==1``` when Python is built with UCS-4, and pandas allocates 2 bytes per Python character in the clipboard buffer, but the character actually consumes 4 bytes, displacing another character at the end of the string to be copied.  In UCS-2 (most Python 2 builds), ```len(u'\U0001f44d')==2```, so 4 bytes are allocated and consumed by the character.
My proposed change (affecting only Windows clipboard operations) measures the text length in UTF-16 code units (via ```wcslen```; UTF-16 little endian is the format the Windows clipboard uses), rather than relying on Python's ```len(text) * 2``` to decide how many bytes to allocate for the clipboard buffer.
I've tested this change in Python 3.6 and 2.7 on Windows 7 x64.  I don't expect it to cause issues with other versions of Windows, but I would appreciate it if anyone on older versions of Windows could double check.
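To make the arithmetic concrete (Python 3 / UCS-4 semantics):
```python
text = u'\U0001f44d 12345'

len(text)                      # 7 code points
len(text.encode('utf-16-le'))  # 16 bytes == 8 UTF-16 code units

# Allocating len(text) * 2 == 14 bytes under-allocates by 2 bytes (one
# UTF-16 code unit) per non-BMP character, so one character is clipped
# from the end of the copied string for each such character.
```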
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25040 
 | 
	2019-01-30T21:17:08Z 
 | 
	2019-02-01T20:53:57Z 
 | 
	2019-02-01T20:53:56Z 
 | 
	2019-02-01T20:53:59Z 
 | 
					
	BUG: avoid usage in_qtconsole for recent IPython versions 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 047404e93914b..521319c55a503 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -83,7 +83,7 @@ Bug Fixes
 
 **Other**
 
--
+- Fixed AttributeError when printing a DataFrame's HTML repr after accessing the IPython config object (:issue:`25036`)
 -
 
 .. _whatsnew_0.241.contributors:
diff --git a/pandas/core/frame.py b/pandas/core/frame.py
index 2049a8aa960bf..78c9f2aa96472 100644
--- a/pandas/core/frame.py
+++ b/pandas/core/frame.py
@@ -17,6 +17,7 @@
 import itertools
 import sys
 import warnings
+from distutils.version import LooseVersion
 from textwrap import dedent
 
 import numpy as np
@@ -646,9 +647,15 @@ def _repr_html_(self):
         # XXX: In IPython 3.x and above, the Qt console will not attempt to
         # display HTML, so this check can be removed when support for
         # IPython 2.x is no longer needed.
-        if console.in_qtconsole():
-            # 'HTML output is disabled in QtConsole'
-            return None
+        try:
+            import IPython
+        except ImportError:
+            pass
+        else:
+            if LooseVersion(IPython.__version__) < LooseVersion('3.0'):
+                if console.in_qtconsole():
+                    # 'HTML output is disabled in QtConsole'
+                    return None
 
         if self._info_repr():
             buf = StringIO(u(""))
diff --git a/pandas/tests/io/formats/test_format.py b/pandas/tests/io/formats/test_format.py
index 5d922ccaf1fd5..b0cf5a2f17609 100644
--- a/pandas/tests/io/formats/test_format.py
+++ b/pandas/tests/io/formats/test_format.py
@@ -12,6 +12,7 @@
 import os
 import re
 import sys
+import textwrap
 import warnings
 
 import dateutil
@@ -2777,3 +2778,17 @@ def test_format_percentiles():
         fmt.format_percentiles([2, 0.1, 0.5])
     with pytest.raises(ValueError, match=msg):
         fmt.format_percentiles([0.1, 0.5, 'a'])
+
+
+def test_repr_html_ipython_config(ip):
+    code = textwrap.dedent("""\
+    import pandas as pd
+    df = pd.DataFrame({"A": [1, 2]})
+    df._repr_html_()
+
+    cfg = get_ipython().config
+    cfg['IPKernelApp']['parent_appname']
+    df._repr_html_()
+    """)
+    result = ip.run_cell(code)
+    assert not result.error_in_exec
 
 | 
	I've verified this manually with qtconsole 4.4.0, but if others want to check that'd be helpful.

What release should this be done in? 0.24.1, 0.24.2 or 0.25.0?
Closes https://github.com/pandas-dev/pandas/issues/25036 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25039 
 | 
	2019-01-30T20:23:35Z 
 | 
	2019-01-31T16:02:38Z 
 | 
	2019-01-31T16:02:37Z 
 | 
	2019-01-31T16:02:38Z 
 | 
					
	DOC: fix error in documentation #24981 
 | 
	diff --git a/doc/source/user_guide/groupby.rst b/doc/source/user_guide/groupby.rst
index 953f40d1afebe..2c2e5c5425216 100644
--- a/doc/source/user_guide/groupby.rst
+++ b/doc/source/user_guide/groupby.rst
@@ -15,7 +15,7 @@ steps:
 
 Out of these, the split step is the most straightforward. In fact, in many
 situations we may wish to split the data set into groups and do something with
-those groups. In the apply step, we might wish to one of the
+those groups. In the apply step, we might wish to do one of the
 following:
 
 * **Aggregation**: compute a summary statistic (or statistics) for each
 
 | 
	Added "do" in the last sentence of the second paragraph.
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25038 
 | 
	2019-01-30T20:06:46Z 
 | 
	2019-01-30T21:56:44Z 
 | 
	2019-01-30T21:56:44Z 
 | 
	2019-01-30T21:56:47Z 
 | 
					
	DOC: Example from docstring was proposing wrong interpolation order 
 | 
	diff --git a/pandas/core/generic.py b/pandas/core/generic.py
index a351233a77465..cff685c2ad7cb 100644
--- a/pandas/core/generic.py
+++ b/pandas/core/generic.py
@@ -6601,7 +6601,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
               'barycentric', 'polynomial': Passed to
               `scipy.interpolate.interp1d`. Both 'polynomial' and 'spline'
               require that you also specify an `order` (int),
-              e.g. ``df.interpolate(method='polynomial', order=4)``.
+              e.g. ``df.interpolate(method='polynomial', order=5)``.
               These use the numerical values of the index.
             * 'krogh', 'piecewise_polynomial', 'spline', 'pchip', 'akima':
               Wrappers around the SciPy interpolation methods of similar
 
 | 
	Currently the docstring explaining interpolation proposes using polynomial interpolation with order equal to 4. Unfortunately, scipy does not allow that value to be used, throwing a ValueError from here: https://github.com/scipy/scipy/blob/5875fd397eb4e6adcfa0c65f7b9006424c066cb0/scipy/interpolate/_bsplines.py#L583
Looking at the blame, the last edit to that scipy code was 5 years ago, so the failure does not depend on any particular recent scipy version.
The interpolations with order equal to 2 that are spread around the docstrings (and doctests) do not pass through the method throwing that exception, so they are okay.
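For completeness, the corrected example runs fine (a quick sketch; needs scipy installed, and a quintic fit needs at least six known points):
```python
import numpy as np
import pandas as pd

s = pd.Series([0.0, 1.0, 8.0, np.nan, 64.0, 125.0, 216.0, 343.0])
s.interpolate(method='polynomial', order=5)  # ok; order=4 raises ValueError
```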
- [-] closes #xxxx
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [-] whatsnew entry
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25035 
 | 
	2019-01-30T17:55:28Z 
 | 
	2019-01-31T12:25:55Z 
 | 
	2019-01-31T12:25:55Z 
 | 
	2019-01-31T12:25:57Z 
 | 
					
	BUG: Fixed merging on tz-aware 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 57fdff041db28..047404e93914b 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -23,6 +23,7 @@ Fixed Regressions
 - Bug in :meth:`DataFrame.itertuples` with ``records`` orient raising an ``AttributeError`` when the ``DataFrame`` contained more than 255 columns (:issue:`24939`)
 - Bug in :meth:`DataFrame.itertuples` orient converting integer column names to strings prepended with an underscore (:issue:`24940`)
 - Fixed regression in :class:`Index.intersection` incorrectly sorting the values by default (:issue:`24959`).
+- Fixed regression in :func:`merge` when merging an empty ``DataFrame`` with multiple timezone-aware columns on one of the timezone-aware columns (:issue:`25014`).
 
 .. _whatsnew_0241.enhancements:
 
diff --git a/pandas/core/internals/concat.py b/pandas/core/internals/concat.py
index 4a16707a376e9..640587b7f9f31 100644
--- a/pandas/core/internals/concat.py
+++ b/pandas/core/internals/concat.py
@@ -183,7 +183,7 @@ def get_reindexed_values(self, empty_dtype, upcasted_na):
                         is_datetime64tz_dtype(empty_dtype)):
                     if self.block is None:
                         array = empty_dtype.construct_array_type()
-                        return array(np.full(self.shape[1], fill_value),
+                        return array(np.full(self.shape[1], fill_value.value),
                                      dtype=empty_dtype)
                     pass
                 elif getattr(self.block, 'is_categorical', False):
@@ -335,8 +335,10 @@ def get_empty_dtype_and_na(join_units):
     elif 'category' in upcast_classes:
         return np.dtype(np.object_), np.nan
     elif 'datetimetz' in upcast_classes:
+        # GH-25014. We use NaT instead of iNaT, since this eventually
+        # ends up in DatetimeArray.take, which does not allow iNaT.
         dtype = upcast_classes['datetimetz']
-        return dtype[0], tslibs.iNaT
+        return dtype[0], tslibs.NaT
     elif 'datetime' in upcast_classes:
         return np.dtype('M8[ns]'), tslibs.iNaT
     elif 'timedelta' in upcast_classes:
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index c17c301968269..a0a20d1da6cef 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -616,6 +616,24 @@ def test_merge_on_datetime64tz(self):
         assert result['value_x'].dtype == 'datetime64[ns, US/Eastern]'
         assert result['value_y'].dtype == 'datetime64[ns, US/Eastern]'
 
+    def test_merge_on_datetime64tz_empty(self):
+        # https://github.com/pandas-dev/pandas/issues/25014
+        dtz = pd.DatetimeTZDtype(tz='UTC')
+        right = pd.DataFrame({'date': [pd.Timestamp('2018', tz=dtz.tz)],
+                              'value': [4.0],
+                              'date2': [pd.Timestamp('2019', tz=dtz.tz)]},
+                             columns=['date', 'value', 'date2'])
+        left = right[:0]
+        result = left.merge(right, on='date')
+        expected = pd.DataFrame({
+            'value_x': pd.Series(dtype=float),
+            'date2_x': pd.Series(dtype=dtz),
+            'date': pd.Series(dtype=dtz),
+            'value_y': pd.Series(dtype=float),
+            'date2_y': pd.Series(dtype=dtz),
+        }, columns=['value_x', 'date2_x', 'date', 'value_y', 'date2_y'])
+        tm.assert_frame_equal(result, expected)
+
     def test_merge_datetime64tz_with_dst_transition(self):
         # GH 18885
         df1 = pd.DataFrame(pd.date_range(
 
 | 
	Closes https://github.com/pandas-dev/pandas/issues/25014
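A minimal reproduction, adapted from the test added in the diff above:
```
import pandas as pd

right = pd.DataFrame({'date': [pd.Timestamp('2018', tz='UTC')],
                      'value': [4.0],
                      'date2': [pd.Timestamp('2019', tz='UTC')]})
left = right[:0]              # empty frame with the same tz-aware columns
left.merge(right, on='date')  # failed before this fix; now returns an empty frame
```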
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25033 
 | 
	2019-01-30T16:05:41Z 
 | 
	2019-01-30T21:17:31Z 
 | 
	2019-01-30T21:17:31Z 
 | 
	2019-01-30T21:17:35Z 
 | 
					
	(Closes #25029) Removed extra bracket from cheatsheet code example. 
 | 
	diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf
index 696ed288cf7a6..d50896dc5ccc5 100644
Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet.pdf and b/doc/cheatsheet/Pandas_Cheat_Sheet.pdf differ
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx
index f8b98a6f1f8e4..95f2771017db5 100644
Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet.pptx and b/doc/cheatsheet/Pandas_Cheat_Sheet.pptx differ
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf
index daa65a944e68a..05e4b87f6a210 100644
Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf and b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pdf differ
diff --git a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx
index 6270a71e20ee8..cb0f058db5448 100644
Binary files a/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx and b/doc/cheatsheet/Pandas_Cheat_Sheet_JA.pptx differ
 
 | 
	Closes #25029
There was an additional bracket present under the "Create DataFrame with a MultiIndex" code example.
I removed this in both the English and Japanese versions of the cheatsheet. 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25032 
 | 
	2019-01-30T15:58:02Z 
 | 
	2019-02-09T17:26:39Z 
 | 
	2019-02-09T17:26:39Z 
 | 
	2019-02-09T17:26:42Z 
 | 
					
	ENH: Support index=True for io.sql.get_schema 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 222963a7ff71a..0923b05d41479 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -31,7 +31,6 @@ Fixed Regressions
 Enhancements
 ^^^^^^^^^^^^
 
-
 .. _whatsnew_0241.bug_fixes:
 
 Bug Fixes
diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 939fb8b9415bd..052f052420e41 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -165,7 +165,7 @@ MultiIndex
 I/O
 ^^^
 
--
+- :func:`get_schema` now accepts an `index` parameter (default: `False`) that includes the index in the generated schema. (:issue:`9084`)
 -
 -
 
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index aaface5415384..7e4cefddc2746 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -1223,8 +1223,9 @@ def drop_table(self, table_name, schema=None):
             self.get_table(table_name, schema).drop()
             self.meta.clear()
 
-    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
-        table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
+    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None,
+                           index=False):
+        table = SQLTable(table_name, self, frame=frame, index=index, keys=keys,
                          dtype=dtype)
         return str(table.sql_schema())
 
@@ -1565,13 +1566,14 @@ def drop_table(self, name, schema=None):
             name=_get_valid_sqlite_name(name))
         self.execute(drop_sql)
 
-    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
-        table = SQLiteTable(table_name, self, frame=frame, index=False,
+    def _create_sql_schema(self, frame, table_name, keys=None, dtype=None,
+                           index=False):
+        table = SQLiteTable(table_name, self, frame=frame, index=index,
                             keys=keys, dtype=dtype)
         return str(table.sql_schema())
 
 
-def get_schema(frame, name, keys=None, con=None, dtype=None):
+def get_schema(frame, name, keys=None, con=None, dtype=None, index=False):
     """
     Get the SQL db table schema for the given frame.
 
@@ -1589,8 +1591,11 @@ def get_schema(frame, name, keys=None, con=None, dtype=None):
     dtype : dict of column name to SQL type, default None
         Optional specifying the datatype for columns. The SQL type should
         be a SQLAlchemy type, or a string for sqlite3 fallback connection.
+    index : boolean, default False
+        Whether to include DataFrame index as a column
 
     """
 
     pandas_sql = pandasSQL_builder(con=con)
-    return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
+    return pandas_sql._create_sql_schema(
+        frame, name, keys=keys, dtype=dtype, index=index)
diff --git a/pandas/tests/io/test_sql.py b/pandas/tests/io/test_sql.py
index 75a6d8d009083..e37921441596b 100644
--- a/pandas/tests/io/test_sql.py
+++ b/pandas/tests/io/test_sql.py
@@ -823,6 +823,21 @@ def test_get_schema_keys(self):
         constraint_sentence = 'CONSTRAINT test_pk PRIMARY KEY ("A", "B")'
         assert constraint_sentence in create_sql
 
+    @pytest.mark.parametrize("index_arg, expected", [
+        ({}, False),
+        ({"index": False}, False),
+        ({"index": True}, True),
+    ])
+    def test_get_schema_with_index(self, index_arg, expected):
+        frame = DataFrame({
+            'one': pd.Series([1, 2, 3], index=['a', 'b', 'c']),
+            'two': pd.Series([1, 2, 3], index=['a', 'b', 'c'])
+        })
+        frame.index.name = 'alphabet'
+
+        create_sql = sql.get_schema(frame, 'test', con=self.conn, **index_arg)
+        assert ('alphabet' in create_sql) == expected
+
     def test_chunksize_read(self):
         df = DataFrame(np.random.randn(22, 5), columns=list('abcde'))
         df.to_sql('test_chunksize', self.conn, index=False)
 
 | 
	Closes pandas-dev/pandas#9084
- Decided to keep the default as `index=False` to preserve the existing `get_schema` behavior, even though `to_sql` defaults to `index=True`.
- Tempted to name the parameter `include_dataframe_index`, since "index" has a different meaning in a SQL context. (A usage sketch follows the checklist below.)
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
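A usage sketch of the new parameter (when `con` is omitted, the sqlite3 fallback generates the DDL):
```
import pandas as pd
from pandas.io import sql

df = pd.DataFrame({'one': [1, 2, 3]},
                  index=pd.Index(['a', 'b', 'c'], name='alphabet'))
print(sql.get_schema(df, 'test', index=True))  # 'alphabet' appears as a column
```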
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25030 
 | 
	2019-01-30T15:46:49Z 
 | 
	2019-05-03T05:37:19Z 
 | null  | 
	2019-05-03T05:37:20Z 
 | 
					
	CLN: typo fixups 
 | 
	diff --git a/pandas/_libs/interval.pyx b/pandas/_libs/interval.pyx
index 3147f36dcc835..eb511b1adb28a 100644
--- a/pandas/_libs/interval.pyx
+++ b/pandas/_libs/interval.pyx
@@ -18,7 +18,6 @@ cnp.import_array()
 
 
 cimport pandas._libs.util as util
-util.import_array()
 
 from pandas._libs.hashtable cimport Int64Vector, Int64VectorData
 
diff --git a/pandas/_libs/tslibs/nattype.pyx b/pandas/_libs/tslibs/nattype.pyx
index a55d15a7c4e85..92cbcce6c7042 100644
--- a/pandas/_libs/tslibs/nattype.pyx
+++ b/pandas/_libs/tslibs/nattype.pyx
@@ -382,7 +382,7 @@ class NaTType(_NaT):
     )
     combine = _make_error_func('combine',  # noqa:E128
         """
-        Timsetamp.combine(date, time)
+        Timestamp.combine(date, time)
 
         date, time -> datetime with same date and time fields
         """
diff --git a/pandas/_libs/tslibs/timestamps.pyx b/pandas/_libs/tslibs/timestamps.pyx
index fe0564cb62c30..3e6763e226a4a 100644
--- a/pandas/_libs/tslibs/timestamps.pyx
+++ b/pandas/_libs/tslibs/timestamps.pyx
@@ -197,7 +197,7 @@ def round_nsint64(values, mode, freq):
 
 # This is PITA. Because we inherit from datetime, which has very specific
 # construction requirements, we need to do object instantiation in python
-# (see Timestamp class above). This will serve as a C extension type that
+# (see Timestamp class below). This will serve as a C extension type that
 # shadows the python class, where we do any heavy lifting.
 cdef class _Timestamp(datetime):
 
@@ -670,7 +670,7 @@ class Timestamp(_Timestamp):
     @classmethod
     def combine(cls, date, time):
         """
-        Timsetamp.combine(date, time)
+        Timestamp.combine(date, time)
 
         date, time -> datetime with same date and time fields
         """
diff --git a/pandas/core/internals/blocks.py b/pandas/core/internals/blocks.py
index df764aa4ba666..36144c31dfef9 100644
--- a/pandas/core/internals/blocks.py
+++ b/pandas/core/internals/blocks.py
@@ -2072,17 +2072,9 @@ def get_values(self, dtype=None):
         return object dtype as boxed values, such as Timestamps/Timedelta
         """
         if is_object_dtype(dtype):
-            values = self.values
-
-            if self.ndim > 1:
-                values = values.ravel()
-
-            values = lib.map_infer(values, self._box_func)
-
-            if self.ndim > 1:
-                values = values.reshape(self.values.shape)
-
-            return values
+            values = self.values.ravel()
+            result = self._holder(values).astype(object)
+            return result.reshape(self.values.shape)
         return self.values
 
 
 
 | 
	Also edit DatetimeLikeBlockMixin.get_values to be much simpler. 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25028 
 | 
	2019-01-30T14:57:35Z 
 | 
	2019-01-31T12:27:49Z 
 | 
	2019-01-31T12:27:49Z 
 | 
	2020-04-05T17:36:54Z 
 | 
					
	DOC: 0.24.1 whatsnew 
 | 
	diff --git a/doc/source/index.rst.template b/doc/source/index.rst.template
index 51487c0d325b5..df2a29a76f3c5 100644
--- a/doc/source/index.rst.template
+++ b/doc/source/index.rst.template
@@ -39,7 +39,7 @@ See the :ref:`overview` for more detail about what's in the library.
 {% endif %}
 
     {% if not single_doc -%}
-    What's New in 0.24.0 <whatsnew/v0.24.0>
+    What's New in 0.24.1 <whatsnew/v0.24.1>
     install
     getting_started/index
     user_guide/index
 
 | 
	This PR has the documentation changes that are just for 0.24.x. I'll have another PR later with changes to 0.24.1.rst that should go to master first, before being backported. 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25027 
 | 
	2019-01-30T14:52:06Z 
 | 
	2019-02-01T20:09:00Z 
 | 
	2019-02-01T20:09:00Z 
 | 
	2019-02-01T20:09:03Z 
 | 
					
	DOC: Start 0.24.2.rst 
 | 
	diff --git a/doc/source/whatsnew/v0.24.2.rst b/doc/source/whatsnew/v0.24.2.rst
new file mode 100644
index 0000000000000..cba21ce7ee1e6
--- /dev/null
+++ b/doc/source/whatsnew/v0.24.2.rst
@@ -0,0 +1,99 @@
+:orphan:
+
+.. _whatsnew_0242:
+
+Whats New in 0.24.2 (February XX, 2019)
+---------------------------------------
+
+.. warning::
+
+   The 0.24.x series of releases will be the last to support Python 2. Future feature
+   releases will support Python 3 only. See :ref:`install.dropping-27` for more.
+
+{{ header }}
+
+These are the changes in pandas 0.24.2. See :ref:`release` for a full changelog
+including other versions of pandas.
+
+.. _whatsnew_0242.regressions:
+
+Fixed Regressions
+^^^^^^^^^^^^^^^^^
+
+-
+-
+-
+
+.. _whatsnew_0242.enhancements:
+
+Enhancements
+^^^^^^^^^^^^
+
+-
+-
+
+.. _whatsnew_0242.bug_fixes:
+
+Bug Fixes
+~~~~~~~~~
+
+**Conversion**
+
+-
+-
+-
+
+**Indexing**
+
+-
+-
+-
+
+**I/O**
+
+-
+-
+-
+
+**Categorical**
+
+-
+-
+-
+
+**Timezones**
+
+-
+-
+-
+
+**Timedelta**
+
+-
+-
+-
+
+**Reshaping**
+
+-
+-
+-
+
+**Visualization**
+
+-
+-
+-
+
+**Other**
+
+-
+-
+-
+
+.. _whatsnew_0.242.contributors:
+
+Contributors
+~~~~~~~~~~~~
+
+.. contributors:: v0.24.1..v0.24.2
\ No newline at end of file
 
 | 
	[ci skip]
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25026 
 | 
	2019-01-30T14:18:55Z 
 | 
	2019-02-01T12:27:16Z 
 | 
	2019-02-01T12:27:16Z 
 | 
	2019-02-01T12:27:16Z 
 | 
					
	Backport PR #24961 on branch 0.24.x (fix+test to_timedelta('NaT', box=False)) 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 8f4c3982c745f..82885f851e86b 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -65,7 +65,7 @@ Bug Fixes
 -
 
 **Timedelta**
-
+- Bug in :func:`to_timedelta` with `box=False` incorrectly returning a ``datetime64`` object instead of a ``timedelta64`` object (:issue:`24961`)
 -
 -
 -
diff --git a/pandas/core/tools/timedeltas.py b/pandas/core/tools/timedeltas.py
index e3428146b91d8..ddd21d0f62d08 100644
--- a/pandas/core/tools/timedeltas.py
+++ b/pandas/core/tools/timedeltas.py
@@ -120,7 +120,8 @@ def _coerce_scalar_to_timedelta_type(r, unit='ns', box=True, errors='raise'):
     try:
         result = Timedelta(r, unit)
         if not box:
-            result = result.asm8
+            # explicitly view as timedelta64 for case when result is pd.NaT
+            result = result.asm8.view('timedelta64[ns]')
     except ValueError:
         if errors == 'raise':
             raise
diff --git a/pandas/tests/scalar/timedelta/test_timedelta.py b/pandas/tests/scalar/timedelta/test_timedelta.py
index 9b5fdfb06a9fa..e1838e0160fec 100644
--- a/pandas/tests/scalar/timedelta/test_timedelta.py
+++ b/pandas/tests/scalar/timedelta/test_timedelta.py
@@ -309,8 +309,13 @@ def test_iso_conversion(self):
         assert to_timedelta('P0DT0H0M1S') == expected
 
     def test_nat_converters(self):
-        assert to_timedelta('nat', box=False).astype('int64') == iNaT
-        assert to_timedelta('nan', box=False).astype('int64') == iNaT
+        result = to_timedelta('nat', box=False)
+        assert result.dtype.kind == 'm'
+        assert result.astype('int64') == iNaT
+
+        result = to_timedelta('nan', box=False)
+        assert result.dtype.kind == 'm'
+        assert result.astype('int64') == iNaT
 
     @pytest.mark.parametrize('units, np_unit',
                              [(['Y', 'y'], 'Y'),
 
 | 
	Backport PR #24961: fix+test to_timedelta('NaT', box=False) 
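A quick check of the fixed behavior:
```
import pandas as pd

result = pd.to_timedelta('NaT', box=False)
result.dtype  # timedelta64[ns] after the fix; previously datetime64[ns]
```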
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25025 
 | 
	2019-01-30T12:43:22Z 
 | 
	2019-01-30T13:18:39Z 
 | 
	2019-01-30T13:18:39Z 
 | 
	2019-01-30T13:20:24Z 
 | 
					
	REGR: fix read_sql delegation for queries on MySQL/pymysql 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 828c35c10e958..defb84f438e3a 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -22,6 +22,7 @@ Fixed Regressions
 
 - Bug in :meth:`DataFrame.itertuples` with ``records`` orient raising an ``AttributeError`` when the ``DataFrame`` contained more than 255 columns (:issue:`24939`)
 - Bug in :meth:`DataFrame.itertuples` orient converting integer column names to strings prepended with an underscore (:issue:`24940`)
+- Fixed regression in :func:`read_sql` when passing certain queries with MySQL/pymysql (:issue:`24988`).
 - Fixed regression in :class:`Index.intersection` incorrectly sorting the values by default (:issue:`24959`).
 
 .. _whatsnew_0241.enhancements:
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index 5d1163b3e0024..aaface5415384 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -381,7 +381,8 @@ def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
 
     try:
         _is_table_name = pandas_sql.has_table(sql)
-    except (ImportError, AttributeError):
+    except Exception:
+        # using generic exception to catch errors from sql drivers (GH24988)
         _is_table_name = False
 
     if _is_table_name:
 
 | 
	Closes #24988, see discussion there regarding lack of test. 
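For context, `read_sql` first probes whether the string names a table and otherwise delegates to `read_sql_query`; the pymysql driver raises its own error type during that probe, which the old narrow `except` clause did not catch. A runnable sketch of the delegation using the sqlite3 driver (the actual regression needs a pymysql connection):
```
import sqlite3
import pandas as pd

conn = sqlite3.connect(':memory:')
pd.DataFrame({'id': [1, 2]}).to_sql('my_table', conn, index=False)

# A full query string is not a table name, so the has_table() probe must
# swallow driver errors and fall through to read_sql_query (GH24988).
pd.read_sql('SELECT * FROM my_table WHERE id = 1', conn)
```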
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25024 
 | 
	2019-01-30T09:49:43Z 
 | 
	2019-01-31T21:24:58Z 
 | 
	2019-01-31T21:24:57Z 
 | 
	2019-01-31T21:24:58Z 
 | 
					
	BUG: to_datetime(strs, utc=True) used previous UTC offset 
 | 
	diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index 867007b2ba7f5..24e3b42859416 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -103,7 +103,7 @@ Timedelta
 Timezones
 ^^^^^^^^^
 
--
+- Bug in :func:`to_datetime` with ``utc=True`` and datetime strings that would apply previously parsed UTC offsets to subsequent arguments (:issue:`24992`)
 -
 -
 
diff --git a/pandas/_libs/tslib.pyx b/pandas/_libs/tslib.pyx
index 798e338d5581b..f932e236b5218 100644
--- a/pandas/_libs/tslib.pyx
+++ b/pandas/_libs/tslib.pyx
@@ -645,6 +645,8 @@ cpdef array_to_datetime(ndarray[object] values, str errors='raise',
                             out_tzoffset_vals.add(out_tzoffset * 60.)
                             tz = pytz.FixedOffset(out_tzoffset)
                             value = tz_convert_single(value, tz, UTC)
+                            out_local = 0
+                            out_tzoffset = 0
                         else:
                             # Add a marker for naive string, to track if we are
                             # parsing mixed naive and aware strings
diff --git a/pandas/tests/indexes/datetimes/test_tools.py b/pandas/tests/indexes/datetimes/test_tools.py
index 38f5eab15041f..b94935d2521eb 100644
--- a/pandas/tests/indexes/datetimes/test_tools.py
+++ b/pandas/tests/indexes/datetimes/test_tools.py
@@ -714,6 +714,29 @@ def test_iso_8601_strings_with_different_offsets(self):
                                   NaT], tz='UTC')
         tm.assert_index_equal(result, expected)
 
+    def test_iss8601_strings_mixed_offsets_with_naive(self):
+        # GH 24992
+        result = pd.to_datetime([
+            '2018-11-28T00:00:00',
+            '2018-11-28T00:00:00+12:00',
+            '2018-11-28T00:00:00',
+            '2018-11-28T00:00:00+06:00',
+            '2018-11-28T00:00:00'
+        ], utc=True)
+        expected = pd.to_datetime([
+            '2018-11-28T00:00:00',
+            '2018-11-27T12:00:00',
+            '2018-11-28T00:00:00',
+            '2018-11-27T18:00:00',
+            '2018-11-28T00:00:00'
+        ], utc=True)
+        tm.assert_index_equal(result, expected)
+
+        items = ['2018-11-28T00:00:00+12:00', '2018-11-28T00:00:00']
+        result = pd.to_datetime(items, utc=True)
+        expected = pd.to_datetime(list(reversed(items)), utc=True)[::-1]
+        tm.assert_index_equal(result, expected)
+
     def test_non_iso_strings_with_tz_offset(self):
         result = to_datetime(['March 1, 2018 12:00:00+0400'] * 2)
         expected = DatetimeIndex([datetime(2018, 3, 1, 12,
 
 | 
	- [x] closes #24992
- [x] tests added / passed
- [x] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [x] whatsnew entry
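The essence of the bug, adapted from the new test: a naive string parsed after an offset-bearing string inherited the previously parsed UTC offset.
```
import pandas as pd

# Before the fix, the second (naive) string picked up the +12:00 offset
# left over from parsing the first string.
pd.to_datetime(['2018-11-28T00:00:00+12:00', '2018-11-28T00:00:00'], utc=True)
```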
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25020 
 | 
	2019-01-30T07:04:27Z 
 | 
	2019-01-31T12:29:33Z 
 | 
	2019-01-31T12:29:32Z 
 | 
	2019-01-31T15:59:28Z 
 | 
					
	CLN: do not use .repeat asv setting for storing benchmark data 
 | 
	diff --git a/asv_bench/benchmarks/strings.py b/asv_bench/benchmarks/strings.py
index e9f2727f64e15..b5b2c955f0133 100644
--- a/asv_bench/benchmarks/strings.py
+++ b/asv_bench/benchmarks/strings.py
@@ -102,10 +102,10 @@ def setup(self, repeats):
         N = 10**5
         self.s = Series(tm.makeStringIndex(N))
         repeat = {'int': 1, 'array': np.random.randint(1, 3, N)}
-        self.repeat = repeat[repeats]
+        self.values = repeat[repeats]
 
     def time_repeat(self, repeats):
-        self.s.str.repeat(self.repeat)
+        self.s.str.repeat(self.values)
 
 
 class Cat(object):
 
 | 
`asv` uses `.repeat` to specify the number of times a benchmark should be repeated; our `strings.Repeat` benchmark inadvertently used this attribute to store benchmark data. This doesn't cause issues until after the first parameter has run:
```
[ 99.87%] ··· strings.Repeat.time_repeat                                                                                                                                                          1/2 failed
[ 99.87%] ··· ========= ===========
               repeats             
              --------- -----------
                 int     151±0.9ms 
                array      failed  
              ========= ===========
[ 99.87%] ···· For parameters: 'array'
               Traceback (most recent call last):
                 File "/home/chris/code/asv/asv/benchmark.py", line 595, in run
                   min_repeat, max_repeat, max_time = self.repeat
               ValueError: too many values to unpack (expected 3)
               
               During handling of the above exception, another exception occurred:
               
               Traceback (most recent call last):
                 File "/home/chris/code/asv/asv/benchmark.py", line 1170, in main_run_server
                   main_run(run_args)
                 File "/home/chris/code/asv/asv/benchmark.py", line 1044, in main_run
                   result = benchmark.do_run()
                 File "/home/chris/code/asv/asv/benchmark.py", line 523, in do_run
                   return self.run(*self._current_params)
                 File "/home/chris/code/asv/asv/benchmark.py", line 597, in run
                   if self.repeat == 0:
               ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
```
With this PR, both parameters now succeed:
```
[  0.00%] · For pandas commit 8825f78e <repeat>:
[  0.00%] ·· Benchmarking conda-py3.6-Cython-matplotlib-numexpr-numpy-openpyxl-pytables-pytest-scipy-sqlalchemy-xlrd-xlsxwriter-xlwt
[ 50.00%] ··· Running (strings.Repeat.time_repeat--).
[100.00%] ··· strings.Repeat.time_repeat                                                                                                                                                                  ok
[100.00%] ··· ========= ===========
               repeats             
              --------- -----------
                 int      152±1ms  
                array    150±0.6ms 
              ========= ===========
```
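For reference, a sketch of the corrected benchmark class (reconstructed from the diff; the `params`/`param_names` declarations are assumed from the usual asv conventions):
```
import numpy as np
import pandas.util.testing as tm
from pandas import Series

class Repeat(object):
    params = ['int', 'array']
    param_names = ['repeats']

    def setup(self, repeats):
        N = 10**5
        self.s = Series(tm.makeStringIndex(N))
        repeat = {'int': 1, 'array': np.random.randint(1, 3, N)}
        self.values = repeat[repeats]  # renamed: `.repeat` is reserved by asv

    def time_repeat(self, repeats):
        self.s.str.repeat(self.values)
```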
- [ ] closes #xxxx
- [ ] tests added / passed
- [ ] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25015 
 | 
	2019-01-29T23:21:55Z 
 | 
	2019-01-30T05:48:52Z 
 | 
	2019-01-30T05:48:52Z 
 | 
	2019-01-30T05:48:59Z 
 | 
					
	Backport PR #24967 on branch 0.24.x (REGR: Preserve order by default in Index.difference) 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 8f4c3982c745f..828c35c10e958 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -22,6 +22,7 @@ Fixed Regressions
 
 - Bug in :meth:`DataFrame.itertuples` with ``records`` orient raising an ``AttributeError`` when the ``DataFrame`` contained more than 255 columns (:issue:`24939`)
 - Bug in :meth:`DataFrame.itertuples` orient converting integer column names to strings prepended with an underscore (:issue:`24940`)
+- Fixed regression in :class:`Index.intersection` incorrectly sorting the values by default (:issue:`24959`).
 
 .. _whatsnew_0241.enhancements:
 
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 767da81c5c43a..3d176012df22b 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2333,7 +2333,7 @@ def union(self, other, sort=True):
     def _wrap_setop_result(self, other, result):
         return self._constructor(result, name=get_op_result_name(self, other))
 
-    def intersection(self, other, sort=True):
+    def intersection(self, other, sort=False):
         """
         Form the intersection of two Index objects.
 
@@ -2342,11 +2342,15 @@ def intersection(self, other, sort=True):
         Parameters
         ----------
         other : Index or array-like
-        sort : bool, default True
+        sort : bool, default False
             Sort the resulting index if possible
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Changed the default from ``True`` to ``False``.
+
         Returns
         -------
         intersection : Index
diff --git a/pandas/core/indexes/datetimes.py b/pandas/core/indexes/datetimes.py
index cc373c06efcc9..ef941ab87ba12 100644
--- a/pandas/core/indexes/datetimes.py
+++ b/pandas/core/indexes/datetimes.py
@@ -594,7 +594,7 @@ def _wrap_setop_result(self, other, result):
         name = get_op_result_name(self, other)
         return self._shallow_copy(result, name=name, freq=None, tz=self.tz)
 
-    def intersection(self, other, sort=True):
+    def intersection(self, other, sort=False):
         """
         Specialized intersection for DatetimeIndex objects. May be much faster
         than Index.intersection
@@ -602,6 +602,14 @@ def intersection(self, other, sort=True):
         Parameters
         ----------
         other : DatetimeIndex or array-like
+        sort : bool, default True
+            Sort the resulting index if possible.
+
+            .. versionadded:: 0.24.0
+
+            .. versionchanged:: 0.24.1
+
+               Changed the default from ``True`` to ``False``.
 
         Returns
         -------
diff --git a/pandas/core/indexes/interval.py b/pandas/core/indexes/interval.py
index 0210560aaa21f..736de94991181 100644
--- a/pandas/core/indexes/interval.py
+++ b/pandas/core/indexes/interval.py
@@ -1093,8 +1093,8 @@ def equals(self, other):
     def overlaps(self, other):
         return self._data.overlaps(other)
 
-    def _setop(op_name):
-        def func(self, other, sort=True):
+    def _setop(op_name, sort=True):
+        def func(self, other, sort=sort):
             other = self._as_like_interval_index(other)
 
             # GH 19016: ensure set op will not return a prohibited dtype
@@ -1128,7 +1128,7 @@ def is_all_dates(self):
         return False
 
     union = _setop('union')
-    intersection = _setop('intersection')
+    intersection = _setop('intersection', sort=False)
     difference = _setop('difference')
     symmetric_difference = _setop('symmetric_difference')
 
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index e4d01a40bd181..16af3fe8eef26 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2910,7 +2910,7 @@ def union(self, other, sort=True):
         return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
                                       names=result_names)
 
-    def intersection(self, other, sort=True):
+    def intersection(self, other, sort=False):
         """
         Form the intersection of two MultiIndex objects.
 
@@ -2922,6 +2922,10 @@ def intersection(self, other, sort=True):
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Changed the default from ``True`` to ``False``.
+
         Returns
         -------
         Index
diff --git a/pandas/core/indexes/range.py b/pandas/core/indexes/range.py
index ebf5b279563cf..e17a6a682af40 100644
--- a/pandas/core/indexes/range.py
+++ b/pandas/core/indexes/range.py
@@ -343,7 +343,7 @@ def equals(self, other):
 
         return super(RangeIndex, self).equals(other)
 
-    def intersection(self, other, sort=True):
+    def intersection(self, other, sort=False):
         """
         Form the intersection of two Index objects.
 
@@ -355,6 +355,10 @@ def intersection(self, other, sort=True):
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Changed the default from ``True`` to ``False``.
+
         Returns
         -------
         intersection : Index
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index f3e9d835c7391..20e439de46bde 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -765,6 +765,11 @@ def test_intersect_str_dates(self, sort):
 
         assert len(result) == 0
 
+    def test_intersect_nosort(self):
+        result = pd.Index(['c', 'b', 'a']).intersection(['b', 'a'])
+        expected = pd.Index(['b', 'a'])
+        tm.assert_index_equal(result, expected)
+
     @pytest.mark.parametrize("sort", [True, False])
     def test_chained_union(self, sort):
         # Chained unions handles names correctly
@@ -1595,20 +1600,27 @@ def test_drop_tuple(self, values, to_drop):
         for drop_me in to_drop[1], [to_drop[1]]:
             pytest.raises(KeyError, removed.drop, drop_me)
 
-    @pytest.mark.parametrize("method,expected", [
+    @pytest.mark.parametrize("method,expected,sort", [
+        ('intersection', np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
+                                  dtype=[('num', int), ('let', 'a1')]),
+         False),
+
         ('intersection', np.array([(1, 'A'), (1, 'B'), (2, 'A'), (2, 'B')],
-                                  dtype=[('num', int), ('let', 'a1')])),
+                                  dtype=[('num', int), ('let', 'a1')]),
+         True),
+
         ('union', np.array([(1, 'A'), (1, 'B'), (1, 'C'), (2, 'A'), (2, 'B'),
-                            (2, 'C')], dtype=[('num', int), ('let', 'a1')]))
+                            (2, 'C')], dtype=[('num', int), ('let', 'a1')]),
+         True)
     ])
-    def test_tuple_union_bug(self, method, expected):
+    def test_tuple_union_bug(self, method, expected, sort):
         index1 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'), (2, 'B')],
                                 dtype=[('num', int), ('let', 'a1')]))
         index2 = Index(np.array([(1, 'A'), (2, 'A'), (1, 'B'),
                                  (2, 'B'), (1, 'C'), (2, 'C')],
                                 dtype=[('num', int), ('let', 'a1')]))
 
-        result = getattr(index1, method)(index2)
+        result = getattr(index1, method)(index2, sort=sort)
         assert result.ndim == 1
 
         expected = Index(expected)
 
 | 
	Backport PR #24967: REGR: Preserve order by default in Index.difference 
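The restored default, as exercised by the new `test_intersect_nosort`:
```
import pandas as pd

pd.Index(['c', 'b', 'a']).intersection(['b', 'a'])
# Index(['b', 'a'], dtype='object') -- original order preserved
```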
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25013 
 | 
	2019-01-29T21:43:34Z 
 | 
	2019-01-30T12:50:04Z 
 | 
	2019-01-30T12:50:04Z 
 | 
	2019-01-30T12:50:04Z 
 | 
					
	BUG-24212 fix when other_index has incompatible dtype 
 | 
	diff --git a/doc/source/whatsnew/v0.25.0.rst b/doc/source/whatsnew/v0.25.0.rst
index b2a379d9fe6f5..bb7fdf97c9383 100644
--- a/doc/source/whatsnew/v0.25.0.rst
+++ b/doc/source/whatsnew/v0.25.0.rst
@@ -399,7 +399,7 @@ Reshaping
 ^^^^^^^^^
 
 - Bug in :func:`pandas.merge` adds a string of ``None``, if ``None`` is assigned in suffixes instead of remain the column name as-is (:issue:`24782`).
-- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (:issue:`24212`)
+- Bug in :func:`merge` when merging by index name would sometimes result in an incorrectly numbered index (missing index values are now assigned NA) (:issue:`24212`, :issue:`25009`)
 - :func:`to_records` now accepts dtypes to its ``column_dtypes`` parameter (:issue:`24895`)
 - Bug in :func:`concat` where order of ``OrderedDict`` (and ``dict`` in Python 3.6+) is not respected, when passed in as  ``objs`` argument (:issue:`21510`)
 - Bug in :func:`pivot_table` where columns with ``NaN`` values are dropped even if ``dropna`` argument is ``False``, when the ``aggfunc`` argument contains a ``list`` (:issue:`22159`)
diff --git a/pandas/core/reshape/merge.py b/pandas/core/reshape/merge.py
index 0837186e33267..78309ce9c863c 100644
--- a/pandas/core/reshape/merge.py
+++ b/pandas/core/reshape/merge.py
@@ -803,22 +803,18 @@ def _create_join_index(self, index, other_index, indexer,
         -------
         join_index
         """
-        join_index = index.take(indexer)
         if (self.how in (how, 'outer') and
                 not isinstance(other_index, MultiIndex)):
             # if final index requires values in other_index but not target
             # index, indexer may hold missing (-1) values, causing Index.take
-            # to take the final value in target index
+            # to take the final value in target index. So, we set the last
+            # element to be the desired fill value. We do not use allow_fill
+            # and fill_value because it throws a ValueError on integer indices
             mask = indexer == -1
             if np.any(mask):
-                # if values missing (-1) from target index,
-                # take from other_index instead
-                join_list = join_index.to_numpy()
-                other_list = other_index.take(other_indexer).to_numpy()
-                join_list[mask] = other_list[mask]
-                join_index = Index(join_list, dtype=join_index.dtype,
-                                   name=join_index.name)
-        return join_index
+                fill_value = na_value_for_dtype(index.dtype, compat=False)
+                index = index.append(Index([fill_value]))
+        return index.take(indexer)
 
     def _get_merge_keys(self):
         """
diff --git a/pandas/tests/reshape/merge/test_merge.py b/pandas/tests/reshape/merge/test_merge.py
index b4a58628faa4d..8bc68cc7f8fc2 100644
--- a/pandas/tests/reshape/merge/test_merge.py
+++ b/pandas/tests/reshape/merge/test_merge.py
@@ -15,7 +15,8 @@
 import pandas as pd
 from pandas import (
     Categorical, CategoricalIndex, DataFrame, DatetimeIndex, Float64Index,
-    Int64Index, MultiIndex, RangeIndex, Series, UInt64Index)
+    Int64Index, IntervalIndex, MultiIndex, PeriodIndex, RangeIndex, Series,
+    TimedeltaIndex, UInt64Index)
 from pandas.api.types import CategoricalDtype as CDT
 from pandas.core.reshape.concat import concat
 from pandas.core.reshape.merge import MergeError, merge
@@ -1034,11 +1035,30 @@ def test_merge_two_empty_df_no_division_error(self):
             merge(a, a, on=('a', 'b'))
 
     @pytest.mark.parametrize('how', ['right', 'outer'])
-    def test_merge_on_index_with_more_values(self, how):
+    @pytest.mark.parametrize(
+        'index,expected_index',
+        [(CategoricalIndex([1, 2, 4]),
+          CategoricalIndex([1, 2, 4, None, None, None])),
+         (DatetimeIndex(['2001-01-01', '2002-02-02', '2003-03-03']),
+          DatetimeIndex(['2001-01-01', '2002-02-02', '2003-03-03',
+                         pd.NaT, pd.NaT, pd.NaT])),
+         (Float64Index([1, 2, 3]),
+          Float64Index([1, 2, 3, None, None, None])),
+         (Int64Index([1, 2, 3]),
+          Float64Index([1, 2, 3, None, None, None])),
+         (IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4)]),
+          IntervalIndex.from_tuples([(1, 2), (2, 3), (3, 4),
+                                     np.nan, np.nan, np.nan])),
+         (PeriodIndex(['2001-01-01', '2001-01-02', '2001-01-03'], freq='D'),
+          PeriodIndex(['2001-01-01', '2001-01-02', '2001-01-03',
+                       pd.NaT, pd.NaT, pd.NaT], freq='D')),
+         (TimedeltaIndex(['1d', '2d', '3d']),
+          TimedeltaIndex(['1d', '2d', '3d', pd.NaT, pd.NaT, pd.NaT]))])
+    def test_merge_on_index_with_more_values(self, how, index, expected_index):
         # GH 24212
         # pd.merge gets [0, 1, 2, -1, -1, -1] as left_indexer, ensure that
         # -1 is interpreted as a missing value instead of the last element
-        df1 = pd.DataFrame({'a': [1, 2, 3], 'key': [0, 2, 2]})
+        df1 = pd.DataFrame({'a': [1, 2, 3], 'key': [0, 2, 2]}, index=index)
         df2 = pd.DataFrame({'b': [1, 2, 3, 4, 5]})
         result = df1.merge(df2, left_on='key', right_index=True, how=how)
         expected = pd.DataFrame([[1.0, 0, 1],
@@ -1048,7 +1068,7 @@ def test_merge_on_index_with_more_values(self, how):
                                  [np.nan, 3, 4],
                                  [np.nan, 4, 5]],
                                 columns=['a', 'key', 'b'])
-        expected.set_index(Int64Index([0, 1, 2, 1, 3, 4]), inplace=True)
+        expected.set_index(expected_index, inplace=True)
         assert_frame_equal(result, expected)
 
     def test_merge_right_index_right(self):
@@ -1062,11 +1082,27 @@ def test_merge_right_index_right(self):
                                  'key': [0, 1, 1, 2],
                                  'b': [1, 2, 2, 3]},
                                 columns=['a', 'key', 'b'],
-                                index=[0, 1, 2, 2])
+                                index=[0, 1, 2, np.nan])
         result = left.merge(right, left_on='key', right_index=True,
                             how='right')
         tm.assert_frame_equal(result, expected)
 
+    def test_merge_take_missing_values_from_index_of_other_dtype(self):
+        # GH 24212
+        left = pd.DataFrame({'a': [1, 2, 3],
+                             'key': pd.Categorical(['a', 'a', 'b'],
+                                                   categories=list('abc'))})
+        right = pd.DataFrame({'b': [1, 2, 3]},
+                             index=pd.CategoricalIndex(['a', 'b', 'c']))
+        result = left.merge(right, left_on='key',
+                            right_index=True, how='right')
+        expected = pd.DataFrame({'a': [1, 2, 3, None],
+                                 'key': pd.Categorical(['a', 'a', 'b', 'c']),
+                                 'b': [1, 1, 2, 3]},
+                                index=[0, 1, 2, np.nan])
+        expected = expected.reindex(columns=['a', 'key', 'b'])
+        tm.assert_frame_equal(result, expected)
+
 
 def _check_merge(x, y):
     for how in ['inner', 'left', 'outer']:
 
 | 
	- [X] closes #25001
- [X] tests added / passed
- [X] passes `git diff upstream/master -u -- "*.py" | flake8 --diff`
- [ ] whatsnew entry
Follow-up to #24916; addresses the case where the other index has an incompatible dtype, so we cannot take directly from it. Currently, this PR ~naively replaces the missing index values with the number of the rows in the other index that caused them~ replaces the missing index values with the appropriate NA value.
~Still working on adding cases when it is possible to combine indices of sparse/categorical dtypes without densifying.~ 
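A sketch of the new behavior, taken from the added test:
```
import pandas as pd

left = pd.DataFrame({'a': [1, 2, 3],
                     'key': pd.Categorical(['a', 'a', 'b'],
                                           categories=list('abc'))})
right = pd.DataFrame({'b': [1, 2, 3]},
                     index=pd.CategoricalIndex(['a', 'b', 'c']))
# The row that exists only in `right` gets NaN in the result index
# instead of a misnumbered position.
left.merge(right, left_on='key', right_index=True, how='right')
```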
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25009 
 | 
	2019-01-29T19:32:15Z 
 | 
	2019-05-05T21:21:55Z 
 | 
	2019-05-05T21:21:55Z 
 | 
	2019-05-05T21:22:00Z 
 | 
					
	require Return section only if return is not None nor commentary 
 | 
	diff --git a/scripts/tests/test_validate_docstrings.py b/scripts/tests/test_validate_docstrings.py
index bb58449843096..6f78b91653a3f 100644
--- a/scripts/tests/test_validate_docstrings.py
+++ b/scripts/tests/test_validate_docstrings.py
@@ -229,6 +229,27 @@ def good_imports(self):
         """
         pass
 
+    def no_returns(self):
+        """
+        Say hello and have no returns.
+        """
+        pass
+
+    def empty_returns(self):
+        """
+        Say hello and always return None.
+
+        Since this function never returns a value, this
+        docstring doesn't need a return section.
+        """
+        def say_hello():
+            return "Hello World!"
+        say_hello()
+        if True:
+            return
+        else:
+            return None
+
 
 class BadGenericDocStrings(object):
     """Everything here has a bad docstring
@@ -783,7 +804,7 @@ def test_good_class(self, capsys):
 
     @pytest.mark.parametrize("func", [
         'plot', 'sample', 'random_letters', 'sample_values', 'head', 'head1',
-        'contains', 'mode', 'good_imports'])
+        'contains', 'mode', 'good_imports', 'no_returns', 'empty_returns'])
     def test_good_functions(self, capsys, func):
         errors = validate_one(self._import_path(
             klass='GoodDocStrings', func=func))['errors']
diff --git a/scripts/validate_docstrings.py b/scripts/validate_docstrings.py
index bce33f7e78daa..446cd60968312 100755
--- a/scripts/validate_docstrings.py
+++ b/scripts/validate_docstrings.py
@@ -26,6 +26,8 @@
 import importlib
 import doctest
 import tempfile
+import ast
+import textwrap
 
 import flake8.main.application
 
@@ -490,9 +492,45 @@ def yields(self):
     @property
     def method_source(self):
         try:
-            return inspect.getsource(self.obj)
+            source = inspect.getsource(self.obj)
         except TypeError:
             return ''
+        return textwrap.dedent(source)
+
+    @property
+    def method_returns_something(self):
+        '''
+        Check if the docstrings method can return something.
+
+        Bare returns, returns valued None and returns from nested functions are
+        disconsidered.
+
+        Returns
+        -------
+        bool
+            Whether the docstrings method can return something.
+        '''
+
+        def get_returns_not_on_nested_functions(node):
+            returns = [node] if isinstance(node, ast.Return) else []
+            for child in ast.iter_child_nodes(node):
+                # Ignore nested functions and its subtrees.
+                if not isinstance(child, ast.FunctionDef):
+                    child_returns = get_returns_not_on_nested_functions(child)
+                    returns.extend(child_returns)
+            return returns
+
+        tree = ast.parse(self.method_source).body
+        if tree:
+            returns = get_returns_not_on_nested_functions(tree[0])
+            return_values = [r.value for r in returns]
+            # Replace NameConstant nodes valued None for None.
+            for i, v in enumerate(return_values):
+                if isinstance(v, ast.NameConstant) and v.value is None:
+                    return_values[i] = None
+            return any(return_values)
+        else:
+            return False
 
     @property
     def first_line_ends_in_dot(self):
@@ -691,7 +729,7 @@ def get_validation_data(doc):
 
     if doc.is_function_or_method:
         if not doc.returns:
-            if 'return' in doc.method_source:
+            if doc.method_returns_something:
                 errs.append(error('RT01'))
         else:
             if len(doc.returns) == 1 and doc.returns[0][1]:
 
 | 
- [ ] closes #23488 
Updated the return-statement lookup in validate_docstrings.py:
- ignore `return None`
- ignore bare `return`
- ignore the word "return" in comments
Updated test_validate_docstrings.py:
- added a test which contains the kinds of returns listed above and has a valid docstring with no Returns section
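A simplified sketch of the AST-based check (the helper in the diff additionally skips nested functions):
```
import ast

source = "def f():\n    if True:\n        return\n    return None\n"
tree = ast.parse(source).body[0]
returns = [node for node in ast.walk(tree) if isinstance(node, ast.Return)]
# Bare `return` has value None; `return None` parses as NameConstant(None).
# Neither counts as returning something, so no Returns section is required.
any(r.value is not None and
    not (isinstance(r.value, ast.NameConstant) and r.value.value is None)
    for r in returns)
```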
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25008 
 | 
	2019-01-29T18:23:09Z 
 | 
	2019-03-11T12:02:05Z 
 | 
	2019-03-11T12:02:04Z 
 | 
	2019-03-11T12:02:05Z 
 | 
					
	API: Change default for Index.union sort 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 047404e93914b..948350df140eb 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -15,10 +15,84 @@ Whats New in 0.24.1 (February XX, 2019)
 These are the changes in pandas 0.24.1. See :ref:`release` for a full changelog
 including other versions of pandas.
 
+.. _whatsnew_0241.api:
+
+API Changes
+~~~~~~~~~~~
+
+Changing the ``sort`` parameter for :meth:`Index.union`
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+The default ``sort`` value for :meth:`Index.union` has changed from ``True`` to ``None`` (:issue:`24959`).
+The default *behavior* remains the same: The result is sorted, unless
+
+1. ``self`` and ``other`` are identical
+2. ``self`` or ``other`` is empty
+3. ``self`` or ``other`` contain values that can not be compared (a ``RuntimeWarning`` is raised).
+
+This allows ``sort=True`` to now mean "always sort". A ``TypeError`` is raised if the values cannot be compared.
+
+**Behavior in 0.24.0**
+
+.. ipython:: python
+
+   In [1]: idx = pd.Index(['b', 'a'])
+
+   In [2]: idx.union(idx)  # sort=True was the default.
+   Out[2]: Index(['b', 'a'], dtype='object')
+
+   In [3]: idx.union(idx, sort=True)  # result is still not sorted.
+   Out[32]: Index(['b', 'a'], dtype='object')
+
+**New Behavior**
+
+.. ipython:: python
+
+   idx = pd.Index(['b', 'a'])
+   idx.union(idx)  # sort=None is the default. Don't sort identical operands.
+
+   idx.union(idx, sort=True)
+
+The same change applies to :meth:`Index.difference` and :meth:`Index.symmetric_difference`, which
+would previously not sort the result when ``sort=True`` but the values could not be compared.
+
+Changed the behavior of :meth:`Index.intersection` with ``sort=True``
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+When ``sort=True`` is provided to :meth:`Index.intersection`, the values are always sorted. In 0.24.0,
+the values would not be sorted when ``self`` and ``other`` were identical. Pass ``sort=False`` to not
+sort the values. This matches the behavior of pandas 0.23.4 and earlier.
+
+**Behavior in 0.23.4**
+
+.. ipython:: python
+
+   In [2]: idx = pd.Index(['b', 'a'])
+
+   In [3]: idx.intersection(idx)  # sort was not a keyword.
+   Out[3]: Index(['b', 'a'], dtype='object')
+
+**Behavior in 0.24.0**
+
+.. ipython:: python
+
+   In [5]: idx.intersection(idx)  # sort=True by default. Don't sort identical.
+   Out[5]: Index(['b', 'a'], dtype='object')
+
+   In [6]: idx.intersection(idx, sort=True)
+   Out[6]: Index(['b', 'a'], dtype='object')
+
+**New Behavior**
+
+.. ipython:: python
+
+   idx.intersection(idx)  # sort=False by default
+   idx.intersection(idx, sort=True)
+
 .. _whatsnew_0241.regressions:
 
 Fixed Regressions
-^^^^^^^^^^^^^^^^^
+~~~~~~~~~~~~~~~~~
 
 - Bug in :meth:`DataFrame.itertuples` with ``records`` orient raising an ``AttributeError`` when the ``DataFrame`` contained more than 255 columns (:issue:`24939`)
 - Bug in :meth:`DataFrame.itertuples` orient converting integer column names to strings prepended with an underscore (:issue:`24940`)
@@ -28,7 +102,7 @@ Fixed Regressions
 .. _whatsnew_0241.enhancements:
 
 Enhancements
-^^^^^^^^^^^^
+~~~~~~~~~~~~
 
 
 .. _whatsnew_0241.bug_fixes:
diff --git a/pandas/_libs/lib.pyx b/pandas/_libs/lib.pyx
index 4a3440e14ba14..c9473149d8a84 100644
--- a/pandas/_libs/lib.pyx
+++ b/pandas/_libs/lib.pyx
@@ -233,11 +233,14 @@ def fast_unique_multiple(list arrays, sort: bool=True):
             if val not in table:
                 table[val] = stub
                 uniques.append(val)
-    if sort:
+    if sort is None:
         try:
             uniques.sort()
         except Exception:
+            # TODO: RuntimeWarning?
             pass
+    elif sort:
+        uniques.sort()
 
     return uniques
 
diff --git a/pandas/core/indexes/base.py b/pandas/core/indexes/base.py
index 3d176012df22b..12880ed93cc2a 100644
--- a/pandas/core/indexes/base.py
+++ b/pandas/core/indexes/base.py
@@ -2245,18 +2245,34 @@ def _get_reconciled_name_object(self, other):
             return self._shallow_copy(name=name)
         return self
 
-    def union(self, other, sort=True):
+    def union(self, other, sort=None):
         """
         Form the union of two Index objects.
 
         Parameters
         ----------
         other : Index or array-like
-        sort : bool, default True
-            Sort the resulting index if possible
+        sort : bool or None, default None
+            Whether to sort the resulting Index.
+
+            * None : Sort the result, except when
+
+              1. `self` and `other` are equal.
+              2. `self` or `other` has length 0.
+              3. Some values in `self` or `other` cannot be compared.
+                 A RuntimeWarning is issued in this case.
+
+            * True : sort the result. A TypeError is raised when the
+              values cannot be compared.
+            * False : do not sort the result.
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Changed the default `sort` to None, matching the
+               behavior of pandas 0.23.4 and earlier.
+
         Returns
         -------
         union : Index
@@ -2273,10 +2289,16 @@ def union(self, other, sort=True):
         other = ensure_index(other)
 
         if len(other) == 0 or self.equals(other):
-            return self._get_reconciled_name_object(other)
+            result = self._get_reconciled_name_object(other)
+            if sort:
+                result = result.sort_values()
+            return result
 
         if len(self) == 0:
-            return other._get_reconciled_name_object(self)
+            result = other._get_reconciled_name_object(self)
+            if sort:
+                result = result.sort_values()
+            return result
 
         # TODO: is_dtype_union_equal is a hack around
         # 1. buggy set ops with duplicates (GH #13432)
@@ -2319,13 +2341,16 @@ def union(self, other, sort=True):
             else:
                 result = lvals
 
-            if sort:
+            if sort is None:
                 try:
                     result = sorting.safe_sort(result)
                 except TypeError as e:
                     warnings.warn("{}, sort order is undefined for "
                                   "incomparable objects".format(e),
                                   RuntimeWarning, stacklevel=3)
+            elif sort:
+                # raise if not sortable.
+                result = sorting.safe_sort(result)
 
         # for subclasses
         return self._wrap_setop_result(other, result)
@@ -2342,8 +2367,12 @@ def intersection(self, other, sort=False):
         Parameters
         ----------
         other : Index or array-like
-        sort : bool, default False
-            Sort the resulting index if possible
+        sort : bool or None, default False
+            Whether to sort the resulting index.
+
+            * False : do not sort the result.
+            * True : sort the result. A TypeError is raised when the
+              values cannot be compared.
 
             .. versionadded:: 0.24.0
 
@@ -2367,7 +2396,10 @@ def intersection(self, other, sort=False):
         other = ensure_index(other)
 
         if self.equals(other):
-            return self._get_reconciled_name_object(other)
+            result = self._get_reconciled_name_object(other)
+            if sort:
+                result = result.sort_values()
+            return result
 
         if not is_dtype_equal(self.dtype, other.dtype):
             this = self.astype('O')
@@ -2415,7 +2447,7 @@ def intersection(self, other, sort=False):
 
         return taken
 
-    def difference(self, other, sort=True):
+    def difference(self, other, sort=None):
         """
         Return a new Index with elements from the index that are not in
         `other`.
@@ -2425,11 +2457,24 @@ def difference(self, other, sort=True):
         Parameters
         ----------
         other : Index or array-like
-        sort : bool, default True
-            Sort the resulting index if possible
+        sort : bool or None, default None
+            Whether to sort the resulting index. By default, the
+            values are attempted to be sorted, but any TypeError from
+            incomparable elements is caught by pandas.
+
+            * None : Attempt to sort the result, but catch any TypeErrors
+              from comparing incomparable elements.
+            * False : Do not sort the result.
+            * True : Sort the result, raising a TypeError if any elements
+              cannot be compared.
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Added the `None` option, which matches the behavior of
+               pandas 0.23.4 and earlier.
+
         Returns
         -------
         difference : Index
@@ -2460,15 +2505,17 @@ def difference(self, other, sort=True):
         label_diff = np.setdiff1d(np.arange(this.size), indexer,
                                   assume_unique=True)
         the_diff = this.values.take(label_diff)
-        if sort:
+        if sort is None:
             try:
                 the_diff = sorting.safe_sort(the_diff)
             except TypeError:
                 pass
+        elif sort:
+            the_diff = sorting.safe_sort(the_diff)
 
         return this._shallow_copy(the_diff, name=result_name, freq=None)
 
-    def symmetric_difference(self, other, result_name=None, sort=True):
+    def symmetric_difference(self, other, result_name=None, sort=None):
         """
         Compute the symmetric difference of two Index objects.
 
@@ -2476,11 +2523,24 @@ def symmetric_difference(self, other, result_name=None, sort=True):
         ----------
         other : Index or array-like
         result_name : str
-        sort : bool, default True
-            Sort the resulting index if possible
+        sort : bool or None, default None
+            Whether to sort the resulting index. By default, the
+            values are attempted to be sorted, but any TypeError from
+            incomparable elements is caught by pandas.
+
+            * None : Attempt to sort the result, but catch any TypeErrors
+              from comparing incomparable elements.
+            * False : Do not sort the result.
+            * True : Sort the result, raising a TypeError if any elements
+              cannot be compared.
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Added the `None` option, which matches the behavior of
+               pandas 0.23.4 and earlier.
+
         Returns
         -------
         symmetric_difference : Index
@@ -2524,11 +2584,13 @@ def symmetric_difference(self, other, result_name=None, sort=True):
         right_diff = other.values.take(right_indexer)
 
         the_diff = _concat._concat_compat([left_diff, right_diff])
-        if sort:
+        if sort is None:
             try:
                 the_diff = sorting.safe_sort(the_diff)
             except TypeError:
                 pass
+        elif sort:
+            the_diff = sorting.safe_sort(the_diff)
 
         attribs = self._get_attributes_dict()
         attribs['name'] = result_name
diff --git a/pandas/core/indexes/multi.py b/pandas/core/indexes/multi.py
index 16af3fe8eef26..32a5a09359019 100644
--- a/pandas/core/indexes/multi.py
+++ b/pandas/core/indexes/multi.py
@@ -2879,18 +2879,34 @@ def equal_levels(self, other):
                 return False
         return True
 
-    def union(self, other, sort=True):
+    def union(self, other, sort=None):
         """
         Form the union of two MultiIndex objects
 
         Parameters
         ----------
         other : MultiIndex or array / Index of tuples
-        sort : bool, default True
-            Sort the resulting MultiIndex if possible
+        sort : bool or None, default None
+            Whether to sort the resulting Index.
+
+            * None : Sort the result, except when
+
+              1. `self` and `other` are equal.
+              2. `self` has length 0.
+              3. Some values in `self` or `other` cannot be compared.
+                 A RuntimeWarning is issued in this case.
+
+            * True : Sort the result. A TypeError is raised when the
+              values cannot be compared.
+            * False : Do not sort the result.
 
             .. versionadded:: 0.24.0
 
+            .. versionchanged:: 0.24.1
+
+               Changed the default `sort` to None, matching the
+               behavior of pandas 0.23.4 and earlier.
+
         Returns
         -------
         Index
@@ -2901,8 +2917,12 @@ def union(self, other, sort=True):
         other, result_names = self._convert_can_do_setop(other)
 
         if len(other) == 0 or self.equals(other):
+            if sort:
+                return self.sort_values()
             return self
 
+        # TODO: Index.union returns other when `len(self)` is 0.
+
         uniq_tuples = lib.fast_unique_multiple([self._ndarray_values,
                                                 other._ndarray_values],
                                                sort=sort)
@@ -2917,7 +2937,7 @@ def intersection(self, other, sort=False):
         Parameters
         ----------
         other : MultiIndex or array / Index of tuples
-        sort : bool, default True
+        sort : bool, default False
             Sort the resulting MultiIndex if possible
 
             .. versionadded:: 0.24.0
@@ -2934,6 +2954,8 @@ def intersection(self, other, sort=False):
         other, result_names = self._convert_can_do_setop(other)
 
         if self.equals(other):
+            if sort:
+                return self.sort_values()
             return self
 
         self_tuples = self._ndarray_values
@@ -2951,7 +2973,7 @@ def intersection(self, other, sort=False):
             return MultiIndex.from_arrays(lzip(*uniq_tuples), sortorder=0,
                                           names=result_names)
 
-    def difference(self, other, sort=True):
+    def difference(self, other, sort=None):
         """
         Compute set difference of two MultiIndex objects
 
@@ -2971,6 +2993,8 @@ def difference(self, other, sort=True):
         other, result_names = self._convert_can_do_setop(other)
 
         if len(other) == 0:
+            if sort:
+                return self.sort_values()
             return self
 
         if self.equals(other):
diff --git a/pandas/tests/indexes/multi/test_set_ops.py b/pandas/tests/indexes/multi/test_set_ops.py
index 208d6cf1c639f..6a42e29aa8f5c 100644
--- a/pandas/tests/indexes/multi/test_set_ops.py
+++ b/pandas/tests/indexes/multi/test_set_ops.py
@@ -174,7 +174,10 @@ def test_difference(idx, sort):
 
     # name from empty array
     result = first.difference([], sort=sort)
-    assert first.equals(result)
+    if sort:
+        assert first.sort_values().equals(result)
+    else:
+        assert first.equals(result)
     assert first.names == result.names
 
     # name from non-empty array
@@ -189,6 +192,36 @@ def test_difference(idx, sort):
         first.difference([1, 2, 3, 4, 5], sort=sort)
 
 
+def test_difference_sort_special():
+    idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']])
+    # sort=None, the default
+    result = idx.difference([])
+    tm.assert_index_equal(result, idx)
+
+    result = idx.difference([], sort=True)
+    expected = pd.MultiIndex.from_product([[0, 1], ['a', 'b']])
+    tm.assert_index_equal(result, expected)
+
+
+def test_difference_sort_incomparable():
+    idx = pd.MultiIndex.from_product([[1, pd.Timestamp('2000'), 2],
+                                      ['a', 'b']])
+
+    other = pd.MultiIndex.from_product([[3, pd.Timestamp('2000'), 4],
+                                        ['c', 'd']])
+    # sort=None, the default
+    # result = idx.difference(other)
+    # tm.assert_index_equal(result, idx)
+
+    # sort=False
+    result = idx.difference(other, sort=False)
+    tm.assert_index_equal(result, idx)
+
+    # sort=True, raises
+    with pytest.raises(TypeError):
+        idx.difference(other, sort=True)
+
+
 @pytest.mark.parametrize("sort", [True, False])
 def test_union(idx, sort):
     piece1 = idx[:5][::-1]
@@ -203,10 +236,16 @@ def test_union(idx, sort):
 
     # corner case, pass self or empty thing:
     the_union = idx.union(idx, sort=sort)
-    assert the_union is idx
+    if sort:
+        tm.assert_index_equal(the_union, idx.sort_values())
+    else:
+        assert the_union is idx
 
     the_union = idx.union(idx[:0], sort=sort)
-    assert the_union is idx
+    if sort:
+        tm.assert_index_equal(the_union, idx.sort_values())
+    else:
+        assert the_union is idx
 
     # won't work in python 3
     # tuples = _index.values
@@ -238,7 +277,10 @@ def test_intersection(idx, sort):
 
     # corner case, pass self
     the_int = idx.intersection(idx, sort=sort)
-    assert the_int is idx
+    if sort:
+        tm.assert_index_equal(the_int, idx.sort_values())
+    else:
+        assert the_int is idx
 
     # empty intersection: disjoint
     empty = idx[:2].intersection(idx[2:], sort=sort)
@@ -249,3 +291,47 @@ def test_intersection(idx, sort):
     # tuples = _index.values
     # result = _index & tuples
     # assert result.equals(tuples)
+
+
+def test_intersect_equal_sort():
+    idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']])
+    sorted_ = pd.MultiIndex.from_product([[0, 1], ['a', 'b']])
+    tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
+    tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
+
+
[email protected]('slice_', [slice(None), slice(0)])
+def test_union_sort_other_empty(slice_):
+    # https://github.com/pandas-dev/pandas/issues/24959
+    idx = pd.MultiIndex.from_product([[1, 0], ['a', 'b']])
+
+    # default, sort=None
+    other = idx[slice_]
+    tm.assert_index_equal(idx.union(other), idx)
+    # MultiIndex does not special case empty.union(idx)
+    # tm.assert_index_equal(other.union(idx), idx)
+
+    # sort=False
+    tm.assert_index_equal(idx.union(other, sort=False), idx)
+
+    # sort=True
+    result = idx.union(other, sort=True)
+    expected = pd.MultiIndex.from_product([[0, 1], ['a', 'b']])
+    tm.assert_index_equal(result, expected)
+
+
+def test_union_sort_other_incomparable():
+    # https://github.com/pandas-dev/pandas/issues/24959
+    idx = pd.MultiIndex.from_product([[1, pd.Timestamp('2000')], ['a', 'b']])
+
+    # default, sort=None
+    result = idx.union(idx[:1])
+    tm.assert_index_equal(result, idx)
+
+    # sort=False
+    result = idx.union(idx[:1], sort=False)
+    tm.assert_index_equal(result, idx)
+
+    # sort=True
+    with pytest.raises(TypeError, match='Cannot compare'):
+        idx.union(idx[:1], sort=True)
diff --git a/pandas/tests/indexes/test_base.py b/pandas/tests/indexes/test_base.py
index 20e439de46bde..4e8555cbe1aab 100644
--- a/pandas/tests/indexes/test_base.py
+++ b/pandas/tests/indexes/test_base.py
@@ -3,6 +3,7 @@
 from collections import defaultdict
 from datetime import datetime, timedelta
 import math
+import operator
 import sys
 
 import numpy as np
@@ -695,7 +696,10 @@ def test_intersection(self, sort):
 
         # Corner cases
         inter = first.intersection(first, sort=sort)
-        assert inter is first
+        if sort:
+            tm.assert_index_equal(inter, first.sort_values())
+        else:
+            assert inter is first
 
     @pytest.mark.parametrize("index2,keeps_name", [
         (Index([3, 4, 5, 6, 7], name="index"), True),  # preserve same name
@@ -770,6 +774,12 @@ def test_intersect_nosort(self):
         expected = pd.Index(['b', 'a'])
         tm.assert_index_equal(result, expected)
 
+    def test_intersect_equal_sort(self):
+        idx = pd.Index(['c', 'a', 'b'])
+        sorted_ = pd.Index(['a', 'b', 'c'])
+        tm.assert_index_equal(idx.intersection(idx, sort=False), idx)
+        tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)
+
     @pytest.mark.parametrize("sort", [True, False])
     def test_chained_union(self, sort):
         # Chained unions handles names correctly
@@ -799,6 +809,41 @@ def test_union(self, sort):
             tm.assert_index_equal(union, everything.sort_values())
         assert tm.equalContents(union, everything)
 
+    @pytest.mark.parametrize('slice_', [slice(None), slice(0)])
+    def test_union_sort_other_special(self, slice_):
+        # https://github.com/pandas-dev/pandas/issues/24959
+
+        idx = pd.Index([1, 0, 2])
+        # default, sort=None
+        other = idx[slice_]
+        tm.assert_index_equal(idx.union(other), idx)
+        tm.assert_index_equal(other.union(idx), idx)
+
+        # sort=False
+        tm.assert_index_equal(idx.union(other, sort=False), idx)
+
+        # sort=True
+        result = idx.union(other, sort=True)
+        expected = pd.Index([0, 1, 2])
+        tm.assert_index_equal(result, expected)
+
+    def test_union_sort_other_incomparable(self):
+        # https://github.com/pandas-dev/pandas/issues/24959
+        idx = pd.Index([1, pd.Timestamp('2000')])
+        # default, sort=None
+        with tm.assert_produces_warning(RuntimeWarning):
+            result = idx.union(idx[:1])
+
+        tm.assert_index_equal(result, idx)
+
+        # sort=True
+        with pytest.raises(TypeError, match='.*'):
+            idx.union(idx[:1], sort=True)
+
+        # sort=False
+        result = idx.union(idx[:1], sort=False)
+        tm.assert_index_equal(result, idx)
+
     @pytest.mark.parametrize("klass", [
         np.array, Series, list])
     @pytest.mark.parametrize("sort", [True, False])
@@ -815,19 +860,20 @@ def test_union_from_iterables(self, klass, sort):
             tm.assert_index_equal(result, everything.sort_values())
         assert tm.equalContents(result, everything)
 
-    @pytest.mark.parametrize("sort", [True, False])
+    @pytest.mark.parametrize("sort", [None, True, False])
     def test_union_identity(self, sort):
         # TODO: replace with fixturesult
         first = self.strIndex[5:20]
 
         union = first.union(first, sort=sort)
-        assert union is first
+        # i.e. identity is not preserved when sort is True
+        assert (union is first) is (not sort)
 
         union = first.union([], sort=sort)
-        assert union is first
+        assert (union is first) is (not sort)
 
         union = Index([]).union(first, sort=sort)
-        assert union is first
+        assert (union is first) is (not sort)
 
     @pytest.mark.parametrize("first_list", [list('ba'), list()])
     @pytest.mark.parametrize("second_list", [list('ab'), list()])
@@ -1054,6 +1100,29 @@ def test_symmetric_difference(self, sort):
         assert tm.equalContents(result, expected)
         assert result.name is None
 
+    @pytest.mark.parametrize('opname', ['difference', 'symmetric_difference'])
+    def test_difference_incomparable(self, opname):
+        a = pd.Index([3, pd.Timestamp('2000'), 1])
+        b = pd.Index([2, pd.Timestamp('1999'), 1])
+        op = operator.methodcaller(opname, b)
+
+        # sort=None, the default
+        result = op(a)
+        expected = pd.Index([3, pd.Timestamp('2000'), 2, pd.Timestamp('1999')])
+        if opname == 'difference':
+            expected = expected[:2]
+        tm.assert_index_equal(result, expected)
+
+        # sort=False
+        op = operator.methodcaller(opname, b, sort=False)
+        result = op(a)
+        tm.assert_index_equal(result, expected)
+
+        # sort=True, raises
+        op = operator.methodcaller(opname, b, sort=True)
+        with pytest.raises(TypeError, match='Cannot compare'):
+            op(a)
+
     @pytest.mark.parametrize("sort", [True, False])
     def test_symmetric_difference_mi(self, sort):
         index1 = MultiIndex.from_tuples(self.tuples)
 
 | 
	Closes https://github.com/pandas-dev/pandas/issues/24959
Haven't done MultiIndex yet, just opening for discussion on *if* we should do this for 0.24.1. 
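For reference, a minimal sketch of the three modes on a plain ``Index`` — a hypothetical session that assumes this branch is installed; the output comments are approximate:

```python
import pandas as pd

a = pd.Index([3, pd.Timestamp("2000"), 1])
b = pd.Index([2, pd.Timestamp("1999"), 1])

# sort=None (the new default): attempt to sort, but fall back to the
# unsorted result when the elements cannot be compared
print(a.difference(b))              # Index([3, 2000-01-01 00:00:00], dtype='object')

# sort=False: never attempt to sort
print(a.difference(b, sort=False))  # same elements, original order kept

# sort=True: sort, raising when the elements are incomparable
try:
    a.difference(b, sort=True)
except TypeError as exc:
    print(exc)                      # "Cannot compare ..."
```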
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25007 
 | 
	2019-01-29T18:02:12Z 
 | 
	2019-02-04T22:12:40Z 
 | null  | 
	2019-02-04T22:12:43Z 
 | 
					
	Backport PR #24973: fix for BUG: grouping with tz-aware: Values falls… 
 | 
	diff --git a/doc/source/whatsnew/v0.24.1.rst b/doc/source/whatsnew/v0.24.1.rst
index 7647e199030d2..8f4c3982c745f 100644
--- a/doc/source/whatsnew/v0.24.1.rst
+++ b/doc/source/whatsnew/v0.24.1.rst
@@ -70,6 +70,9 @@ Bug Fixes
 -
 -
 
+**Reshaping**
+
+- Bug in :meth:`DataFrame.groupby` with :class:`Grouper` when there is a time change (DST) and grouping frequency is ``'1d'`` (:issue:`24972`)
 
 **Visualization**
 
diff --git a/pandas/core/resample.py b/pandas/core/resample.py
index 6822225273906..7723827ff478a 100644
--- a/pandas/core/resample.py
+++ b/pandas/core/resample.py
@@ -30,8 +30,7 @@
 from pandas.core.indexes.timedeltas import TimedeltaIndex, timedelta_range
 
 from pandas.tseries.frequencies import to_offset
-from pandas.tseries.offsets import (
-    DateOffset, Day, Nano, Tick, delta_to_nanoseconds)
+from pandas.tseries.offsets import DateOffset, Day, Nano, Tick
 
 _shared_docs_kwargs = dict()
 
@@ -1613,20 +1612,20 @@ def _get_timestamp_range_edges(first, last, offset, closed='left', base=0):
     A tuple of length 2, containing the adjusted pd.Timestamp objects.
     """
     if isinstance(offset, Tick):
-        is_day = isinstance(offset, Day)
-        day_nanos = delta_to_nanoseconds(timedelta(1))
-
-        # #1165 and #24127
-        if (is_day and not offset.nanos % day_nanos) or not is_day:
-            first, last = _adjust_dates_anchored(first, last, offset,
-                                                 closed=closed, base=base)
-            if is_day and first.tz is not None:
-                # _adjust_dates_anchored assumes 'D' means 24H, but first/last
-                # might contain a DST transition (23H, 24H, or 25H).
-                # Ensure first/last snap to midnight.
-                first = first.normalize()
-                last = last.normalize()
-            return first, last
+        if isinstance(offset, Day):
+            # _adjust_dates_anchored assumes 'D' means 24H, but first/last
+            # might contain a DST transition (23H, 24H, or 25H).
+            # So "pretend" the dates are naive when adjusting the endpoints
+            tz = first.tz
+            first = first.tz_localize(None)
+            last = last.tz_localize(None)
+
+        first, last = _adjust_dates_anchored(first, last, offset,
+                                             closed=closed, base=base)
+        if isinstance(offset, Day):
+            first = first.tz_localize(tz)
+            last = last.tz_localize(tz)
+        return first, last
 
     else:
         first = first.normalize()
diff --git a/pandas/tests/resample/test_datetime_index.py b/pandas/tests/resample/test_datetime_index.py
index 73995cbe79ecd..b743aeecdc756 100644
--- a/pandas/tests/resample/test_datetime_index.py
+++ b/pandas/tests/resample/test_datetime_index.py
@@ -1276,6 +1276,21 @@ def test_resample_across_dst():
     assert_frame_equal(result, expected)
 
 
+def test_groupby_with_dst_time_change():
+    # GH 24972
+    index = pd.DatetimeIndex([1478064900001000000, 1480037118776792000],
+                             tz='UTC').tz_convert('America/Chicago')
+
+    df = pd.DataFrame([1, 2], index=index)
+    result = df.groupby(pd.Grouper(freq='1d')).last()
+    expected_index_values = pd.date_range('2016-11-02', '2016-11-24',
+                                          freq='d', tz='America/Chicago')
+
+    index = pd.DatetimeIndex(expected_index_values)
+    expected = pd.DataFrame([1.0] + ([np.nan] * 21) + [2.0], index=index)
+    assert_frame_equal(result, expected)
+
+
 def test_resample_dst_anchor():
     # 5172
     dti = DatetimeIndex([datetime(2012, 11, 4, 23)], tz='US/Eastern')
 
 | 
	… after last bin
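A minimal reproduction, adapted from the regression test above — a hypothetical session; the error message on unpatched 0.24.0 is as reported in the linked issue:

```python
import pandas as pd

# two nanosecond timestamps straddling the US DST transition on 2016-11-06
idx = pd.DatetimeIndex([1478064900001000000, 1480037118776792000],
                       tz='UTC').tz_convert('America/Chicago')
df = pd.DataFrame([1, 2], index=idx)

# Unpatched 0.24.0: raises "ValueError: Values falls after last bin".
# With this backport, the daily bin edges are computed on wall-clock
# (naive) time and re-localized, so the groupby succeeds.
result = df.groupby(pd.Grouper(freq='1d')).last()
print(result.iloc[[0, -1]])  # first and last daily bins: 1.0 and 2.0
```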
 
 | 
	https://api.github.com/repos/pandas-dev/pandas/pulls/25005 
 | 
	2019-01-29T16:12:28Z 
 | 
	2019-01-29T16:46:52Z 
 | 
	2019-01-29T16:46:52Z 
 | 
	2019-01-29T16:46:56Z 
 | 
					