diff --git a/doc/source/whatsnew/v0.19.0.txt b/doc/source/whatsnew/v0.19.0.txt
index bafb351b2f678..646e8822ed46f 100644
--- a/doc/source/whatsnew/v0.19.0.txt
+++ b/doc/source/whatsnew/v0.19.0.txt
@@ -310,6 +310,7 @@ Other enhancements
 - ``pd.read_html()`` has gained support for the ``decimal`` option (:issue:`12907`)
 - A function :func:`union_categorical` has been added for combining categoricals, see :ref:`Unioning Categoricals` (:issue:`13361`)
 - ``Series`` has gained the properties ``.is_monotonic``, ``.is_monotonic_increasing``, ``.is_monotonic_decreasing``, similar to ``Index`` (:issue:`13336`)
+- ``DataFrame.to_sql`` now allows a single value as the SQL type for all columns (:issue:`11886`)
 - ``Series.append`` now supports the ``ignore_index`` option (:issue:`13677`)
 - ``.to_stata()`` and ``StataWriter`` can now write variable labels to Stata dta files using a dictionary to make column names to labels (:issue:`13535`, :issue:`13536`)
 - ``.to_stata()`` and ``StataWriter`` will automatically convert ``datetime64[ns]`` columns to Stata format ``%tc``, rather than raising a ``ValueError`` (:issue:`12259`)
@@ -322,7 +323,6 @@ Other enhancements
                        index=['row1', 'row2'])
      df.sort_values(by='row2', axis=1)
 
-
 .. _whatsnew_0190.api:
 
 API changes
diff --git a/pandas/io/sql.py b/pandas/io/sql.py
index dfc9e80aa27d1..49f277f6ba7bc 100644
--- a/pandas/io/sql.py
+++ b/pandas/io/sql.py
@@ -14,7 +14,7 @@
 import pandas.lib as lib
 from pandas.types.missing import isnull
 from pandas.types.dtypes import DatetimeTZDtype
-from pandas.types.common import (is_list_like,
+from pandas.types.common import (is_list_like, is_dict_like,
                                  is_datetime64tz_dtype)
 
 from pandas.compat import (map, zip, raise_with_traceback,
@@ -448,9 +448,10 @@ def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail',
     chunksize : int, default None
         If not None, then rows will be written in batches of this size at a
         time. If None, all rows will be written at once.
-    dtype : dict of column name to SQL type, default None
+    dtype : single SQL type or dict of column name to SQL type, default None
         Optional specifying the datatype for columns. The SQL type should
         be a SQLAlchemy type, or a string for sqlite3 fallback connection.
+        If all columns are of the same type, one single value can be used.
 
     """
     if if_exists not in ('fail', 'replace', 'append'):
@@ -1121,11 +1122,15 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
         chunksize : int, default None
             If not None, then rows will be written in batches of this size at a
             time. If None, all rows will be written at once.
-        dtype : dict of column name to SQL type, default None
+        dtype : single type or dict of column name to SQL type, default None
             Optional specifying the datatype for columns. The SQL type should
-            be a SQLAlchemy type.
+            be a SQLAlchemy type. If all columns are of the same type, one
+            single value can be used.
 
         """
+        if dtype and not is_dict_like(dtype):
+            dtype = {col_name: dtype for col_name in frame}
+
         if dtype is not None:
             from sqlalchemy.types import to_instance, TypeEngine
             for col, my_type in dtype.items():
@@ -1473,11 +1478,15 @@ def to_sql(self, frame, name, if_exists='fail', index=True,
         chunksize : int, default None
             If not None, then rows will be written in batches of this size at a
             time. If None, all rows will be written at once.
-        dtype : dict of column name to SQL type, default None
+        dtype : single type or dict of column name to SQL type, default None
             Optional specifying the datatype for columns. The SQL type should
-            be a string.
+            be a string. If all columns are of the same type, one single value
+            can be used.
 
         """
+        if dtype and not is_dict_like(dtype):
+            dtype = {col_name: dtype for col_name in frame}
+
         if dtype is not None:
             for col, my_type in dtype.items():
                 if not isinstance(my_type, str):
diff --git a/pandas/io/tests/test_sql.py b/pandas/io/tests/test_sql.py
index f4001420a77b6..21c3ea416e091 100644
--- a/pandas/io/tests/test_sql.py
+++ b/pandas/io/tests/test_sql.py
@@ -1537,6 +1537,15 @@ def test_dtype(self):
         self.assertTrue(isinstance(sqltype, sqlalchemy.String))
         self.assertEqual(sqltype.length, 10)
 
+        # single dtype
+        df.to_sql('single_dtype_test', self.conn, dtype=sqlalchemy.TEXT)
+        meta = sqlalchemy.schema.MetaData(bind=self.conn)
+        meta.reflect()
+        sqltypea = meta.tables['single_dtype_test'].columns['A'].type
+        sqltypeb = meta.tables['single_dtype_test'].columns['B'].type
+        self.assertTrue(isinstance(sqltypea, sqlalchemy.TEXT))
+        self.assertTrue(isinstance(sqltypeb, sqlalchemy.TEXT))
+
     def test_notnull_dtype(self):
         cols = {'Bool': Series([True, None]),
                 'Date': Series([datetime(2012, 5, 1), None]),
@@ -2006,6 +2015,13 @@ def test_dtype(self):
         self.assertRaises(ValueError, df.to_sql,
                           'error', self.conn, dtype={'B': bool})
 
+        # single dtype
+        df.to_sql('single_dtype_test', self.conn, dtype='STRING')
+        self.assertEqual(
+            self._get_sqlite_column_type('single_dtype_test', 'A'), 'STRING')
+        self.assertEqual(
+            self._get_sqlite_column_type('single_dtype_test', 'B'), 'STRING')
+
     def test_notnull_dtype(self):
         if self.flavor == 'mysql':
             raise nose.SkipTest('Not applicable to MySQL legacy')
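
For context, a minimal usage sketch of the behaviour this patch adds: passing one SQL type
for all columns instead of a per-column dict. The engine, table names, and sample data below
are illustrative only and are not part of the change.

    import sqlalchemy
    import pandas as pd

    # illustrative in-memory SQLite engine and sample frame
    engine = sqlalchemy.create_engine('sqlite:///:memory:')
    df = pd.DataFrame({'A': ['x', 'y'], 'B': ['z', 'w']})

    # before this change, dtype had to be a mapping of column name to SQL type
    df.to_sql('dict_dtype_example', engine,
              dtype={'A': sqlalchemy.TEXT, 'B': sqlalchemy.TEXT})

    # with this change, a single SQL type is applied to every column
    df.to_sql('single_dtype_example', engine, dtype=sqlalchemy.TEXT)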