diff --git a/doc/source/getting_started/install.rst b/doc/source/getting_started/install.rst index 291799cfe521d..a9c3d637a41e3 100644 --- a/doc/source/getting_started/install.rst +++ b/doc/source/getting_started/install.rst @@ -326,6 +326,15 @@ top-level :func:`~pandas.read_html` function: .. _lxml: https://lxml.de .. _tabulate: https://github.com/astanin/python-tabulate +XML +^^^ + +========================= ================== ============================================================= +Dependency Minimum Version Notes +========================= ================== ============================================================= +lxml 4.3.0 XML parser for read_xml and tree builder for to_xml +========================= ================== ============================================================= + SQL databases ^^^^^^^^^^^^^ diff --git a/doc/source/reference/io.rst b/doc/source/reference/io.rst index e755ce94812bb..442631de50c7a 100644 --- a/doc/source/reference/io.rst +++ b/doc/source/reference/io.rst @@ -68,6 +68,13 @@ HTML read_html +XML +~~~~ +.. autosummary:: + :toctree: api/ + + read_xml + HDFStore: PyTables (HDF5) ~~~~~~~~~~~~~~~~~~~~~~~~~ .. autosummary:: diff --git a/doc/source/user_guide/io.rst b/doc/source/user_guide/io.rst index d7c1ca8bca598..7e113c93baabe 100644 --- a/doc/source/user_guide/io.rst +++ b/doc/source/user_guide/io.rst @@ -22,6 +22,7 @@ The pandas I/O API is a set of top level ``reader`` functions accessed like text;Fixed-Width Text File;:ref:`read_fwf` text;`JSON `__;:ref:`read_json`;:ref:`to_json` text;`HTML `__;:ref:`read_html`;:ref:`to_html` + text;`XML `__;:ref:`read_xml`;:ref:`to_xml` text; Local clipboard;:ref:`read_clipboard`;:ref:`to_clipboard` binary;`MS Excel `__;:ref:`read_excel`;:ref:`to_excel` binary;`OpenDocument `__;:ref:`read_excel`; @@ -2831,6 +2832,461 @@ parse HTML tables in the top-level pandas io function ``read_html``. +XML +--- + +.. _io.read_xml: + +Reading XML +''''''''''' + +.. versionadded:: 1.3.0 + +The top-level :func:`~pandas.io.xml.read_xml` function can accept an XML +string/file/URL and will parse nodes and attributes into a pandas ``DataFrame``. + +.. note:: + + Since there is no standard XML structure where design types can vary in + many ways, ``read_xml`` works best with flatter, shallow versions. If + an XML document is deeply nested, use the ``stylesheet`` feature to + transform XML into a flatter version. + +Let's look at a few examples. + +Read an XML string: + +.. ipython:: python + + xml = """ + + + Everyday Italian + Giada De Laurentiis + 2005 + 30.00 + + + Harry Potter + J K. Rowling + 2005 + 29.99 + + + Learning XML + Erik T. Ray + 2003 + 39.95 + + """ + + df = pd.read_xml(xml) + df + +Read a URL with no options: + +.. ipython:: python + + df = pd.read_xml("https://www.w3schools.com/xml/books.xml") + df + +Read in the content of the "books.xml" file and pass it to ``read_xml`` +as a string: + +.. ipython:: python + :suppress: + + rel_path = os.path.join("..", "pandas", "tests", "io", "data", "xml", + "books.xml") + file_path = os.path.abspath(rel_path) + +.. ipython:: python + + with open(file_path, "r") as f: + df = pd.read_xml(f.read()) + df + +Read in the content of the "books.xml" as instance of ``StringIO`` or +``BytesIO`` and pass it to ``read_xml``: + +.. ipython:: python + + with open(file_path, "r") as f: + sio = StringIO(f.read()) + + df = pd.read_xml(sio) + df + +.. 
ipython:: python + + with open(file_path, "rb") as f: + bio = BytesIO(f.read()) + + df = pd.read_xml(bio) + df + +Even read XML from AWS S3 buckets such as Python Software Foundation's IRS 990 Form: + +.. ipython:: python + + df = pd.read_xml( + "s3://irs-form-990/201923199349319487_public.xml", + xpath=".//irs:Form990PartVIISectionAGrp", + namespaces={"irs": "http://www.irs.gov/efile"} + ) + df + +With `lxml`_ as default ``parser``, you access the full-featured XML library +that extends Python's ElementTree API. One powerful tool is ability to query +nodes selectively or conditionally with more expressive XPath: + +.. _lxml: https://lxml.de + +.. ipython:: python + + df = pd.read_xml(file_path, xpath="//book[year=2005]") + df + +Specify only elements or only attributes to parse: + +.. ipython:: python + + df = pd.read_xml(file_path, elems_only=True) + df + +.. ipython:: python + + df = pd.read_xml(file_path, attrs_only=True) + df + +XML documents can have namespaces with prefixes and default namespaces without +prefixes both of which are denoted with a special attribute ``xmlns``. In order +to parse by node under a namespace context, ``xpath`` must reference a prefix. + +For example, below XML contains a namespace with prefix, ``doc``, and URI at +``https://example.com``. In order to parse ``doc:row`` nodes, +``namespaces`` must be used. + +.. ipython:: python + + xml = """ + + + square + 360 + 4.0 + + + circle + 360 + + + + triangle + 180 + 3.0 + + """ + + df = pd.read_xml(xml, + xpath="//doc:row", + namespaces={"doc": "https://example.com"}) + df + +Similarly, an XML document can have a default namespace without prefix. Failing +to assign a temporary prefix will return no nodes and raise a ``ValueError``. +But assiging *any* temporary name to correct URI allows parsing by nodes. + +.. ipython:: python + + xml = """ + + + square + 360 + 4.0 + + + circle + 360 + + + + triangle + 180 + 3.0 + + """ + + df = pd.read_xml(xml, + xpath="//pandas:row", + namespaces={"pandas": "https://example.com"}) + df + +However, if XPath does not reference node names such as default, ``/*``, then +``namespaces`` is not required. + +With `lxml`_ as parser, you can flatten nested XML documents with an XSLT +script which also can be string/file/URL types. As background, `XSLT`_ is +a special-purpose language written in a special XML file that can transform +original XML documents into other XML, HTML, even text (CSV, JSON, etc.) +using an XSLT processor. + +.. _lxml: https://lxml.de +.. _XSLT: https://www.w3.org/TR/xslt/ + +For example, consider this somewhat nested structure of Chicago "L" Rides +where station and rides elements encapsulate data in their own sections. +With below XSLT, ``lxml`` can transform original nested document into a flatter +output (as shown below for demonstration) for easier parse into ``DataFrame``: + +.. ipython:: python + + xml = """ + + + + 2020-09-01T00:00:00 + + 864.2 + 534 + 417.2 + + + + + 2020-09-01T00:00:00 + + 2707.4 + 1909.8 + 1438.6 + + + + + 2020-09-01T00:00:00 + + 2949.6 + 1657 + 1453.8 + + + """ + + xsl = """ + + + + + + + + + + + + + + + """ + + output = """ + + + 40850 + Library + 2020-09-01T00:00:00 + 864.2 + 534 + 417.2 + + + 41700 + Washington/Wabash + 2020-09-01T00:00:00 + 2707.4 + 1909.8 + 1438.6 + + + 40380 + Clark/Lake + 2020-09-01T00:00:00 + 2949.6 + 1657 + 1453.8 + + """ + + df = pd.read_xml(xml, stylesheet=xsl) + df + + +.. _io.xml: + +Writing XML +''''''''''' + +.. 
versionadded:: 1.3.0 + +``DataFrame`` objects have an instance method ``to_xml`` which renders the +contents of the ``DataFrame`` as an XML document. + +.. note:: + + This method does not support special properties of XML including DTD, + CData, XSD schemas, processing instructions, comments, and others. + Only namespaces at the root level is supported. However, ``stylesheet`` + allows design changes after initial output. + +Let's look at a few examples. + +Write an XML without options: + +.. ipython:: python + + geom_df = pd.DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4, np.nan, 3], + } + ) + + print(geom_df.to_xml()) + + +Write an XML with new root and row name: + +.. ipython:: python + + print(geom_df.to_xml(root_name="geometry", row_name="objects")) + +Write an attribute-centric XML: + +.. ipython:: python + + print(geom_df.to_xml(attr_cols=geom_df.columns.tolist())) + +Write a mix of elements and attributes: + +.. ipython:: python + + print( + geom_df.to_xml( + index=False, + attr_cols=['shape'], + elem_cols=['degrees', 'sides']) + ) + +Any ``DataFrames`` with hierarchical columns will be flattened for XML element names +with levels delimited by underscores: + +.. ipython:: python + + ext_geom_df = pd.DataFrame( + { + "type": ["polygon", "other", "polygon"], + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4, np.nan, 3], + } + ) + + pvt_df = ext_geom_df.pivot_table(index='shape', + columns='type', + values=['degrees', 'sides'], + aggfunc='sum') + pvt_df + + print(pvt_df.to_xml()) + +Write an XML with default namespace: + +.. ipython:: python + + print(geom_df.to_xml(namespaces={"": "https://example.com"})) + +Write an XML with namespace prefix: + +.. ipython:: python + + print( + geom_df.to_xml(namespaces={"doc": "https://example.com"}, + prefix="doc") + ) + +Write an XML without declaration or pretty print: + +.. ipython:: python + + print( + geom_df.to_xml(xml_declaration=False, + pretty_print=False) + ) + +Write an XML and transform with stylesheet: + +.. ipython:: python + + xsl = """ + + + + + + + + + + + polygon + + + + + + + + """ + + print(geom_df.to_xml(stylesheet=xsl)) + + +XML Final Notes +''''''''''''''' + +* All XML documents adhere to `W3C specifications`_. Both ``etree`` and ``lxml`` + parsers will fail to parse any markup document that is not well-formed or + follows XML syntax rules. Do be aware HTML is not an XML document unless it + follows XHTML specs. However, other popular markup types including KML, XAML, + RSS, MusicML, MathML are compliant `XML schemas`_. + +* For above reason, if your application builds XML prior to pandas operations, + use appropriate DOM libraries like ``etree`` and ``lxml`` to build the necessary + document and not by string concatenation or regex adjustments. Always remember + XML is a *special* text file with markup rules. + +* With very large XML files (several hundred MBs to GBs), XPath and XSLT + can become memory-intensive operations. Be sure to have enough available + RAM for reading and writing to large XML files (roughly about 5 times the + size of text). + +* Because XSLT is a programming language, use it with caution since such scripts + can pose a security risk in your environment and can run large or infinite + recursive operations. Always test scripts on small fragments before full run. + +* The `etree`_ parser supports all functionality of both ``read_xml`` and + ``to_xml`` except for complex XPath and any XSLT. 
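+  For a concrete sense of that difference, the same file can be read with
+  either parser (a minimal sketch, assuming the ``books.xml`` file used in
+  the examples above is available locally):
+
+  .. code-block:: python
+
+     import pandas as pd
+
+     # lxml (the default) supports richer XPath 1.0, e.g. predicates
+     df = pd.read_xml("books.xml", xpath="//book[year=2005]")
+
+     # etree ships with the standard library but supports only limited XPath
+     df = pd.read_xml("books.xml", xpath=".//book", parser="etree")
+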
Though limited in features, + ``etree`` is still a reliable and capable parser and tree builder. Its + performance may trail ``lxml`` to a certain degree for larger files but + relatively unnoticeable on small to medium size files. + +.. _`W3C specifications`: https://www.w3.org/TR/xml/ +.. _`XML schemas`: https://en.wikipedia.org/wiki/List_of_types_of_XML_schemas +.. _`etree`: https://docs.python.org/3/library/xml.etree.elementtree.html + + .. _io.excel: diff --git a/doc/source/whatsnew/v1.3.0.rst b/doc/source/whatsnew/v1.3.0.rst index 32a2514b3b6a3..22fe6d53ec8c3 100644 --- a/doc/source/whatsnew/v1.3.0.rst +++ b/doc/source/whatsnew/v1.3.0.rst @@ -43,6 +43,73 @@ For example: storage_options=headers ) +.. _whatsnew_130.read_to_xml: + +Read and write XML documents +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +We added I/O support to read and render shallow versions of `XML`_ documents with +:func:`pandas.read_xml` and :meth:`DataFrame.to_xml`. Using `lxml`_ as parser, +both XPath 1.0 and XSLT 1.0 is available. (:issue:`27554`) + +.. _XML: https://www.w3.org/standards/xml/core +.. _lxml: https://lxml.de + +.. code-block:: ipython + + In [1]: xml = """ + ...: + ...: + ...: square + ...: 360 + ...: 4.0 + ...: + ...: + ...: circle + ...: 360 + ...: + ...: + ...: + ...: triangle + ...: 180 + ...: 3.0 + ...: + ...: """ + + In [2]: df = pd.read_xml(xml) + In [3]: df + Out[3]: + shape degrees sides + 0 square 360 4.0 + 1 circle 360 NaN + 2 triangle 180 3.0 + + In [4]: df.to_xml() + Out[4]: + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + + + +For more, see :ref:`io.xml` in the user guide on IO tools. + .. _whatsnew_130.enhancements.other: Other enhancements diff --git a/pandas/__init__.py b/pandas/__init__.py index cc4c99efc4345..7cad3eded0585 100644 --- a/pandas/__init__.py +++ b/pandas/__init__.py @@ -167,6 +167,7 @@ read_feather, read_gbq, read_html, + read_xml, read_json, read_stata, read_sas, diff --git a/pandas/core/frame.py b/pandas/core/frame.py index 469783913dc42..88308a89503c1 100644 --- a/pandas/core/frame.py +++ b/pandas/core/frame.py @@ -2641,6 +2641,189 @@ def to_html( render_links=render_links, ) + @doc(storage_options=generic._shared_docs["storage_options"]) + def to_xml( + self, + path_or_buffer: Optional[FilePathOrBuffer] = None, + index: bool = True, + root_name: Optional[str] = "data", + row_name: Optional[str] = "row", + na_rep: Optional[str] = None, + attr_cols: Optional[Union[str, List[str]]] = None, + elem_cols: Optional[Union[str, List[str]]] = None, + namespaces: Optional[Dict[Optional[str], str]] = None, + prefix: Optional[str] = None, + encoding: str = "utf-8", + xml_declaration: Optional[bool] = True, + pretty_print: Optional[bool] = True, + parser: Optional[str] = "lxml", + stylesheet: Optional[FilePathOrBuffer] = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions = None, + ) -> Optional[str]: + """ + Render a DataFrame to an XML document. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + path_or_buffer : str, path object or file-like object, optional + File to write output to. If None, the output is returned as a + string. + index : bool, default True + Whether to include index in XML document. + root_name : str, default 'data' + The name of root element in XML document. + row_name : str, default 'row' + The name of row element in XML document. + na_rep : str, optional + Missing data representation. + attr_cols : list-like, optional + List of columns to write as attributes in row element. 
+            Hierarchical columns will be flattened with underscore
+            delimiting the different levels.
+        elem_cols : list-like, optional
+            List of columns to write as children in row element. By default,
+            all columns output as children of row element. Hierarchical
+            columns will be flattened with underscore delimiting the
+            different levels.
+        namespaces : dict, optional
+            All namespaces to be defined in root element. Keys of dict
+            should be prefix names and values of dict corresponding URIs.
+            Default namespaces should be given empty string key. For
+            example, ::
+
+                namespaces = {{"": "https://example.com"}}
+
+        prefix : str, optional
+            Namespace prefix to be used for every element and/or attribute
+            in document. This should be one of the keys in ``namespaces``
+            dict.
+        encoding : str, default 'utf-8'
+            Encoding of the resulting document.
+        xml_declaration : bool, default True
+            Whether to include the XML declaration at start of document.
+        pretty_print : bool, default True
+            Whether output should be pretty printed with indentation and
+            line breaks.
+        parser : {{'lxml','etree'}}, default 'lxml'
+            Parser module to use for building of tree. Only 'lxml' and
+            'etree' are supported. With 'lxml', the ability to use XSLT
+            stylesheets is supported.
+        stylesheet : str, path object or file-like object, optional
+            A URL, file-like object, or a raw string containing an XSLT
+            script used to transform the raw XML output. Script should use
+            layout of elements and attributes from original output. This
+            argument requires ``lxml`` to be installed. Only XSLT 1.0
+            scripts, not later versions, are currently supported.
+        compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer'
+            For on-the-fly decompression of on-disk data. If 'infer', then use
+            gzip, bz2, zip or xz if path_or_buffer is a string ending in
+            '.gz', '.bz2', '.zip', or '.xz', respectively, and no decompression
+            otherwise. If using 'zip', the ZIP file must contain only one data
+            file to be read in. Set to None for no decompression.
+        {storage_options}
+
+        Returns
+        -------
+        None or str
+            If ``path_or_buffer`` is None, returns the resulting XML format as a
+            string. Otherwise returns None.
+
+        See Also
+        --------
+        to_json : Convert the pandas object to a JSON string.
+        to_html : Convert DataFrame to HTML.
+
+        Examples
+        --------
+        >>> df = pd.DataFrame({{'shape': ['square', 'circle', 'triangle'],
+        ...                    'degrees': [360, 360, 180],
+        ...                    'sides': [4, np.nan, 3]}})
+
+        >>> df.to_xml()  # doctest: +SKIP
+        <?xml version='1.0' encoding='utf-8'?>
+        <data>
+          <row>
+            <index>0</index>
+            <shape>square</shape>
+            <degrees>360</degrees>
+            <sides>4.0</sides>
+          </row>
+          <row>
+            <index>1</index>
+            <shape>circle</shape>
+            <degrees>360</degrees>
+            <sides/>
+          </row>
+          <row>
+            <index>2</index>
+            <shape>triangle</shape>
+            <degrees>180</degrees>
+            <sides>3.0</sides>
+          </row>
+        </data>
+
+        >>> df.to_xml(attr_cols=[
+        ...     'index', 'shape', 'degrees', 'sides'
+        ... ])  # doctest: +SKIP
+        <?xml version='1.0' encoding='utf-8'?>
+        <data>
+          <row index="0" shape="square" degrees="360" sides="4.0"/>
+          <row index="1" shape="circle" degrees="360"/>
+          <row index="2" shape="triangle" degrees="180" sides="3.0"/>
+        </data>
+
+        >>> df.to_xml(namespaces={{"doc": "https://example.com"}},
+        ...
prefix="doc") # doctest: +SKIP + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + + + """ + + formatter = fmt.DataFrameFormatter( + self, + index=index, + ) + + return fmt.DataFrameRenderer(formatter).to_xml( + path_or_buffer=path_or_buffer, + index=index, + root_name=root_name, + row_name=row_name, + na_rep=na_rep, + attr_cols=attr_cols, + elem_cols=elem_cols, + namespaces=namespaces, + prefix=prefix, + encoding=encoding, + xml_declaration=xml_declaration, + pretty_print=pretty_print, + parser=parser, + stylesheet=stylesheet, + compression=compression, + storage_options=storage_options, + ) + # ---------------------------------------------------------------------- @Substitution( klass="DataFrame", diff --git a/pandas/io/api.py b/pandas/io/api.py index 2241f491b5d48..5926f2166ee9d 100644 --- a/pandas/io/api.py +++ b/pandas/io/api.py @@ -37,3 +37,4 @@ read_sql_table, ) from pandas.io.stata import read_stata +from pandas.io.xml import read_xml diff --git a/pandas/io/formats/format.py b/pandas/io/formats/format.py index a1b6986079723..44428abdcd8a5 100644 --- a/pandas/io/formats/format.py +++ b/pandas/io/formats/format.py @@ -1,5 +1,5 @@ """ -Internal module for formatting output data in csv, html, +Internal module for formatting output data in csv, html, xml, and latex files. This module also applies to display formatting. """ from __future__ import annotations @@ -61,6 +61,8 @@ IndexLabel, StorageOptions, ) +from pandas.compat._optional import import_optional_dependency +from pandas.util._decorators import doc from pandas.core.dtypes.common import ( is_categorical_dtype, @@ -96,6 +98,7 @@ from pandas.core.indexes.datetimes import DatetimeIndex from pandas.core.indexes.timedeltas import TimedeltaIndex from pandas.core.reshape.concat import concat +from pandas.core.shared_docs import _shared_docs from pandas.io.common import stringify_path from pandas.io.formats.printing import ( @@ -941,6 +944,7 @@ class DataFrameRenderer: Called in pandas.core.frame.DataFrame: - to_html + - to_xml - to_string Parameters @@ -1033,6 +1037,135 @@ def to_html( string = html_formatter.to_string() return save_to_buffer(string, buf=buf, encoding=encoding) + @doc(storage_options=_shared_docs["storage_options"]) + def to_xml( + self, + path_or_buffer: Optional[FilePathOrBuffer] = None, + index: Optional[bool] = True, + root_name: Optional[str] = "data", + row_name: Optional[str] = "row", + na_rep: Optional[str] = None, + attr_cols: Optional[Union[str, List[str]]] = None, + elem_cols: Optional[Union[str, List[str]]] = None, + namespaces: Optional[Dict[Optional[str], str]] = None, + prefix: Optional[str] = None, + encoding: str = "utf-8", + xml_declaration: Optional[bool] = True, + pretty_print: Optional[bool] = True, + parser: Optional[str] = "lxml", + stylesheet: Optional[FilePathOrBuffer] = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions = None, + ) -> Optional[str]: + """ + Render a DataFrame to an XML document. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + path_or_buffer : str, path object or file-like object, optional + File to write output to. If None, the output is returned as a + string. + index : bool, default True + Whether to include index in XML document. + root_name : str, default 'data' + The name of root element in XML document. + row_name : str, default 'row' + The name of row element in XML document. + na_rep : str, optional + Missing data representation. 
+ attr_cols : list-like, optional + List of columns to write as attributes in row element. + Hierarchical columns will be flattened with underscore + delimiting the different levels. + elem_cols : list-like, optional + List of columns to write as children in row element. By default, + all columns output as children of row element. Hierarchical + columns will be flattened with underscore delimiting the + different levels. + namespaces : dict, optional + All namespaces to be defined in root element. Keys of dict + should be prefix names and values of dict corresponding URIs. + Default namespaces should be given empty string key. For + example, :: + + namespaces = {{'': 'https://example.com'}} + + prefix : str, optional + Namespace prefix to be used for every element and/or attribute + in document. This should be one of the keys in ``namespaces`` + dict. + encoding : str, default 'utf-8' + Encoding of the resulting document. + xml_declaration : str, optional + Whether to include the XML declaration at start of document. + pretty_print : bool, default True + Whether output should be pretty printed with indentation and + line breaks. + parser : {{'lxml','etree'}}, default "lxml" + Parser module to use for building of tree. Only 'lxml' and + 'etree' are supported. With 'lxml', the ability to use XSLT + stylesheet is supported. + stylesheet : str, path object or file-like object, optional + A URL, file-like object, or a raw string containing an XSLT + script used to transform the raw XML output. Script should use + layout of elements and attributes from original output. This + argument requires ``lxml`` to be installed. Only XSLT 1.0 + scripts and not later versions is currently supported. + compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer' + For on-the-fly decompression of on-disk data. If 'infer', then use + gzip, bz2, zip or xz if path_or_buffer is a string ending in + '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression + otherwise. If using 'zip', the ZIP file must contain only one data + file to be read in. Set to None for no decompression. + {storage_options} + """ + + from pandas.io.formats.xml import ( + EtreeXMLFormatter, + LxmlXMLFormatter, + ) + + lxml = import_optional_dependency("lxml.etree", errors="ignore") + + TreeBuilder: Union[Type[EtreeXMLFormatter], Type[LxmlXMLFormatter]] + + if parser == "lxml": + if lxml is not None: + TreeBuilder = LxmlXMLFormatter + else: + raise ImportError( + "lxml not found, please install or use the etree parser." + ) + + elif parser == "etree": + TreeBuilder = EtreeXMLFormatter + + else: + raise ValueError("Values for parser can only be lxml or etree.") + + xml_formatter = TreeBuilder( + self.fmt, + path_or_buffer=path_or_buffer, + index=index, + root_name=root_name, + row_name=row_name, + na_rep=na_rep, + attr_cols=attr_cols, + elem_cols=elem_cols, + namespaces=namespaces, + prefix=prefix, + encoding=encoding, + xml_declaration=xml_declaration, + pretty_print=pretty_print, + stylesheet=stylesheet, + compression=compression, + storage_options=storage_options, + ) + + return xml_formatter.write_output() + def to_string( self, buf: Optional[FilePathOrBuffer[str]] = None, diff --git a/pandas/io/formats/xml.py b/pandas/io/formats/xml.py new file mode 100644 index 0000000000000..044b03ba83714 --- /dev/null +++ b/pandas/io/formats/xml.py @@ -0,0 +1,599 @@ +""" +:mod:`pandas.io.formats.xml` is a module for formatting data in XML. 
+""" + +import codecs +import io +from typing import ( + Any, + Dict, + List, + Optional, + Union, +) + +from pandas._typing import ( + CompressionOptions, + FilePathOrBuffer, + StorageOptions, +) +from pandas.errors import AbstractMethodError + +from pandas.core.dtypes.common import is_list_like + +from pandas.io.common import get_handle +from pandas.io.formats.format import DataFrameFormatter +from pandas.io.xml import ( + get_data_from_filepath, + preprocess_data, +) + + +class BaseXMLFormatter: + """ + Subclass for formatting data in XML. + + Parameters + ---------- + path_or_buffer : str or file-like + This can be either a string of raw XML, a valid URL, + file or file-like object. + + index : bool + Whether to include index in xml document. + + row_name : str + Name for root of xml document. Default is 'data'. + + root_name : str + Name for row elements of xml document. Default is 'row'. + + na_rep : str + Missing data representation. + + attrs_cols : list + List of columns to write as attributes in row element. + + elem_cols : list + List of columns to write as children in row element. + + namespacess : dict + The namespaces to define in XML document as dicts with key + being namespace and value the URI. + + prefix : str + The prefix for each element in XML document including root. + + encoding : str + Encoding of xml object or document. + + xml_declaration : bool + Whether to include xml declaration at top line item in xml. + + pretty_print : bool + Whether to write xml document with line breaks and indentation. + + stylesheet : str or file-like + A URL, file, file-like object, or a raw string containing XSLT. + + compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer' + Compression type for on-the-fly decompression of on-disk data. + If 'infer', then use extension for gzip, bz2, zip or xz. + + storage_options : dict, optional + Extra options that make sense for a particular storage connection, + e.g. host, port, username, password, etc., + + See also + -------- + pandas.io.formats.xml.EtreeXMLFormatter + pandas.io.formats.xml.LxmlXMLFormatter + + """ + + def __init__( + self, + formatter: DataFrameFormatter, + path_or_buffer: Optional[FilePathOrBuffer] = None, + index: Optional[bool] = True, + root_name: Optional[str] = "data", + row_name: Optional[str] = "row", + na_rep: Optional[str] = None, + attr_cols: Optional[List[str]] = None, + elem_cols: Optional[List[str]] = None, + namespaces: Optional[Dict[Optional[str], str]] = None, + prefix: Optional[str] = None, + encoding: str = "utf-8", + xml_declaration: Optional[bool] = True, + pretty_print: Optional[bool] = True, + stylesheet: Optional[FilePathOrBuffer] = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions = None, + ) -> None: + self.fmt = formatter + self.path_or_buffer = path_or_buffer + self.index = index + self.root_name = root_name + self.row_name = row_name + self.na_rep = na_rep + self.attr_cols = attr_cols + self.elem_cols = elem_cols + self.namespaces = namespaces + self.prefix = prefix + self.encoding = encoding + self.xml_declaration = xml_declaration + self.pretty_print = pretty_print + self.stylesheet = stylesheet + self.compression = compression + self.storage_options = storage_options + + self.frame = self.fmt.frame + self.orig_cols = self.fmt.frame.columns.tolist() + self.frame_dicts = self.process_dataframe() + + def build_tree(self) -> bytes: + """ + Build tree from data. 
+ + This method initializes the root and builds attributes and elements + with optional namespaces. + """ + raise AbstractMethodError(self) + + def validate_columns(self) -> None: + """ + Validate elems_cols and attrs_cols. + + This method will check if columns is list-like. + + Raises + ------ + ValueError + * If value is not a list and less then length of nodes. + """ + if self.attr_cols and not is_list_like(self.attr_cols): + raise TypeError( + f"{type(self.attr_cols).__name__} is not a valid type for attr_cols" + ) + + if self.elem_cols and not is_list_like(self.elem_cols): + raise TypeError( + f"{type(self.elem_cols).__name__} is not a valid type for elem_cols" + ) + + def validate_encoding(self) -> None: + """ + Validate encoding. + + This method will check if encoding is among listed under codecs. + + Raises + ------ + LookupError + * If encoding is not available in codecs. + """ + + codecs.lookup(self.encoding) + + def process_dataframe(self) -> Dict[Union[int, str], Dict[str, Any]]: + """ + Adjust Data Frame to fit xml output. + + This method will adjust underlying data frame for xml output, + including optionally replacing missing values and including indexes. + """ + + df = self.fmt.frame + + if self.index: + df = df.reset_index() + + if self.na_rep: + df = df.replace({None: self.na_rep, float("nan"): self.na_rep}) + + return df.to_dict(orient="index") + + def handle_indexes(self) -> None: + """ + Handle indexes. + + This method will add indexes into attr_cols or elem_cols. + """ + + indexes: List[str] = [ + x for x in self.frame_dicts[0].keys() if x not in self.orig_cols + ] + + if self.attr_cols and self.index: + self.attr_cols = indexes + self.attr_cols + + if self.elem_cols and self.index: + self.elem_cols = indexes + self.elem_cols + + def get_prefix_uri(self) -> str: + """ + Get uri of namespace prefix. + + This method retrieves corresponding URI to prefix in namespaces. + + Raises + ------ + KeyError + *If prefix is not included in namespace dict. + """ + + raise AbstractMethodError(self) + + def other_namespaces(self) -> dict: + """ + Define other namespaces. + + This method will build dictionary of namespaces attributes + for root element, conditionally with optional namespaces and + prefix. + """ + + nmsp_dict: Dict[str, str] = {} + if self.namespaces and self.prefix is None: + nmsp_dict = {"xmlns": n for p, n in self.namespaces.items() if p != ""} + + if self.namespaces and self.prefix: + nmsp_dict = {"xmlns": n for p, n in self.namespaces.items() if p == ""} + + return nmsp_dict + + def build_attribs(self) -> None: + """ + Create attributes of row. + + This method adds attributes using attr_cols to row element and + works with tuples for multindex or hierarchical columns. + """ + + raise AbstractMethodError(self) + + def build_elems(self) -> None: + """ + Create child elements of row. + + This method adds child elements using elem_cols to row element and + works with tuples for multindex or hierarchical columns. 
+ """ + + raise AbstractMethodError(self) + + def write_output(self) -> Optional[str]: + xml_doc = self.build_tree() + + out_str: Optional[str] + + if self.path_or_buffer is not None: + with get_handle( + self.path_or_buffer, + "wb", + compression=self.compression, + storage_options=self.storage_options, + is_text=False, + ) as handles: + handles.handle.write(xml_doc) # type: ignore[arg-type] + return None + + else: + return xml_doc.decode(self.encoding).rstrip() + + +class EtreeXMLFormatter(BaseXMLFormatter): + """ + Class for formatting data in xml using Python standard library + modules: `xml.etree.ElementTree` and `xml.dom.minidom`. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.validate_columns() + self.validate_encoding() + self.handle_indexes() + self.prefix_uri = self.get_prefix_uri() + + def build_tree(self) -> bytes: + from xml.etree.ElementTree import ( + Element, + SubElement, + tostring, + ) + + self.root = Element( + f"{self.prefix_uri}{self.root_name}", attrib=self.other_namespaces() + ) + + for k, d in self.frame_dicts.items(): + self.d = d + self.elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}") + + if not self.attr_cols and not self.elem_cols: + self.elem_cols = list(self.frame_dicts[0].keys()) + self.build_elems() + + else: + self.build_attribs() + self.build_elems() + + self.out_xml = tostring(self.root, method="xml", encoding=self.encoding) + + if self.pretty_print: + self.out_xml = self.prettify_tree() + + if not self.xml_declaration: + self.out_xml = self.remove_declaration() + + if self.stylesheet is not None: + raise ValueError( + "To use stylesheet, you need lxml installed and selected as parser." + ) + + return self.out_xml + + def get_prefix_uri(self) -> str: + from xml.etree.ElementTree import register_namespace + + uri = "" + if self.namespaces: + for p, n in self.namespaces.items(): + if isinstance(p, str) and isinstance(n, str): + register_namespace(p, n) + if self.prefix: + try: + uri = f"{{{self.namespaces[self.prefix]}}}" + except KeyError: + raise KeyError(f"{self.prefix} is not included in namespaces") + else: + uri = f'{{{self.namespaces[""]}}}' + + return uri + + def build_attribs(self) -> None: + if not self.attr_cols: + return + + for col in self.attr_cols: + flat_col = col + if isinstance(col, tuple): + flat_col = ( + "".join(str(c) for c in col).strip() + if "" in col + else "_".join(str(c) for c in col).strip() + ) + + attr_name = f"{self.prefix_uri}{flat_col}" + try: + val = ( + None + if self.d[col] is None or self.d[col] != self.d[col] + else str(self.d[col]) + ) + if val is not None: + self.elem_row.attrib[attr_name] = val + except KeyError: + raise KeyError(f"no valid column, {col}") + + def build_elems(self) -> None: + from xml.etree.ElementTree import SubElement + + if not self.elem_cols: + return + + for col in self.elem_cols: + flat_col = col + if isinstance(col, tuple): + flat_col = ( + "".join(str(c) for c in col).strip() + if "" in col + else "_".join(str(c) for c in col).strip() + ) + + elem_name = f"{self.prefix_uri}{flat_col}" + try: + val = ( + None + if self.d[col] in [None, ""] or self.d[col] != self.d[col] + else str(self.d[col]) + ) + SubElement(self.elem_row, elem_name).text = val + except KeyError: + raise KeyError(f"no valid column, {col}") + + def prettify_tree(self) -> bytes: + """ + Output tree for pretty print format. + + This method will pretty print xml with line breaks and indentation. 
+ """ + + from xml.dom.minidom import parseString + + dom = parseString(self.out_xml) + + return dom.toprettyxml(indent=" ", encoding=self.encoding) + + def remove_declaration(self) -> bytes: + """ + Remove xml declaration. + + This method will remove xml declaration of working tree. Currently, + pretty_print is not supported in etree. + """ + + return self.out_xml.split(b"?>")[-1].strip() + + +class LxmlXMLFormatter(BaseXMLFormatter): + """ + Class for formatting data in xml using Python standard library + modules: `xml.etree.ElementTree` and `xml.dom.minidom`. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self.validate_columns() + self.validate_encoding() + self.prefix_uri = self.get_prefix_uri() + + self.convert_empty_str_key() + self.handle_indexes() + + def build_tree(self) -> bytes: + """ + Build tree from data. + + This method initializes the root and builds attributes and elements + with optional namespaces. + """ + from lxml.etree import ( + Element, + SubElement, + tostring, + ) + + self.root = Element(f"{self.prefix_uri}{self.root_name}", nsmap=self.namespaces) + + for k, d in self.frame_dicts.items(): + self.d = d + self.elem_row = SubElement(self.root, f"{self.prefix_uri}{self.row_name}") + + if not self.attr_cols and not self.elem_cols: + self.elem_cols = list(self.frame_dicts[0].keys()) + self.build_elems() + + else: + self.build_attribs() + self.build_elems() + + self.out_xml = tostring( + self.root, + pretty_print=self.pretty_print, + method="xml", + encoding=self.encoding, + xml_declaration=self.xml_declaration, + ) + + if self.stylesheet is not None: + self.out_xml = self.transform_doc() + + return self.out_xml + + def convert_empty_str_key(self) -> None: + """ + Replace zero-lengh string in `namespaces`. + + This method will replce '' with None to align to `lxml` + requirement that empty string prefixes are not allowed. + """ + + if self.namespaces and "" in self.namespaces.keys(): + self.namespaces[None] = self.namespaces.pop("", "default") + + def get_prefix_uri(self) -> str: + uri = "" + if self.namespaces: + if self.prefix: + try: + uri = f"{{{self.namespaces[self.prefix]}}}" + except KeyError: + raise KeyError(f"{self.prefix} is not included in namespaces") + else: + uri = f'{{{self.namespaces[""]}}}' + + return uri + + def build_attribs(self) -> None: + if not self.attr_cols: + return + + for col in self.attr_cols: + flat_col = col + if isinstance(col, tuple): + flat_col = ( + "".join(str(c) for c in col).strip() + if "" in col + else "_".join(str(c) for c in col).strip() + ) + + attr_name = f"{self.prefix_uri}{flat_col}" + try: + val = ( + None + if self.d[col] is None or self.d[col] != self.d[col] + else str(self.d[col]) + ) + if val is not None: + self.elem_row.attrib[attr_name] = val + except KeyError: + raise KeyError(f"no valid column, {col}") + + def build_elems(self) -> None: + from lxml.etree import SubElement + + if not self.elem_cols: + return + + for col in self.elem_cols: + flat_col = col + if isinstance(col, tuple): + flat_col = ( + "".join(str(c) for c in col).strip() + if "" in col + else "_".join(str(c) for c in col).strip() + ) + + elem_name = f"{self.prefix_uri}{flat_col}" + try: + val = ( + None + if self.d[col] in [None, ""] or self.d[col] != self.d[col] + else str(self.d[col]) + ) + SubElement(self.elem_row, elem_name).text = val + except KeyError: + raise KeyError(f"no valid column, {col}") + + def transform_doc(self) -> bytes: + """ + Parse stylesheet from file or buffer and run it. 
+
+        This method will parse the stylesheet object into a tree, conditionally
+        by its specific object type, then transform the original tree with
+        the XSLT script.
+        """
+
+        from lxml.etree import (
+            XSLT,
+            XMLParser,
+            fromstring,
+            parse,
+        )
+
+        style_doc = self.stylesheet
+
+        handle_data = get_data_from_filepath(
+            filepath_or_buffer=style_doc,
+            encoding=self.encoding,
+            compression=self.compression,
+            storage_options=self.storage_options,
+        )
+
+        with preprocess_data(handle_data) as xml_data:
+            curr_parser = XMLParser(encoding=self.encoding)
+
+            if isinstance(xml_data, io.StringIO):
+                xsl_doc = fromstring(
+                    xml_data.getvalue().encode(self.encoding), parser=curr_parser
+                )
+            else:
+                xsl_doc = parse(xml_data, parser=curr_parser)
+
+        transformer = XSLT(xsl_doc)
+        new_doc = transformer(self.root)
+
+        return bytes(new_doc)
diff --git a/pandas/io/xml.py b/pandas/io/xml.py
new file mode 100644
index 0000000000000..83eba5f17c7b3
--- /dev/null
+++ b/pandas/io/xml.py
@@ -0,0 +1,944 @@
+"""
+:mod:`pandas.io.xml` is a module for reading XML.
+"""
+
+import io
+from typing import (
+    Dict,
+    List,
+    Optional,
+    Union,
+)
+
+from pandas._typing import (
+    Buffer,
+    CompressionOptions,
+    FilePathOrBuffer,
+    StorageOptions,
+)
+from pandas.compat._optional import import_optional_dependency
+from pandas.errors import (
+    AbstractMethodError,
+    ParserError,
+)
+from pandas.util._decorators import doc
+
+from pandas.core.dtypes.common import is_list_like
+
+from pandas.core.frame import DataFrame
+from pandas.core.shared_docs import _shared_docs
+
+from pandas.io.common import (
+    file_exists,
+    get_handle,
+    is_fsspec_url,
+    is_url,
+    stringify_path,
+)
+from pandas.io.parsers import TextParser
+
+
+class _XMLFrameParser:
+    """
+    Internal base class to parse XML into DataFrames.
+
+    Parameters
+    ----------
+    path_or_buffer : a valid XML str, path object or file-like object
+        Any valid string path is acceptable. The string could be a URL. Valid
+        URL schemes include http, ftp, s3, and file.
+
+    xpath : str
+        The XPath expression to parse the required set of nodes for
+        migration to ``DataFrame``. ``etree`` supports limited XPath.
+
+    namespaces : dict
+        The namespaces defined in XML document (``xmlns:namespace='URI'``)
+        as dicts with key being namespace and value the URI.
+
+    elems_only : bool
+        Parse only the child elements at the specified ``xpath``.
+
+    attrs_only : bool
+        Parse only the attributes at the specified ``xpath``.
+
+    names : list
+        Column names for DataFrame of parsed XML data.
+
+    encoding : str
+        Encoding of xml object or document.
+
+    stylesheet : str or file-like
+        URL, file, file-like object, or a raw string containing XSLT;
+        ``etree`` does not support XSLT but the argument is retained for
+        consistency.
+
+    compression : {'infer', 'gzip', 'bz2', 'zip', 'xz', None}, default 'infer'
+        Compression type for on-the-fly decompression of on-disk data.
+        If 'infer', then use extension for gzip, bz2, zip or xz.
+
+    storage_options : dict, optional
+        Extra options that make sense for a particular storage connection,
+        e.g. host, port, username, password, etc.
+
+    See also
+    --------
+    pandas.io.xml._EtreeFrameParser
+    pandas.io.xml._LxmlFrameParser
+
+    Notes
+    -----
+    To subclass this class effectively you must override the following methods:
+        * :func:`parse_data`
+        * :func:`_parse_nodes`
+        * :func:`_parse_doc`
+        * :func:`_validate_names`
+        * :func:`_validate_path`
+
+    See each method's respective documentation for details on their
+    functionality.
+ """ + + def __init__( + self, + path_or_buffer, + xpath, + namespaces, + elems_only, + attrs_only, + names, + encoding, + stylesheet, + compression, + storage_options, + ): + self.path_or_buffer = path_or_buffer + self.xpath = xpath + self.namespaces = namespaces + self.elems_only = elems_only + self.attrs_only = attrs_only + self.names = names + self.encoding = encoding + self.stylesheet = stylesheet + self.is_style = None + self.compression = compression + self.storage_options = storage_options + + def parse_data(self) -> List[Dict[str, Optional[str]]]: + """ + Parse xml data. + + This method will call the other internal methods to + validate xpath, names, parse and return specific nodes. + """ + + raise AbstractMethodError(self) + + def _parse_nodes(self) -> List[Dict[str, Optional[str]]]: + """ + Parse xml nodes. + + This method will parse the children and attributes of elements + in xpath, conditionally for only elements, only attributes + or both while optionally renaming node names. + + Raises + ------ + ValueError + * If only elements and only attributes are specified. + + Notes + ----- + Namespace URIs will be removed from return node values.Also, + elements with missing children or attributes compared to siblings + will have optional keys filled withi None values. + """ + + raise AbstractMethodError(self) + + def _validate_path(self) -> None: + """ + Validate xpath. + + This method checks for syntax, evaluation, or empty nodes return. + + Raises + ------ + SyntaxError + * If xpah is not supported or issues with namespaces. + + ValueError + * If xpah does not return any nodes. + """ + + raise AbstractMethodError(self) + + def _validate_names(self) -> None: + """ + Validate names. + + This method will check if names is a list-like and aligns + with length of parse nodes. + + Raises + ------ + ValueError + * If value is not a list and less then length of nodes. + """ + raise AbstractMethodError(self) + + def _parse_doc(self): + """ + Build tree from io. + + This method will parse io object into tree for parsing + conditionally by its specific object type. + """ + + raise AbstractMethodError(self) + + +class _EtreeFrameParser(_XMLFrameParser): + """ + Internal class to parse XML into DataFrames with the Python + standard library XML module: `xml.etree.ElementTree`. + """ + + from xml.etree.ElementTree import ( + Element, + ElementTree, + ) + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def parse_data(self) -> List[Dict[str, Optional[str]]]: + + if self.stylesheet is not None: + raise ValueError( + "To use stylesheet, you need lxml installed and selected as parser." 
+ ) + + self.xml_doc = self._parse_doc() + + self._validate_path() + self._validate_names() + + return self._parse_nodes() + + def _parse_nodes(self) -> List[Dict[str, Optional[str]]]: + elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces) + dicts: List[Dict[str, Optional[str]]] + + if self.elems_only and self.attrs_only: + raise ValueError("Either element or attributes can be parsed not both.") + elif self.elems_only: + if self.names: + dicts = [ + { + **( + {el.tag: el.text.strip()} + if el.text and not el.text.isspace() + else {} + ), + **{ + nm: ch.text.strip() if ch.text else None + for nm, ch in zip(self.names, el.findall("*")) + }, + } + for el in elems + ] + else: + dicts = [ + { + ch.tag: ch.text.strip() if ch.text else None + for ch in el.findall("*") + } + for el in elems + ] + + elif self.attrs_only: + dicts = [ + {k: v.strip() if v else None for k, v in el.attrib.items()} + for el in elems + ] + + else: + if self.names: + dicts = [ + { + **el.attrib, + **( + {el.tag: el.text.strip()} + if el.text and not el.text.isspace() + else {} + ), + **{ + nm: ch.text.strip() if ch.text else None + for nm, ch in zip(self.names, el.findall("*")) + }, + } + for el in elems + ] + + else: + dicts = [ + { + **el.attrib, + **( + {el.tag: el.text.strip()} + if el.text and not el.text.isspace() + else {} + ), + **{ + ch.tag: ch.text.strip() if ch.text else None + for ch in el.findall("*") + }, + } + for el in elems + ] + + dicts = [ + {k.split("}")[1] if "}" in k else k: v for k, v in d.items()} for d in dicts + ] + + keys = list(dict.fromkeys([k for d in dicts for k in d.keys()])) + dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts] + + if self.names: + dicts = [ + {nm: v for nm, (k, v) in zip(self.names, d.items())} for d in dicts + ] + + return dicts + + def _validate_path(self) -> None: + """ + Notes + ----- + `etree` supports limited XPath. If user attempts a more complex + expression syntax error will raise. + """ + + msg = ( + "xpath does not return any nodes. " + "If document uses namespaces denoted with " + "xmlns, be sure to define namespaces and " + "use them in xpath." + ) + try: + elems = self.xml_doc.find(self.xpath, namespaces=self.namespaces) + if elems is None: + raise ValueError(msg) + + if elems is not None and elems.find("*") is None and elems.attrib is None: + raise ValueError(msg) + + except (KeyError, SyntaxError): + raise SyntaxError( + "You have used an incorrect or unsupported XPath " + "expression for etree library or you used an " + "undeclared namespace prefix." + ) + + def _validate_names(self) -> None: + if self.names: + parent = self.xml_doc.find(self.xpath, namespaces=self.namespaces) + children = parent.findall("*") if parent else [] + + if is_list_like(self.names): + if len(self.names) < len(children): + raise ValueError( + "names does not match length of child elements in xpath." 
+ ) + else: + raise TypeError( + f"{type(self.names).__name__} is not a valid type for names" + ) + + def _parse_doc(self) -> Union[Element, ElementTree]: + from xml.etree.ElementTree import ( + XMLParser, + parse, + ) + + handle_data = get_data_from_filepath( + filepath_or_buffer=self.path_or_buffer, + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as xml_data: + curr_parser = XMLParser(encoding=self.encoding) + r = parse(xml_data, parser=curr_parser) + + return r + + +class _LxmlFrameParser(_XMLFrameParser): + """ + Internal class to parse XML into DataFrames with third-party + full-featured XML library, `lxml`, that supports + XPath 1.0 and XSLT 1.0. + """ + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def parse_data(self) -> List[Dict[str, Optional[str]]]: + """ + Parse xml data. + + This method will call the other internal methods to + validate xpath, names, optionally parse and run XSLT, + and parse original or transformed XML and return specific nodes. + """ + + self.xml_doc = self._parse_doc(self.path_or_buffer) + + if self.stylesheet is not None: + self.xsl_doc = self._parse_doc(self.stylesheet) + self.xml_doc = self._transform_doc() + + self._validate_path() + self._validate_names() + + return self._parse_nodes() + + def _parse_nodes(self) -> List[Dict[str, Optional[str]]]: + elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces) + dicts: List[Dict[str, Optional[str]]] + + if self.elems_only and self.attrs_only: + raise ValueError("Either element or attributes can be parsed not both.") + + elif self.elems_only: + if self.names: + dicts = [ + { + **( + {el.tag: el.text.strip()} + if el.text and not el.text.isspace() + else {} + ), + **{ + nm: ch.text.strip() if ch.text else None + for nm, ch in zip(self.names, el.xpath("*")) + }, + } + for el in elems + ] + else: + dicts = [ + { + ch.tag: ch.text.strip() if ch.text else None + for ch in el.xpath("*") + } + for el in elems + ] + + elif self.attrs_only: + dicts = [el.attrib for el in elems] + + else: + if self.names: + dicts = [ + { + **el.attrib, + **( + {el.tag: el.text.strip()} + if el.text and not el.text.isspace() + else {} + ), + **{ + nm: ch.text.strip() if ch.text else None + for nm, ch in zip(self.names, el.xpath("*")) + }, + } + for el in elems + ] + else: + dicts = [ + { + **el.attrib, + **( + {el.tag: el.text.strip()} + if el.text and not el.text.isspace() + else {} + ), + **{ + ch.tag: ch.text.strip() if ch.text else None + for ch in el.xpath("*") + }, + } + for el in elems + ] + + if self.namespaces or "}" in list(dicts[0].keys())[0]: + dicts = [ + {k.split("}")[1] if "}" in k else k: v for k, v in d.items()} + for d in dicts + ] + + keys = list(dict.fromkeys([k for d in dicts for k in d.keys()])) + dicts = [{k: d[k] if k in d.keys() else None for k in keys} for d in dicts] + + if self.names: + dicts = [ + {nm: v for nm, (k, v) in zip(self.names, d.items())} for d in dicts + ] + + return dicts + + def _transform_doc(self): + """ + Transform original tree using stylesheet. + + This method will transform original xml using XSLT script into + am ideally flatter xml document for easier parsing and migration + to Data Frame. + """ + from lxml.etree import XSLT + + transformer = XSLT(self.xsl_doc) + new_doc = transformer(self.xml_doc) + + return new_doc + + def _validate_path(self) -> None: + + msg = ( + "xpath does not return any nodes. " + "Be sure row level nodes are in xpath. 
" + "If document uses namespaces denoted with " + "xmlns, be sure to define namespaces and " + "use them in xpath." + ) + + elems = self.xml_doc.xpath(self.xpath, namespaces=self.namespaces) + children = self.xml_doc.xpath(self.xpath + "/*", namespaces=self.namespaces) + attrs = self.xml_doc.xpath(self.xpath + "/@*", namespaces=self.namespaces) + + if elems == []: + raise ValueError(msg) + + if elems != [] and attrs == [] and children == []: + raise ValueError(msg) + + def _validate_names(self) -> None: + """ + Validate names. + + This method will check if names is a list and aligns with + length of parse nodes. + + Raises + ------ + ValueError + * If value is not a list and less then length of nodes. + """ + if self.names: + children = self.xml_doc.xpath( + self.xpath + "[1]/*", namespaces=self.namespaces + ) + + if is_list_like(self.names): + if len(self.names) < len(children): + raise ValueError( + "names does not match length of child elements in xpath." + ) + else: + raise TypeError( + f"{type(self.names).__name__} is not a valid type for names" + ) + + def _parse_doc(self, raw_doc): + from lxml.etree import ( + XMLParser, + fromstring, + parse, + ) + + handle_data = get_data_from_filepath( + filepath_or_buffer=raw_doc, + encoding=self.encoding, + compression=self.compression, + storage_options=self.storage_options, + ) + + with preprocess_data(handle_data) as xml_data: + curr_parser = XMLParser(encoding=self.encoding) + + if isinstance(xml_data, io.StringIO): + doc = fromstring( + xml_data.getvalue().encode(self.encoding), parser=curr_parser + ) + else: + doc = parse(xml_data, parser=curr_parser) + + return doc + + +def get_data_from_filepath( + filepath_or_buffer, + encoding, + compression, + storage_options, +) -> Union[str, bytes, Buffer]: + """ + Extract raw XML data. + + The method accepts three input types: + 1. filepath (string-like) + 2. file-like object (e.g. open file object, StringIO) + 3. XML string or bytes + + This method turns (1) into (2) to simplify the rest of the processing. + It returns input types (2) and (3) unchanged. + """ + filepath_or_buffer = stringify_path(filepath_or_buffer) + + if ( + isinstance(filepath_or_buffer, str) + and not filepath_or_buffer.startswith((" Union[io.StringIO, io.BytesIO]: + """ + Convert extracted raw data. + + This method will return underlying data of extracted XML content. + The data either has a `read` attribute (e.g. a file object or a + StringIO/BytesIO) or is a string or bytes that is an XML document. + """ + + if isinstance(data, str): + data = io.StringIO(data) + + elif isinstance(data, bytes): + data = io.BytesIO(data) + + return data + + +def _data_to_frame(data, **kwargs) -> DataFrame: + """ + Convert parsed data to Data Frame. + + This method will bind xml dictionary data of keys and values + into named columns of Data Frame using the built-in TextParser + class that build Data Frame and infers specific dtypes. + """ + + tags = next(iter(data)) + nodes = [list(d.values()) for d in data] + + try: + with TextParser(nodes, names=tags, **kwargs) as tp: + return tp.read() + except ParserError: + raise ParserError( + "XML document may be too complex for import. " + "Try to flatten document and use distinct " + "element and attribute names." + ) + + +def _parse( + path_or_buffer, + xpath, + namespaces, + elems_only, + attrs_only, + names, + encoding, + parser, + stylesheet, + compression, + storage_options, + **kwargs, +) -> DataFrame: + """ + Call internal parsers. 
+ + This method will conditionally call internal parsers: + LxmlFrameParser and/or EtreeParser. + + Raises + ------ + ImportError + * If lxml is not installed if selected as parser. + + ValueError + * If parser is not lxml or etree. + """ + + lxml = import_optional_dependency("lxml.etree", errors="ignore") + p: Union[_EtreeFrameParser, _LxmlFrameParser] + + if parser == "lxml": + if lxml is not None: + p = _LxmlFrameParser( + path_or_buffer, + xpath, + namespaces, + elems_only, + attrs_only, + names, + encoding, + stylesheet, + compression, + storage_options, + ) + else: + raise ImportError("lxml not found, please install or use the etree parser.") + + elif parser == "etree": + p = _EtreeFrameParser( + path_or_buffer, + xpath, + namespaces, + elems_only, + attrs_only, + names, + encoding, + stylesheet, + compression, + storage_options, + ) + else: + raise ValueError("Values for parser can only be lxml or etree.") + + data_dicts = p.parse_data() + + return _data_to_frame(data=data_dicts, **kwargs) + + +@doc(storage_options=_shared_docs["storage_options"]) +def read_xml( + path_or_buffer: FilePathOrBuffer, + xpath: Optional[str] = "./*", + namespaces: Optional[Union[dict, List[dict]]] = None, + elems_only: Optional[bool] = False, + attrs_only: Optional[bool] = False, + names: Optional[List[str]] = None, + encoding: Optional[str] = "utf-8", + parser: Optional[str] = "lxml", + stylesheet: Optional[FilePathOrBuffer] = None, + compression: CompressionOptions = "infer", + storage_options: StorageOptions = None, +) -> DataFrame: + r""" + Read XML document into a ``DataFrame`` object. + + .. versionadded:: 1.3.0 + + Parameters + ---------- + path_or_buffer : str, path object, or file-like object + Any valid XML string or path is acceptable. The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. + + xpath : str, optional, default './\*' + The XPath to parse required set of nodes for migration to DataFrame. + XPath should return a collection of elements and not a single + element. Note: The ``etree`` parser supports limited XPath + expressions. For more complex XPath, use ``lxml`` which requires + installation. + + namespaces : dict, optional + The namespaces defined in XML document as dicts with key being + namespace prefix and value the URI. There is no need to include all + namespaces in XML, only the ones used in ``xpath`` expression. + Note: if XML document uses default namespace denoted as + `xmlns=''` without a prefix, you must assign any temporary + namespace prefix such as 'doc' to the URI in order to parse + underlying nodes and/or attributes. For example, :: + + namespaces = {{"doc": "https://example.com"}} + + elems_only : bool, optional, default False + Parse only the child elements at the specified ``xpath``. By default, + all child elements and non-empty text nodes are returned. + + attrs_only : bool, optional, default False + Parse only the attributes at the specified ``xpath``. + By default, all attributes are returned. + + names : list-like, optional + Column names for DataFrame of parsed XML data. Use this parameter to + rename original element names and distinguish same named elements. + + encoding : str, optional, default 'utf-8' + Encoding of XML document. + + parser : {{'lxml','etree'}}, default 'lxml' + Parser module to use for retrieval of data. Only 'lxml' and + 'etree' are supported. With 'lxml' more complex XPath searches + and ability to use XSLT stylesheet are supported. 
+ + stylesheet : str, path object or file-like object + A URL, file-like object, or a raw string containing an XSLT script. + This stylesheet should flatten complex, deeply nested XML documents + for easier parsing. To use this feature you must have ``lxml`` module + installed and specify 'lxml' as ``parser``. The ``xpath`` must + reference nodes of transformed XML document generated after XSLT + transformation and not the original XML document. Only XSLT 1.0 + scripts and not later versions is currently supported. + + compression : {{'infer', 'gzip', 'bz2', 'zip', 'xz', None}}, default 'infer' + For on-the-fly decompression of on-disk data. If 'infer', then use + gzip, bz2, zip or xz if path_or_buffer is a string ending in + '.gz', '.bz2', '.zip', or 'xz', respectively, and no decompression + otherwise. If using 'zip', the ZIP file must contain only one data + file to be read in. Set to None for no decompression. + + {storage_options} + + Returns + ------- + df + A DataFrame. + + See Also + -------- + read_json : Convert a JSON string to pandas object. + read_html : Read HTML tables into a list of DataFrame objects. + + Notes + ----- + This method is best designed to import shallow XML documents in + following format which is the ideal fit for the two-dimensions of a + ``DataFrame`` (row by column). :: + + + + data + data + data + ... + + + ... + + ... + + + As a file format, XML documents can be designed any way including + layout of elements and attributes as long as it conforms to W3C + specifications. Therefore, this method is a convenience handler for + a specific flatter design and not all possible XML structures. + + However, for more complex XML documents, ``stylesheet`` allows you to + temporarily redesign original document with XSLT (a special purpose + language) for a flatter version for migration to a DataFrame. + + This function will *always* return a single :class:`DataFrame` or raise + exceptions due to issues with XML document, ``xpath``, or other + parameters. + + Examples + -------- + >>> xml = ''' + ... + ... + ... square + ... 360 + ... 4.0 + ... + ... + ... circle + ... 360 + ... + ... + ... + ... triangle + ... 180 + ... 3.0 + ... + ... ''' + + >>> df = pd.read_xml(xml) + >>> df + shape degrees sides + 0 square 360 4.0 + 1 circle 360 NaN + 2 triangle 180 3.0 + + >>> xml = ''' + ... + ... + ... + ... + ... ''' + + >>> df = pd.read_xml(xml, xpath=".//row") + >>> df + shape degrees sides + 0 square 360 4.0 + 1 circle 360 NaN + 2 triangle 180 3.0 + + >>> xml = ''' + ... + ... + ... square + ... 360 + ... 4.0 + ... + ... + ... circle + ... 360 + ... + ... + ... + ... triangle + ... 180 + ... 3.0 + ... + ... ''' + + >>> df = pd.read_xml(xml, + ... xpath="//doc:row", + ... 
+    ...                  namespaces={{"doc": "https://example.com"}})
+    >>> df
+          shape  degrees  sides
+    0    square      360    4.0
+    1    circle      360    NaN
+    2  triangle      180    3.0
+    """
+
+    return _parse(
+        path_or_buffer=path_or_buffer,
+        xpath=xpath,
+        namespaces=namespaces,
+        elems_only=elems_only,
+        attrs_only=attrs_only,
+        names=names,
+        encoding=encoding,
+        parser=parser,
+        stylesheet=stylesheet,
+        compression=compression,
+        storage_options=storage_options,
+    )
diff --git a/pandas/tests/api/test_api.py b/pandas/tests/api/test_api.py
index 541c2988a0636..fd1c19219c4bf 100644
--- a/pandas/tests/api/test_api.py
+++ b/pandas/tests/api/test_api.py
@@ -159,6 +159,7 @@ class TestPDApi(Base):
         "read_gbq",
         "read_hdf",
         "read_html",
+        "read_xml",
         "read_json",
         "read_pickle",
         "read_sas",
diff --git a/pandas/tests/io/data/xml/baby_names.xml b/pandas/tests/io/data/xml/baby_names.xml
new file mode 100644
index 0000000000000..b4797b79d7112
--- /dev/null
+++ b/pandas/tests/io/data/xml/baby_names.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0" encoding="ISO-8859-1"?>
+<data>
+  <row>
+    <rank>1</rank>
+    <malename>José</malename>
+    <femalename>Sofía</femalename>
+  </row>
+  <row>
+    <rank>2</rank>
+    <malename>Luis</malename>
+    <femalename>Valentina</femalename>
+  </row>
+  <row>
+    <rank>3</rank>
+    <malename>Carlos</malename>
+    <femalename>Isabella</femalename>
+  </row>
+  <row>
+    <rank>4</rank>
+    <malename>Juan</malename>
+    <femalename>Camila</femalename>
+  </row>
+  <row>
+    <rank>5</rank>
+    <malename>Jorge</malename>
+    <femalename>Valeria</femalename>
+  </row>
+  <row>
+    <rank>6</rank>
+    <malename>Pedro</malename>
+    <femalename>Mariana</femalename>
+  </row>
+  <row>
+    <rank>7</rank>
+    <malename>Jesús</malename>
+    <femalename>Gabriela</femalename>
+  </row>
+  <row>
+    <rank>8</rank>
+    <malename>Manuel</malename>
+    <femalename>Sara</femalename>
+  </row>
+  <row>
+    <rank>9</rank>
+    <malename>Santiago</malename>
+    <femalename>Daniella</femalename>
+  </row>
+  <row>
+    <rank>10</rank>
+    <malename>Sebastián</malename>
+    <femalename>María José</femalename>
+  </row>
+</data>
diff --git a/pandas/tests/io/data/xml/books.xml b/pandas/tests/io/data/xml/books.xml
new file mode 100644
index 0000000000000..666ce60e9a2be
--- /dev/null
+++ b/pandas/tests/io/data/xml/books.xml
@@ -0,0 +1,21 @@
+<?xml version='1.0' encoding='utf-8'?>
+<bookstore>
+  <book category="cooking">
+    <title>Everyday Italian</title>
+    <author>Giada De Laurentiis</author>
+    <year>2005</year>
+    <price>30.00</price>
+  </book>
+  <book category="children">
+    <title>Harry Potter</title>
+    <author>J K. Rowling</author>
+    <year>2005</year>
+    <price>29.99</price>
+  </book>
+  <book category="web">
+    <title>Learning XML</title>
+    <author>Erik T. Ray</author>
+    <year>2003</year>
+    <price>39.95</price>
+  </book>
+</bookstore>
diff --git a/pandas/tests/io/data/xml/cta_rail_lines.kml b/pandas/tests/io/data/xml/cta_rail_lines.kml
new file mode 100644
index 0000000000000..c031137ee7b20
--- /dev/null
+++ b/pandas/tests/io/data/xml/cta_rail_lines.kml
@@ -0,0 +1,92 @@
+
+
+ CTA_RailLines
+
+
+ CTA_RailLines
+
+
+ Blue Line (Forest Park)
+
+
Blue Line (Forest Park)
OBJECTID_1 1
ASSET_ID 21100001
LINES Blue Line (Forest Park)
DESCRIPTIO Oak Park to Austin
TYPE Elevated or at Grade
LEGEND BL
ALT_LEGEND BL
BRANCH Blue Line Forest Park
SHAPE.LEN 4060.368778
]]>
+ #LineStyle01 + + + 0 + clampedToGround + -87.77678526964958,41.8708863930319,0 -87.77826234150609,41.87097820122218,0 -87.78251583439344,41.87130129991005,0 -87.78418294588424,41.87145055520308,0 -87.7872369165933,41.8717239119163,0 -87.79160214925886,41.87210797280065,0 + + +
+ + Red, Purple Line + +
Red, Purple Line
OBJECTID_1 2
ASSET_ID 21100002
LINES Red, Purple Line
DESCRIPTIO Lawrence to Wilson
TYPE Elevated or at Grade
LEGEND RD
ALT_LEGEND RDPR
BRANCH Red Line North Side
SHAPE.LEN 1800.132896
]]>
+ #LineStyle01 + + + 0 + clampedToGround + -87.65758750947528,41.96427269188822,0 -87.65802133507393,41.96581929055245,0 -87.65819033925305,41.96621846093642,0 -87.6583189819129,41.96650362897086,0 -87.65835858701473,41.96669002089185,0 -87.65838428411853,41.96688150295095,0 -87.65842208882658,41.96745896091846,0 -87.65846556843937,41.9683761425439,0 -87.65849296214573,41.96913893870342,0 + + +
+ + Red, Purple Line + +
Red, Purple Line
OBJECTID_1 3
ASSET_ID 21100003
LINES Red, Purple Line
DESCRIPTIO Wilson to Sheridan
TYPE Elevated or at Grade
LEGEND RD
ALT_LEGEND RDPR
BRANCH Red Line North Side
SHAPE.LEN 4256.243677
]]>
+ #LineStyle01 + + + 0 + clampedToGround + -87.65492939166126,41.95377494531437,0 -87.65557043199591,41.95376544118533,0 -87.65606302030132,41.95376391658746,0 -87.65623502146268,41.95377379126367,0 -87.65634748981634,41.95380103566435,0 -87.65646537904269,41.95387703994676,0 -87.65656532461145,41.95396622645799,0 -87.65664760856414,41.95404201996044,0 -87.65671750555913,41.95416647054043,0 -87.65673983607117,41.95429949810849,0 -87.65673866475777,41.95441024240925,0 -87.6567690255541,41.95490657227902,0 -87.65683672482363,41.95692259283837,0 -87.6568900886376,41.95861070983142,0 -87.65699865558875,41.96181418669004,0 -87.65756347177603,41.96397045777844,0 -87.65758750947528,41.96427269188822,0 + + +
+ + Red, Purple Line + +
Red, Purple Line
OBJECTID_1 4
ASSET_ID 21100004
LINES Red, Purple Line
DESCRIPTIO Sheridan to Addison
TYPE Elevated or at Grade
LEGEND RD
ALT_LEGEND RDPR
BRANCH Red Line North Side
SHAPE.LEN 2581.713736
]]>
+ #LineStyle01 + + + 0 + clampedToGround + -87.65362593118043,41.94742799535678,0 -87.65363554415794,41.94819886386848,0 -87.6536456393239,41.95059994675451,0 -87.65365831235026,41.95108288489359,0 -87.6536604873874,41.9519954657554,0 -87.65362592053201,41.95245597302328,0 -87.65367158496069,41.95311153649393,0 -87.65368468595476,41.9533202828916,0 -87.65369271253692,41.95343095587119,0 -87.65373335834569,41.95351536301472,0 -87.65378605844126,41.95358212680591,0 -87.65385067928185,41.95364452823767,0 -87.6539390793817,41.95370263886964,0 -87.6540786298351,41.95373403675265,0 -87.65430648647626,41.9537535411832,0 -87.65492939166126,41.95377494531437,0 + + +
+ + Red, Purple Line + +
Red, Purple Line
OBJECTID_1 5
ASSET_ID 21100005
LINES Red, Purple Line
DESCRIPTIO Addison to Clark Junction
TYPE Elevated or at Grade
LEGEND RD
ALT_LEGEND RDPR
BRANCH Red Line North Side
SHAPE.LEN 1918.716686
]]>
+ #LineStyle01 + + + 0 + clampedToGround + -87.65345391792157,41.94217681262115,0 -87.65342448305786,41.94237224420864,0 -87.65339745703922,41.94268217746244,0 -87.65337753982941,41.94288140770284,0 -87.65336256753105,41.94317369618263,0 -87.65338799707138,41.94357253961736,0 -87.65340240886648,41.94389158188269,0 -87.65341837392448,41.94406444407721,0 -87.65342275247338,41.94421065714904,0 -87.65347469646018,41.94434829382345,0 -87.65351486483024,41.94447699917548,0 -87.65353483605053,41.9453896864472,0 -87.65361975532807,41.94689193720703,0 -87.65362593118043,41.94742799535678,0 + + +
+
+ +
+
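The KML fixture above is deliberately deep: the tabular fields sit inside
``Placemark`` nodes under a default namespace, so it is not directly readable
as rows and columns. A minimal sketch of how the test suite later pairs this
file with the ``flatten_doc.xsl`` stylesheet that follows (paths are
illustrative; the namespace URI is the one asserted in the tests)::

    from pandas.io.xml import read_xml

    df = read_xml(
        "pandas/tests/io/data/xml/cta_rail_lines.kml",
        xpath=".//k:Placemark",
        namespaces={"k": "http://www.opengis.net/kml/2.2"},
        stylesheet="pandas/tests/io/data/xml/flatten_doc.xsl",
    )
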
diff --git a/pandas/tests/io/data/xml/flatten_doc.xsl b/pandas/tests/io/data/xml/flatten_doc.xsl new file mode 100644 index 0000000000000..a9d62d180beaf --- /dev/null +++ b/pandas/tests/io/data/xml/flatten_doc.xsl @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + diff --git a/pandas/tests/io/data/xml/row_field_output.xsl b/pandas/tests/io/data/xml/row_field_output.xsl new file mode 100644 index 0000000000000..5a0f0e655a78e --- /dev/null +++ b/pandas/tests/io/data/xml/row_field_output.xsl @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + diff --git a/pandas/tests/io/xml/test_to_xml.py b/pandas/tests/io/xml/test_to_xml.py new file mode 100644 index 0000000000000..2026035a23370 --- /dev/null +++ b/pandas/tests/io/xml/test_to_xml.py @@ -0,0 +1,1299 @@ +from io import ( + BytesIO, + StringIO, +) +import os +import sys +from typing import Union + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas import DataFrame +import pandas._testing as tm + +from pandas.io.common import get_handle +from pandas.io.xml import read_xml + +""" +CHECKLIST + +[x] - ValueError: "Values for parser can only be lxml or etree." + +etree +[x] - ImportError: "lxml not found, please install or use the etree parser." +[X] - TypeError: "...is not a valid type for attr_cols" +[X] - TypeError: "...is not a valid type for elem_cols" +[X] - LookupError: "unknown encoding" +[X] - KeyError: "...is not included in namespaces" +[X] - KeyError: "no valid column" +[X] - ValueError: "To use stylesheet, you need lxml installed..." +[] - OSError: (NEED PERMISSOIN ISSUE, DISK FULL, ETC.) +[X] - FileNotFoundError: "No such file or directory" +[X] - PermissionError: "Forbidden" + +lxml +[X] - TypeError: "...is not a valid type for attr_cols" +[X] - TypeError: "...is not a valid type for elem_cols" +[X] - LookupError: "unknown encoding" +[] - OSError: (NEED PERMISSOIN ISSUE, DISK FULL, ETC.) +[X] - FileNotFoundError: "No such file or directory" +[X] - KeyError: "...is not included in namespaces" +[X] - KeyError: "no valid column" +[X] - ValueError: "stylesheet is not a url, file, or xml string." +[] - LookupError: (NEED WRONG ENCODING FOR FILE OUTPUT) +[] - URLError: (USUALLY DUE TO NETWORKING) +[] - HTTPError: (NEED AN ONLINE STYLESHEET) +[X] - OSError: "failed to load external entity" +[X] - XMLSyntaxError: "Opening and ending tag mismatch" +[X] - XSLTApplyError: "Cannot resolve URI" +[X] - XSLTParseError: "failed to compile" +[X] - PermissionError: "Forbidden" +""" + +geom_df = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4, np.nan, 3], + } +) + +planet_df = DataFrame( + { + "planet": [ + "Mercury", + "Venus", + "Earth", + "Mars", + "Jupiter", + "Saturn", + "Uranus", + "Neptune", + ], + "type": [ + "terrestrial", + "terrestrial", + "terrestrial", + "terrestrial", + "gas giant", + "gas giant", + "ice giant", + "ice giant", + ], + "location": [ + "inner", + "inner", + "inner", + "inner", + "outer", + "outer", + "outer", + "outer", + ], + "mass": [ + 0.330114, + 4.86747, + 5.97237, + 0.641712, + 1898.187, + 568.3174, + 86.8127, + 102.4126, + ], + } +) + +from_file_expected = """\ + + + + 0 + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + 1 + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + 2 + web + Learning XML + Erik T. 
Ray + 2003 + 39.95 + +""" + + +def equalize_decl(doc): + # etree and lxml differ on quotes and case in xml declaration + if doc is not None: + doc = doc.replace( + ' + + + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + web + Learning XML + Erik T. Ray + 2003 + 39.95 + +""" + + filename = datapath("io", "data", "xml", "books.xml") + df_file = read_xml(filename, parser=parser) + + with tm.ensure_clean("test.xml") as path: + df_file.to_xml(path, index=False, parser=parser) + with open(path, "rb") as f: + output = f.read().decode("utf-8").strip() + + output = equalize_decl(output) + + assert output == expected + + +def test_index_false_rename_row_root(datapath, parser): + expected = """\ + + + + cooking + Everyday Italian + Giada De Laurentiis + 2005 + 30.0 + + + children + Harry Potter + J K. Rowling + 2005 + 29.99 + + + web + Learning XML + Erik T. Ray + 2003 + 39.95 + +""" + + filename = datapath("io", "data", "xml", "books.xml") + df_file = read_xml(filename, parser=parser) + + with tm.ensure_clean("test.xml") as path: + df_file.to_xml( + path, index=False, root_name="books", row_name="book", parser=parser + ) + with open(path, "rb") as f: + output = f.read().decode("utf-8").strip() + + output = equalize_decl(output) + + assert output == expected + + +# NA_REP + +na_expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +def test_na_elem_output(datapath, parser): + output = geom_df.to_xml(parser=parser) + output = equalize_decl(output) + + assert output == na_expected + + +def test_na_empty_str_elem_option(datapath, parser): + output = geom_df.to_xml(na_rep="", parser=parser) + output = equalize_decl(output) + + assert output == na_expected + + +def test_na_empty_elem_option(datapath, parser): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + 0.0 + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(na_rep="0.0", parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# ATTR_COLS + + +@pytest.mark.skipif( + sys.version_info < (3, 8), + reason=("etree alpha ordered attributes <= py3.7"), +) +def test_attrs_cols_nan_output(datapath, parser): + expected = """\ + + + + + +""" + + output = geom_df.to_xml(attr_cols=["shape", "degrees", "sides"], parser=parser) + output = equalize_decl(output) + + assert output == expected + + +@pytest.mark.skipif( + sys.version_info < (3, 8), + reason=("etree alpha ordered attributes <= py3.7"), +) +def test_attrs_cols_prefix(datapath, parser): + expected = """\ + + + + + +""" + + output = geom_df.to_xml( + attr_cols=["index", "shape", "degrees", "sides"], + namespaces={"doc": "http://example.xom"}, + prefix="doc", + parser=parser, + ) + output = equalize_decl(output) + + assert output == expected + + +def test_attrs_unknown_column(parser): + with pytest.raises(KeyError, match=("no valid column")): + geom_df.to_xml(attr_cols=["shape", "degreees", "sides"], parser=parser) + + +def test_attrs_wrong_type(parser): + with pytest.raises(TypeError, match=("is not a valid type for attr_cols")): + geom_df.to_xml(attr_cols='"shape", "degreees", "sides"', parser=parser) + + +# ELEM_COLS + + +def test_elems_cols_nan_output(datapath, parser): + elems_cols_expected = """\ + + + + 360 + 4.0 + square + + + 360 + + circle + + + 180 + 3.0 + triangle + +""" + + output = geom_df.to_xml( + index=False, elem_cols=["degrees", "sides", "shape"], parser=parser + ) + output = 
equalize_decl(output) + + assert output == elems_cols_expected + + +def test_elems_unknown_column(parser): + with pytest.raises(KeyError, match=("no valid column")): + geom_df.to_xml(elem_cols=["shape", "degreees", "sides"], parser=parser) + + +def test_elems_wrong_type(parser): + with pytest.raises(TypeError, match=("is not a valid type for elem_cols")): + geom_df.to_xml(elem_cols='"shape", "degreees", "sides"', parser=parser) + + +def test_elems_and_attrs_cols(datapath, parser): + elems_cols_expected = """\ + + + + 360 + 4.0 + + + 360 + + + + 180 + 3.0 + +""" + + output = geom_df.to_xml( + index=False, + elem_cols=["degrees", "sides"], + attr_cols=["shape"], + parser=parser, + ) + output = equalize_decl(output) + + assert output == elems_cols_expected + + +# HIERARCHICAL COLUMNS + + +def test_hierarchical_columns(datapath, parser): + expected = """\ + + + + inner + terrestrial + 4 + 11.81 + 2.95 + + + outer + gas giant + 2 + 2466.5 + 1233.25 + + + outer + ice giant + 2 + 189.23 + 94.61 + + + All + + 8 + 2667.54 + 333.44 + +""" + + pvt = planet_df.pivot_table( + index=["location", "type"], + values="mass", + aggfunc=["count", "sum", "mean"], + margins=True, + ).round(2) + + output = pvt.to_xml(parser=parser) + output = equalize_decl(output) + + assert output == expected + + +@pytest.mark.skipif( + sys.version_info < (3, 8), + reason=("etree alpha ordered attributes <= py3.7"), +) +def test_hierarchical_attrs_columns(datapath, parser): + expected = """\ + + + + + + +""" + + pvt = planet_df.pivot_table( + index=["location", "type"], + values="mass", + aggfunc=["count", "sum", "mean"], + margins=True, + ).round(2) + + output = pvt.to_xml(attr_cols=list(pvt.reset_index().columns.values), parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# MULTIINDEX + + +def test_multi_index(datapath, parser): + expected = """\ + + + + inner + terrestrial + 4 + 11.81 + 2.95 + + + outer + gas giant + 2 + 2466.5 + 1233.25 + + + outer + ice giant + 2 + 189.23 + 94.61 + +""" + + agg = ( + planet_df.groupby(["location", "type"])["mass"] + .agg(["count", "sum", "mean"]) + .round(2) + ) + + output = agg.to_xml(parser=parser) + output = equalize_decl(output) + + assert output == expected + + +@pytest.mark.skipif( + sys.version_info < (3, 8), + reason=("etree alpha ordered attributes <= py3.7"), +) +def test_multi_index_attrs_cols(datapath, parser): + expected = """\ + + + + + +""" + + agg = ( + planet_df.groupby(["location", "type"])["mass"] + .agg(["count", "sum", "mean"]) + .round(2) + ) + output = agg.to_xml(attr_cols=list(agg.reset_index().columns.values), parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# NAMESPACE + + +def test_default_namespace(parser): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(namespaces={"": "http://example.com"}, parser=parser) + output = equalize_decl(output) + + assert output == expected + + +# PREFIX + + +def test_namespace_prefix(parser): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml( + namespaces={"doc": "http://example.com"}, prefix="doc", parser=parser + ) + output = equalize_decl(output) + + assert output == expected + + +def test_missing_prefix_in_nmsp(parser): + with pytest.raises(KeyError, match=("doc is not included in namespaces")): + + geom_df.to_xml( + namespaces={"": "http://example.com"}, prefix="doc", parser=parser + ) 
+ + +def test_namespace_prefix_and_default(parser): + expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml( + namespaces={"": "http://example.com", "doc": "http://other.org"}, + prefix="doc", + parser=parser, + ) + output = equalize_decl(output) + + if output is not None: + # etree and lxml differs on order of namespace prefixes + output = output.replace( + 'xmlns:doc="http://other.org" xmlns="http://example.com"', + 'xmlns="http://example.com" xmlns:doc="http://other.org"', + ) + + assert output == expected + + +# ENCODING + +encoding_expected = """\ + + + + 0 + 1 + José + Sofía + + + 1 + 2 + Luis + Valentina + + + 2 + 3 + Carlos + Isabella + + + 3 + 4 + Juan + Camila + + + 4 + 5 + Jorge + Valeria + +""" + + +def test_encoding_option_str(datapath, parser): + filename = datapath("io", "data", "xml", "baby_names.xml") + df_file = read_xml(filename, parser=parser, encoding="ISO-8859-1").head(5) + + output = df_file.to_xml(encoding="ISO-8859-1", parser=parser) + + if output is not None: + # etree and lxml differ on quotes and case in xml declaration + output = output.replace( + ' + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + output = geom_df.to_xml(xml_declaration=False) + + assert output == expected + + +@td.skip_if_no("lxml") +def test_no_pretty_print_with_decl(): + expected = ( + "\n" + "0square" + "3604.0" + "1circle360" + "2" + "triangle1803.0" + "" + ) + + output = geom_df.to_xml(pretty_print=False, parser="lxml") + output = equalize_decl(output) + + # etree adds space for closed tags + if output is not None: + output = output.replace(" />", "/>") + + assert output == expected + + +@td.skip_if_no("lxml") +def test_no_pretty_print_no_decl(): + expected = ( + "0square" + "3604.0" + "1circle360" + "2" + "triangle1803.0" + "" + ) + + output = geom_df.to_xml(xml_declaration=False, pretty_print=False) + + assert output == expected + + +# PARSER + + +@td.skip_if_installed("lxml") +def test_default_parser_no_lxml(): + with pytest.raises( + ImportError, match=("lxml not found, please install or use the etree parser.") + ): + geom_df.to_xml() + + +def test_unknown_parser(): + with pytest.raises( + ValueError, match=("Values for parser can only be lxml or etree.") + ): + geom_df.to_xml(parser="bs4") + + +# STYLESHEET + +xsl_expected = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +@td.skip_if_no("lxml") +def test_stylesheet_file_like(datapath, mode): + xsl = datapath("io", "data", "xml", "row_field_output.xsl") + + with open(xsl, mode) as f: + assert geom_df.to_xml(stylesheet=f) == xsl_expected + + +@td.skip_if_no("lxml") +def test_stylesheet_io(datapath, mode): + xsl_path = datapath("io", "data", "xml", "row_field_output.xsl") + + xsl_obj: Union[BytesIO, StringIO] + + with open(xsl_path, mode) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + else: + xsl_obj = StringIO(f.read()) + + output = geom_df.to_xml(stylesheet=xsl_obj) + + assert output == xsl_expected + + +@td.skip_if_no("lxml") +def test_stylesheet_buffered_reader(datapath, mode): + xsl = datapath("io", "data", "xml", "row_field_output.xsl") + + with open(xsl, mode) as f: + xsl_obj = f.read() + + output = geom_df.to_xml(stylesheet=xsl_obj) + + assert output == xsl_expected + + +@td.skip_if_no("lxml") +def test_stylesheet_wrong_path(datapath): + from lxml.etree import XMLSyntaxError + + xsl = os.path.join("data", "xml", "row_field_output.xslt") 
+ + with pytest.raises( + XMLSyntaxError, + match=("Start tag expected, '<' not found"), + ): + geom_df.to_xml(stylesheet=xsl) + + +@td.skip_if_no("lxml") +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_stylesheet(val): + from lxml.etree import XMLSyntaxError + + with pytest.raises( + XMLSyntaxError, match=("Document is empty|Start tag expected, '<' not found") + ): + geom_df.to_xml(stylesheet=val) + + +@td.skip_if_no("lxml") +def test_incorrect_xsl_syntax(): + from lxml.etree import XMLSyntaxError + + xsl = """\ + + + + + + + + + + + + + + + + + + +""" + + with pytest.raises(XMLSyntaxError, match=("Opening and ending tag mismatch")): + geom_df.to_xml(stylesheet=xsl) + + +@td.skip_if_no("lxml") +def test_incorrect_xsl_eval(): + from lxml.etree import XSLTParseError + + xsl = """\ + + + + + + + + + + + + + + + + + + +""" + + with pytest.raises(XSLTParseError, match=("failed to compile")): + geom_df.to_xml(stylesheet=xsl) + + +@td.skip_if_no("lxml") +def test_incorrect_xsl_apply(parser): + from lxml.etree import XSLTApplyError + + xsl = """\ + + + + + + + + + +""" + + with pytest.raises(XSLTApplyError, match=("Cannot resolve URI")): + with tm.ensure_clean("test.xml") as path: + geom_df.to_xml(path, stylesheet=xsl) + + +def test_stylesheet_with_etree(datapath): + xsl = """\ + + + + + + + + + """ + + with pytest.raises( + ValueError, match=("To use stylesheet, you need lxml installed") + ): + geom_df.to_xml(parser="etree", stylesheet=xsl) + + +@td.skip_if_no("lxml") +def test_style_to_csv(): + xsl = """\ + + + + + , + + ,shape,degrees,sides + + + + + + + +""" + + out_csv = geom_df.to_csv(line_terminator="\n") + + if out_csv is not None: + out_csv = out_csv.strip() + out_xml = geom_df.to_xml(stylesheet=xsl) + + assert out_csv == out_xml + + +@td.skip_if_no("lxml") +def test_style_to_string(): + xsl = """\ + + + + + + + shape degrees sides + + + + + + + +""" + + out_str = geom_df.to_string() + out_xml = geom_df.to_xml(na_rep="NaN", stylesheet=xsl) + + assert out_xml == out_str + + +@td.skip_if_no("lxml") +def test_style_to_json(): + xsl = """\ + + + + + " + + + {"shape":{ + + },"degrees":{ + + },"sides":{ + + }} + + + + + + + + + + + + + + + + + , + + +""" + + out_json = geom_df.to_json() + out_xml = geom_df.to_xml(stylesheet=xsl) + + assert out_json == out_xml + + +# COMPRESSION + + +geom_xml = """\ + + + + 0 + square + 360 + 4.0 + + + 1 + circle + 360 + + + + 2 + triangle + 180 + 3.0 + +""" + + +@pytest.mark.parametrize("comp", ["bz2", "gzip", "xz", "zip"]) +def test_compression_output(parser, comp): + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=comp) + + with get_handle( + path, + "r", + compression=comp, + ) as handle_obj: + output = handle_obj.handle.read() + + output = equalize_decl(output) + + assert geom_xml == output.strip() + + +@pytest.mark.parametrize("comp", ["bz2", "gzip", "xz", "zip"]) +@pytest.mark.parametrize("compfile", ["xml.bz2", "xml.gz", "xml.xz", "xml.zip"]) +def test_filename_and_suffix_comp(parser, comp, compfile): + with tm.ensure_clean(filename=compfile) as path: + geom_df.to_xml(path, parser=parser, compression=comp) + + with get_handle( + path, + "r", + compression=comp, + ) as handle_obj: + output = handle_obj.handle.read() + + output = equalize_decl(output) + + assert geom_xml == output.strip() + + +def test_unsuported_compression(datapath, parser): + with pytest.raises(ValueError, match="Unrecognized compression type"): + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression="7z") 
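+
+
+# A minimal round-trip sketch of what the compression tests above exercise
+# (illustrative comment only, not executed at import time; ``geom_df`` and
+# ``read_xml`` are defined at the top of this module):
+#
+#   geom_df.to_xml("geom.xml.gz", compression="gzip")  # write compressed XML
+#   read_xml("geom.xml.gz", compression="gzip")        # reader codec must match
+#
+# A mismatched codec surfaces as the OSError/LZMAError/BadZipFile cases in the
+# checklist above, and an unknown codec raises
+# ValueError("Unrecognized compression type").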
+
+
+# STORAGE OPTIONS
+
+
+@tm.network
+@td.skip_if_no("s3fs")
+@td.skip_if_no("lxml")
+def test_s3_permission_output(parser):
+    import s3fs
+
+    with pytest.raises(PermissionError, match="Access Denied"):
+        fs = s3fs.S3FileSystem(anon=True)
+        fs.ls("pandas-test")
+
+        geom_df.to_xml("s3://pandas-test/geom.xml", compression="zip", parser=parser)
diff --git a/pandas/tests/io/xml/test_xml.py b/pandas/tests/io/xml/test_xml.py
new file mode 100644
index 0000000000000..6902b4e93443f
--- /dev/null
+++ b/pandas/tests/io/xml/test_xml.py
@@ -0,0 +1,1097 @@
+from io import (
+    BytesIO,
+    StringIO,
+)
+import os
+from typing import Union
+from urllib.error import HTTPError
+
+import numpy as np
+import pytest
+
+import pandas.util._test_decorators as td
+
+from pandas import DataFrame
+import pandas._testing as tm
+
+from pandas.io.xml import read_xml
+
+"""
+CHECKLIST
+
+[x] - ValueError: "Values for parser can only be lxml or etree."
+
+etree
+[X] - ImportError: "lxml not found, please install or use the etree parser."
+[X] - TypeError: "expected str, bytes or os.PathLike object, not NoneType"
+[X] - ValueError: "Either element or attributes can be parsed not both."
+[X] - ValueError: "xpath does not return any nodes..."
+[X] - SyntaxError: "You have used an incorrect or unsupported XPath"
+[X] - ValueError: "names does not match length of child elements in xpath."
+[X] - TypeError: "...is not a valid type for names"
+[X] - ValueError: "To use stylesheet, you need lxml installed..."
+[] - URLError: (GENERAL ERROR WITH HTTPError AS SUBCLASS)
+[X] - HTTPError: "HTTP Error 404: Not Found"
+[] - OSError: (GENERAL ERROR WITH FileNotFoundError AS SUBCLASS)
+[X] - FileNotFoundError: "No such file or directory"
+[] - ParseError: (FAILSAFE CATCH ALL FOR VERY COMPLEX XML)
+[X] - UnicodeDecodeError: "'utf-8' codec can't decode byte 0xe9..."
+[X] - UnicodeError: "UTF-16 stream does not start with BOM"
+[X] - BadZipFile: "File is not a zip file"
+[X] - OSError: "Invalid data stream"
+[X] - LZMAError: "Input format not supported by decoder"
+[X] - ValueError: "Unrecognized compression type"
+[X] - PermissionError: "Forbidden"
+
+lxml
+[X] - ValueError: "Either element or attributes can be parsed not both."
+[X] - AttributeError: "__enter__"
+[X] - XSLTApplyError: "Cannot resolve URI"
+[X] - XSLTParseError: "document is not a stylesheet"
+[X] - ValueError: "xpath does not return any nodes."
+[X] - XPathEvalError: "Invalid expression"
+[] - XPathSyntaxError: (OLD VERSION IN lxml FOR XPATH ERRORS)
+[X] - TypeError: "empty namespace prefix is not supported in XPath"
+[X] - ValueError: "names does not match length of child elements in xpath."
+[X] - TypeError: "...is not a valid type for names"
+[X] - LookupError: "unknown encoding"
+[] - URLError: (USUALLY DUE TO NETWORKING)
+[X] - HTTPError: "HTTP Error 404: Not Found"
+[X] - OSError: "failed to load external entity"
+[X] - XMLSyntaxError: "Start tag expected, '<' not found"
+[] - ParserError: (FAILSAFE CATCH ALL FOR VERY COMPLEX XML)
+[X] - ValueError: "Values for parser can only be lxml or etree."
+[X] - UnicodeDecodeError: "'utf-8' codec can't decode byte 0xe9..."
+[X] - UnicodeError: "UTF-16 stream does not start with BOM" +[X] - BadZipFile: "File is not a zip file" +[X] - OSError: "Invalid data stream" +[X] - LZMAError: "Input format not supported by decoder" +[X] - ValueError: "Unrecognized compression type" +[X] - PermissionError: "Forbidden" +""" + +geom_df = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4, np.nan, 3], + } +) + +xml_default_nmsp = """\ + + + + square + 360 + 4 + + + circle + 360 + + + + triangle + 180 + 3 + +""" + +xml_prefix_nmsp = """\ + + + + square + 360 + 4.0 + + + circle + 360 + + + + triangle + 180 + 3.0 + +""" + + +df_kml = DataFrame( + { + "id": { + 0: "ID_00001", + 1: "ID_00002", + 2: "ID_00003", + 3: "ID_00004", + 4: "ID_00005", + }, + "name": { + 0: "Blue Line (Forest Park)", + 1: "Red, Purple Line", + 2: "Red, Purple Line", + 3: "Red, Purple Line", + 4: "Red, Purple Line", + }, + "styleUrl": { + 0: "#LineStyle01", + 1: "#LineStyle01", + 2: "#LineStyle01", + 3: "#LineStyle01", + 4: "#LineStyle01", + }, + "extrude": {0: 0, 1: 0, 2: 0, 3: 0, 4: 0}, + "altitudeMode": { + 0: "clampedToGround", + 1: "clampedToGround", + 2: "clampedToGround", + 3: "clampedToGround", + 4: "clampedToGround", + }, + "coordinates": { + 0: ( + "-87.77678526964958,41.8708863930319,0 " + "-87.77826234150609,41.87097820122218,0 " + "-87.78251583439344,41.87130129991005,0 " + "-87.78418294588424,41.87145055520308,0 " + "-87.7872369165933,41.8717239119163,0 " + "-87.79160214925886,41.87210797280065,0" + ), + 1: ( + "-87.65758750947528,41.96427269188822,0 " + "-87.65802133507393,41.96581929055245,0 " + "-87.65819033925305,41.96621846093642,0 " + "-87.6583189819129,41.96650362897086,0 " + "-87.65835858701473,41.96669002089185,0 " + "-87.65838428411853,41.96688150295095,0 " + "-87.65842208882658,41.96745896091846,0 " + "-87.65846556843937,41.9683761425439,0 " + "-87.65849296214573,41.96913893870342,0" + ), + 2: ( + "-87.65492939166126,41.95377494531437,0 " + "-87.65557043199591,41.95376544118533,0 " + "-87.65606302030132,41.95376391658746,0 " + "-87.65623502146268,41.95377379126367,0 " + "-87.65634748981634,41.95380103566435,0 " + "-87.65646537904269,41.95387703994676,0 " + "-87.65656532461145,41.95396622645799,0 " + "-87.65664760856414,41.95404201996044,0 " + "-87.65671750555913,41.95416647054043,0 " + "-87.65673983607117,41.95429949810849,0 " + "-87.65673866475777,41.95441024240925,0 " + "-87.6567690255541,41.95490657227902,0 " + "-87.65683672482363,41.95692259283837,0 " + "-87.6568900886376,41.95861070983142,0 " + "-87.65699865558875,41.96181418669004,0 " + "-87.65756347177603,41.96397045777844,0 " + "-87.65758750947528,41.96427269188822,0" + ), + 3: ( + "-87.65362593118043,41.94742799535678,0 " + "-87.65363554415794,41.94819886386848,0 " + "-87.6536456393239,41.95059994675451,0 " + "-87.65365831235026,41.95108288489359,0 " + "-87.6536604873874,41.9519954657554,0 " + "-87.65362592053201,41.95245597302328,0 " + "-87.65367158496069,41.95311153649393,0 " + "-87.65368468595476,41.9533202828916,0 " + "-87.65369271253692,41.95343095587119,0 " + "-87.65373335834569,41.95351536301472,0 " + "-87.65378605844126,41.95358212680591,0 " + "-87.65385067928185,41.95364452823767,0 " + "-87.6539390793817,41.95370263886964,0 " + "-87.6540786298351,41.95373403675265,0 " + "-87.65430648647626,41.9537535411832,0 " + "-87.65492939166126,41.95377494531437,0" + ), + 4: ( + "-87.65345391792157,41.94217681262115,0 " + "-87.65342448305786,41.94237224420864,0 " + "-87.65339745703922,41.94268217746244,0 " + 
"-87.65337753982941,41.94288140770284,0 " + "-87.65336256753105,41.94317369618263,0 " + "-87.65338799707138,41.94357253961736,0 " + "-87.65340240886648,41.94389158188269,0 " + "-87.65341837392448,41.94406444407721,0 " + "-87.65342275247338,41.94421065714904,0 " + "-87.65347469646018,41.94434829382345,0 " + "-87.65351486483024,41.94447699917548,0 " + "-87.65353483605053,41.9453896864472,0 " + "-87.65361975532807,41.94689193720703,0 " + "-87.65362593118043,41.94742799535678,0" + ), + }, + } +) + + +@pytest.fixture(params=["rb", "r"]) +def mode(request): + return request.param + + +@pytest.fixture(params=[pytest.param("lxml", marks=td.skip_if_no("lxml")), "etree"]) +def parser(request): + return request.param + + +# FILE / URL + + +@td.skip_if_no("lxml") +def test_parser_consistency_file(datapath): + filename = datapath("io", "data", "xml", "books.xml") + df_file_lxml = read_xml(filename, parser="lxml") + df_file_etree = read_xml(filename, parser="etree") + + tm.assert_frame_equal(df_file_lxml, df_file_etree) + + +@tm.network +@pytest.mark.slow +@td.skip_if_no("lxml") +def test_parser_consistency_url(datapath): + url = ( + "https://data.cityofchicago.org/api/views/" + "8pix-ypme/rows.xml?accessType=DOWNLOAD" + ) + df_url_lxml = read_xml(url, xpath=".//row/row", parser="lxml") + df_url_etree = read_xml(url, xpath=".//row/row", parser="etree") + + tm.assert_frame_equal(df_url_lxml, df_url_etree) + + +def test_file_like(datapath, parser, mode): + filename = datapath("io", "data", "xml", "books.xml") + with open(filename, mode) as f: + df_file = read_xml(f, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_file_io(datapath, parser, mode): + filename = datapath("io", "data", "xml", "books.xml") + with open(filename, mode) as f: + xml_obj = f.read() + + df_io = read_xml( + (BytesIO(xml_obj) if isinstance(xml_obj, bytes) else StringIO(xml_obj)), + parser=parser, + ) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_io, df_expected) + + +def test_file_buffered_reader_string(datapath, parser, mode): + filename = datapath("io", "data", "xml", "books.xml") + with open(filename, mode) as f: + xml_obj = f.read() + + df_str = read_xml(xml_obj, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_str, df_expected) + + +def test_file_buffered_reader_no_xml_declaration(datapath, parser, mode): + filename = datapath("io", "data", "xml", "books.xml") + with open(filename, mode) as f: + next(f) + xml_obj = f.read() + + df_str = read_xml(xml_obj, parser=parser) + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_str, df_expected) + + +def test_file_handle_close(datapath, parser): + xml_file = datapath("io", "data", "xml", "books.xml") + + with open(xml_file, "rb") as f: + read_xml(BytesIO(f.read()), parser=parser) + + assert not f.closed + + +@td.skip_if_no("lxml") +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_lxml(val): + from lxml.etree import XMLSyntaxError + + with pytest.raises(XMLSyntaxError, match="Document is empty"): + read_xml(val, parser="lxml") + + +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_string_etree(val): + from xml.etree.ElementTree import ParseError + + with pytest.raises(ParseError, match="no element found"): + read_xml(val, parser="etree") + + +@td.skip_if_no("lxml") +def test_wrong_file_path_lxml(): + from lxml.etree import XMLSyntaxError + + filename = os.path.join("data", "html", "books.xml") + + with pytest.raises( + XMLSyntaxError, + match=("Start tag expected, '<' not found"), + ): + read_xml(filename, parser="lxml") + + +def test_wrong_file_path_etree(): + from xml.etree.ElementTree import ParseError + + filename = os.path.join("data", "html", "books.xml") + + with pytest.raises( + ParseError, + match=("not well-formed"), + ): + read_xml(filename, parser="etree") + + +@tm.network +@td.skip_if_no("lxml") +def test_url(): + url = "https://www.w3schools.com/xml/books.xml" + df_url = read_xml(url, xpath=".//book[count(*)=4]") + + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + "cover": [None, None, "paperback"], + } + ) + + tm.assert_frame_equal(df_url, df_expected) + + +def test_wrong_url(parser): + with pytest.raises(HTTPError, match=("HTTP Error 404: Not Found")): + url = "https://www.w3schools.com/xml/python.xml" + read_xml(url, xpath=".//book[count(*)=4]", parser=parser) + + +# XPATH + + +@td.skip_if_no("lxml") +def test_empty_xpath_lxml(datapath): + filename = datapath("io", "data", "xml", "books.xml") + with pytest.raises(ValueError, match=("xpath does not return any nodes")): + read_xml(filename, xpath=".//python", parser="lxml") + + +def test_bad_xpath_etree(datapath): + filename = datapath("io", "data", "xml", "books.xml") + with pytest.raises( + SyntaxError, match=("You have used an incorrect or unsupported XPath") + ): + read_xml(filename, xpath=".//[book]", parser="etree") + + +@td.skip_if_no("lxml") +def test_bad_xpath_lxml(datapath): + from lxml.etree import XPathEvalError + + filename = datapath("io", "data", "xml", "books.xml") + with pytest.raises(XPathEvalError, match=("Invalid expression")): + read_xml(filename, xpath=".//[book]", parser="lxml") + + +# NAMESPACE + + +def test_default_namespace(parser): + df_nmsp = read_xml( + xml_default_nmsp, + xpath=".//ns:row", + namespaces={"ns": "http://example.com"}, + parser=parser, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 180], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_nmsp, df_expected) + + +def test_prefix_namespace(parser): + df_nmsp = read_xml( + xml_prefix_nmsp, + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser=parser, + ) + + df_expected = DataFrame( + { + "shape": ["square", "circle", "triangle"], + "degrees": [360, 360, 
180], + "sides": [4.0, float("nan"), 3.0], + } + ) + + tm.assert_frame_equal(df_nmsp, df_expected) + + +@td.skip_if_no("lxml") +def test_consistency_default_namespace(): + df_lxml = read_xml( + xml_default_nmsp, + xpath=".//ns:row", + namespaces={"ns": "http://example.com"}, + parser="lxml", + ) + + df_etree = read_xml( + xml_default_nmsp, + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser="etree", + ) + + tm.assert_frame_equal(df_lxml, df_etree) + + +@td.skip_if_no("lxml") +def test_consistency_prefix_namespace(): + df_lxml = read_xml( + xml_prefix_nmsp, + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser="lxml", + ) + + df_etree = read_xml( + xml_prefix_nmsp, + xpath=".//doc:row", + namespaces={"doc": "http://example.com"}, + parser="etree", + ) + + tm.assert_frame_equal(df_lxml, df_etree) + + +# PREFIX + + +def test_missing_prefix_with_default_namespace(datapath, parser): + filename = datapath("io", "data", "xml", "books.xml") + with pytest.raises(ValueError, match=("xpath does not return any nodes")): + read_xml(filename, xpath=".//Placemark", parser=parser) + + +def test_missing_prefix_definition_etree(datapath): + filename = datapath("io", "data", "xml", "cta_rail_lines.kml") + with pytest.raises(SyntaxError, match=("you used an undeclared namespace prefix")): + read_xml(filename, xpath=".//kml:Placemark", parser="etree") + + +@td.skip_if_no("lxml") +def test_missing_prefix_definition_lxml(datapath): + from lxml.etree import XPathEvalError + + filename = datapath("io", "data", "xml", "cta_rail_lines.kml") + with pytest.raises(XPathEvalError, match=("Undefined namespace prefix")): + read_xml(filename, xpath=".//kml:Placemark", parser="lxml") + + +@td.skip_if_no("lxml") +@pytest.mark.parametrize("key", ["", None]) +def test_none_namespace_prefix(key): + with pytest.raises( + TypeError, match=("empty namespace prefix is not supported in XPath") + ): + read_xml( + xml_default_nmsp, + xpath=".//kml:Placemark", + namespaces={key: "http://www.opengis.net/kml/2.2"}, + parser="lxml", + ) + + +# ELEMS AND ATTRS + + +def test_file_elems_and_attrs(datapath, parser): + filename = datapath("io", "data", "xml", "books.xml") + df_file = read_xml(filename, parser=parser) + df_expected = DataFrame( + { + "category": ["cooking", "children", "web"], + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_file_only_attrs(datapath, parser): + filename = datapath("io", "data", "xml", "books.xml") + df_file = read_xml(filename, attrs_only=True, parser=parser) + df_expected = DataFrame({"category": ["cooking", "children", "web"]}) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_file_only_elems(datapath, parser): + filename = datapath("io", "data", "xml", "books.xml") + df_file = read_xml(filename, elems_only=True, parser=parser) + df_expected = DataFrame( + { + "title": ["Everyday Italian", "Harry Potter", "Learning XML"], + "author": ["Giada De Laurentiis", "J K. Rowling", "Erik T. 
Ray"], + "year": [2005, 2005, 2003], + "price": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_elem_and_attrs_only(datapath, parser): + filename = datapath("io", "data", "xml", "cta_rail_lines.kml") + with pytest.raises( + ValueError, + match=("Either element or attributes can be parsed not both"), + ): + read_xml(filename, elems_only=True, attrs_only=True, parser=parser) + + +@td.skip_if_no("lxml") +def test_attribute_centric_xml(): + xml = """\ + + + + + + + + + + + + + + + + + +""" + + df_lxml = read_xml(xml, xpath=".//station") + df_etree = read_xml(xml, xpath=".//station", parser="etree") + + tm.assert_frame_equal(df_lxml, df_etree) + + +# NAMES + + +def test_names_option_output(datapath, parser): + filename = datapath("io", "data", "xml", "books.xml") + df_file = read_xml( + filename, names=["Col1", "Col2", "Col3", "Col4", "Col5"], parser=parser + ) + + df_expected = DataFrame( + { + "Col1": ["cooking", "children", "web"], + "Col2": ["Everyday Italian", "Harry Potter", "Learning XML"], + "Col3": ["Giada De Laurentiis", "J K. Rowling", "Erik T. Ray"], + "Col4": [2005, 2005, 2003], + "Col5": [30.00, 29.99, 39.95], + } + ) + + tm.assert_frame_equal(df_file, df_expected) + + +def test_names_option_wrong_length(datapath, parser): + filename = datapath("io", "data", "xml", "books.xml") + + with pytest.raises(ValueError, match=("names does not match length")): + read_xml(filename, names=["Col1", "Col2", "Col3"], parser=parser) + + +def test_names_option_wrong_type(datapath, parser): + filename = datapath("io", "data", "xml", "books.xml") + + with pytest.raises(TypeError, match=("is not a valid type for names")): + read_xml( + filename, names="Col1, Col2, Col3", parser=parser # type: ignore[arg-type] + ) + + +# ENCODING + + +def test_wrong_encoding(datapath, parser): + filename = datapath("io", "data", "xml", "baby_names.xml") + with pytest.raises(UnicodeDecodeError, match=("'utf-8' codec can't decode")): + read_xml(filename, parser=parser) + + +def test_utf16_encoding(datapath, parser): + filename = datapath("io", "data", "xml", "baby_names.xml") + with pytest.raises( + UnicodeError, + match=( + "UTF-16 stream does not start with BOM|" + "'utf-16-le' codec can't decode byte" + ), + ): + read_xml(filename, encoding="UTF-16", parser=parser) + + +def test_unknown_encoding(datapath, parser): + filename = datapath("io", "data", "xml", "baby_names.xml") + with pytest.raises(LookupError, match=("unknown encoding: uft-8")): + read_xml(filename, encoding="UFT-8", parser=parser) + + +def test_ascii_encoding(datapath, parser): + filename = datapath("io", "data", "xml", "baby_names.xml") + with pytest.raises(UnicodeDecodeError, match=("'ascii' codec can't decode byte")): + read_xml(filename, encoding="ascii", parser=parser) + + +@td.skip_if_no("lxml") +def test_parser_consistency_with_encoding(datapath): + filename = datapath("io", "data", "xml", "baby_names.xml") + df_lxml = read_xml(filename, parser="lxml", encoding="ISO-8859-1") + df_etree = read_xml(filename, parser="etree", encoding="iso-8859-1") + + tm.assert_frame_equal(df_lxml, df_etree) + + +# PARSER + + +@td.skip_if_installed("lxml") +def test_default_parser_no_lxml(datapath): + filename = datapath("io", "data", "xml", "books.xml") + + with pytest.raises( + ImportError, match=("lxml not found, please install or use the etree parser.") + ): + read_xml(filename) + + +def test_wrong_parser(datapath): + filename = datapath("io", "data", "xml", "books.xml") + + with pytest.raises( + ValueError, 
match=("Values for parser can only be lxml or etree.") + ): + read_xml(filename, parser="bs4") + + +# STYLESHEET + + +@td.skip_if_no("lxml") +def test_stylesheet_file(datapath): + kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + xsl = datapath("io", "data", "xml", "flatten_doc.xsl") + + df_style = read_xml( + kml, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +@td.skip_if_no("lxml") +def test_stylesheet_file_like(datapath, mode): + kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + xsl = datapath("io", "data", "xml", "flatten_doc.xsl") + + with open(xsl, mode) as f: + df_style = read_xml( + kml, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=f, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +@td.skip_if_no("lxml") +def test_stylesheet_io(datapath, mode): + kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + xsl = datapath("io", "data", "xml", "flatten_doc.xsl") + + xsl_obj: Union[BytesIO, StringIO] + + with open(xsl, mode) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + else: + xsl_obj = StringIO(f.read()) + + df_style = read_xml( + kml, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl_obj, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +@td.skip_if_no("lxml") +def test_stylesheet_buffered_reader(datapath, mode): + kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + xsl = datapath("io", "data", "xml", "flatten_doc.xsl") + + with open(xsl, mode) as f: + xsl_obj = f.read() + + df_style = read_xml( + kml, + xpath=".//k:Placemark", + namespaces={"k": "http://www.opengis.net/kml/2.2"}, + stylesheet=xsl_obj, + ) + + tm.assert_frame_equal(df_kml, df_style) + + +@td.skip_if_no("lxml") +def test_not_stylesheet(datapath): + from lxml.etree import XSLTParseError + + kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + xsl = datapath("io", "data", "xml", "books.xml") + + with pytest.raises(XSLTParseError, match=("document is not a stylesheet")): + read_xml(kml, stylesheet=xsl) + + +@td.skip_if_no("lxml") +def test_incorrect_xsl_syntax(datapath): + from lxml.etree import XMLSyntaxError + + xsl = """\ + + + + + + + + + + + + + + + +""" + + kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + + with pytest.raises( + XMLSyntaxError, match=("Extra content at the end of the document") + ): + read_xml(kml, stylesheet=xsl) + + +@td.skip_if_no("lxml") +def test_incorrect_xsl_eval(datapath): + from lxml.etree import XSLTParseError + + xsl = """\ + + + + + + + + + + + + + + + +""" + + kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + + with pytest.raises(XSLTParseError, match=("failed to compile")): + read_xml(kml, stylesheet=xsl) + + +@td.skip_if_no("lxml") +def test_incorrect_xsl_apply(datapath): + from lxml.etree import XSLTApplyError + + xsl = """\ + + + + + + + + + +""" + + kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + + with pytest.raises(XSLTApplyError, match=("Cannot resolve URI")): + read_xml(kml, stylesheet=xsl) + + +@td.skip_if_no("lxml") +def test_wrong_stylesheet(): + from lxml.etree import XMLSyntaxError + + kml = os.path.join("data", "xml", "cta_rail_lines.kml") + xsl = os.path.join("data", "xml", "flatten.xsl") + + with pytest.raises( + XMLSyntaxError, + match=("Start tag expected, '<' not found"), + ): + read_xml(kml, stylesheet=xsl) + + +@td.skip_if_no("lxml") +def test_stylesheet_file_close(datapath, mode): 
+ kml = datapath("io", "data", "xml", "cta_rail_lines.kml") + xsl = datapath("io", "data", "xml", "flatten_doc.xsl") + + xsl_obj: Union[BytesIO, StringIO] + + with open(xsl, mode) as f: + if mode == "rb": + xsl_obj = BytesIO(f.read()) + else: + xsl_obj = StringIO(f.read()) + + read_xml(kml, stylesheet=xsl_obj) + + assert not f.closed + + +@td.skip_if_no("lxml") +def test_stylesheet_with_etree(datapath): + kml = os.path.join("data", "xml", "cta_rail_lines.kml") + xsl = os.path.join("data", "xml", "flatten_doc.xsl") + + with pytest.raises( + ValueError, match=("To use stylesheet, you need lxml installed") + ): + read_xml(kml, parser="etree", stylesheet=xsl) + + +@td.skip_if_no("lxml") +@pytest.mark.parametrize("val", ["", b""]) +def test_empty_stylesheet(val): + from lxml.etree import XMLSyntaxError + + kml = os.path.join("data", "xml", "cta_rail_lines.kml") + + with pytest.raises( + XMLSyntaxError, match=("Document is empty|Start tag expected, '<' not found") + ): + read_xml(kml, stylesheet=val) + + +@tm.network +@td.skip_if_no("lxml") +def test_online_stylesheet(): + xml = "https://www.w3schools.com/xml/cdcatalog_with_xsl.xml" + xsl = "https://www.w3schools.com/xml/cdcatalog.xsl" + + df_xsl = read_xml( + xml, + xpath=".//tr[td and position() <= 6]", + names=["title", "artist"], + stylesheet=xsl, + ) + + df_expected = DataFrame( + { + "title": { + 0: "Empire Burlesque", + 1: "Hide your heart", + 2: "Greatest Hits", + 3: "Still got the blues", + 4: "Eros", + }, + "artist": { + 0: "Bob Dylan", + 1: "Bonnie Tyler", + 2: "Dolly Parton", + 3: "Gary Moore", + 4: "Eros Ramazzotti", + }, + } + ) + + tm.assert_frame_equal(df_expected, df_xsl) + + +# COMPRESSION + + +@pytest.mark.parametrize("comp", ["bz2", "gzip", "xz", "zip"]) +def test_compression_read(parser, comp): + with tm.ensure_clean() as path: + geom_df.to_xml(path, index=False, parser=parser, compression=comp) + + xml_df = read_xml(path, parser=parser, compression=comp) + + tm.assert_frame_equal(xml_df, geom_df) + + +@pytest.mark.parametrize("comp", ["gzip", "xz", "zip"]) +def test_wrong_compression_bz2(parser, comp): + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=comp) + + with pytest.raises(OSError, match="Invalid data stream"): + read_xml(path, parser=parser, compression="bz2") + + +@pytest.mark.parametrize("comp", ["bz2", "xz", "zip"]) +def test_wrong_compression_gz(parser, comp): + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=comp) + + with pytest.raises(OSError, match="Not a gzipped file"): + read_xml(path, parser=parser, compression="gzip") + + +@pytest.mark.parametrize("comp", ["bz2", "gzip", "zip"]) +def test_wrong_compression_xz(parser, comp): + from lzma import LZMAError + + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=comp) + + with pytest.raises(LZMAError, match="Input format not supported by decoder"): + read_xml(path, parser=parser, compression="xz") + + +@pytest.mark.parametrize("comp", ["bz2", "gzip", "xz"]) +def test_wrong_compression_zip(parser, comp): + from zipfile import BadZipFile + + with tm.ensure_clean() as path: + geom_df.to_xml(path, parser=parser, compression=comp) + + with pytest.raises(BadZipFile, match="File is not a zip file"): + read_xml(path, parser=parser, compression="zip") + + +def test_unsuported_compression(datapath, parser): + with pytest.raises(ValueError, match="Unrecognized compression type"): + with tm.ensure_clean() as path: + read_xml(path, parser=parser, compression="7z") + + +# 
STORAGE OPTIONS + + +@tm.network +@td.skip_if_no("s3fs") +@td.skip_if_no("lxml") +def test_s3_parser_consistency(): + # Python Software Foundation (2019 IRS-990 RETURN) + s3 = "s3://irs-form-990/201923199349319487_public.xml" + + df_lxml = read_xml( + s3, + xpath=".//irs:Form990PartVIISectionAGrp", + namespaces={"irs": "http://www.irs.gov/efile"}, + parser="lxml", + storage_options={"anon": True}, + ) + + df_etree = read_xml( + s3, + xpath=".//irs:Form990PartVIISectionAGrp", + namespaces={"irs": "http://www.irs.gov/efile"}, + parser="etree", + storage_options={"anon": True}, + ) + + tm.assert_frame_equal(df_lxml, df_etree)
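
Taken together, the reader and writer round-trip: the default flat layout that
``to_xml`` emits is exactly the shallow structure ``read_xml`` is designed to
parse. A minimal end-to-end sketch using only options introduced in this
change (file names are illustrative, and ``lxml``, the default parser, is
assumed to be installed)::

    import pandas as pd

    df = pd.read_xml("books.xml")  # parses both attributes and child elements
    df.to_xml(
        "books_out.xml",
        index=False,
        root_name="books",  # rename the default <data> root
        row_name="book",    # rename the default <row> element
    )
    round_trip = pd.read_xml("books_out.xml")  # recovers the same columns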