[SPARK-8766] support non-ascii character in column names
Use UTF-8 to encode column names in Python 2; otherwise they may fail to encode with the default encoding ('ascii').
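As an aside, a minimal Python 2 sketch of the failure mode and of the explicit-UTF-8 fix applied below in types.py; the column name is just the one used in the new test:

    # -*- coding: utf-8 -*-
    # Python 2: the default codec is 'ascii', so encoding a non-ASCII unicode
    # column name without an explicit encoding raises UnicodeEncodeError.
    name = u"数量"
    try:
        name.encode()  # uses sys.getdefaultencoding() ('ascii') on Python 2
    except UnicodeEncodeError:
        pass
    # The fix in StructField.__init__: encode explicitly with UTF-8 when the
    # name is not already a str.
    if not isinstance(name, str):
        name = name.encode('utf-8')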

This PR also fixes a bug that occurred when a Java exception had no error message.
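A minimal sketch of that failure mode, using a hypothetical message-less exception string; the old and new handling come from the utils.py change below:

    # Hypothetical Java exception string with no ': <message>' part.
    s = 'java.lang.OutOfMemoryError'

    # Old handling: split(': ', 1) returns a one-element list here, so the
    # two-target unpacking raises ValueError and masks the original error:
    #     cls, msg = s.split(': ', 1)

    # New handling: check the class prefix first, and split only when the
    # prefix guarantees that a ': ' separator is present.
    if s.startswith('org.apache.spark.sql.AnalysisException: '):
        msg = s.split(': ', 1)[1]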

Author: Davies Liu <davies@databricks.com>

Closes #7165 from davies/non_ascii and squashes the following commits:

02cb61a [Davies Liu] fix tests
3b09d31 [Davies Liu] add encoding in header
867754a [Davies Liu] support non-ascii character in column names
Davies Liu committed Jul 1, 2015
1 parent 1ce6428 commit f958f27
Showing 4 changed files with 15 additions and 5 deletions.
3 changes: 1 addition & 2 deletions python/pyspark/sql/dataframe.py
@@ -484,13 +484,12 @@ def dtypes(self):
         return [(str(f.name), f.dataType.simpleString()) for f in self.schema.fields]
 
     @property
-    @ignore_unicode_prefix
     @since(1.3)
     def columns(self):
         """Returns all column names as a list.
 
         >>> df.columns
-        [u'age', u'name']
+        ['age', 'name']
         """
         return [f.name for f in self.schema.fields]
 
9 changes: 9 additions & 0 deletions python/pyspark/sql/tests.py
@@ -1,3 +1,4 @@
+# -*- encoding: utf-8 -*-
 #
 # Licensed to the Apache Software Foundation (ASF) under one or more
 # contributor license agreements. See the NOTICE file distributed with
@@ -628,6 +629,14 @@ def test_access_column(self):
         self.assertRaises(IndexError, lambda: df["bad_key"])
         self.assertRaises(TypeError, lambda: df[{}])
 
+    def test_column_name_with_non_ascii(self):
+        df = self.sqlCtx.createDataFrame([(1,)], ["数量"])
+        self.assertEqual(StructType([StructField("数量", LongType(), True)]), df.schema)
+        self.assertEqual("DataFrame[数量: bigint]", str(df))
+        self.assertEqual([("数量", 'bigint')], df.dtypes)
+        self.assertEqual(1, df.select("数量").first()[0])
+        self.assertEqual(1, df.select(df["数量"]).first()[0])
+
     def test_access_nested_types(self):
         df = self.sc.parallelize([Row(l=[1], r=Row(a=1, b="b"), d={"k": "v"})]).toDF()
         self.assertEqual(1, df.select(df.l[0]).first()[0])
2 changes: 2 additions & 0 deletions python/pyspark/sql/types.py
@@ -324,6 +324,8 @@ def __init__(self, name, dataType, nullable=True, metadata=None):
         False
         """
         assert isinstance(dataType, DataType), "dataType should be DataType"
+        if not isinstance(name, str):
+            name = name.encode('utf-8')
         self.name = name
         self.dataType = dataType
         self.nullable = nullable
6 changes: 3 additions & 3 deletions python/pyspark/sql/utils.py
@@ -29,9 +29,9 @@ def deco(*a, **kw):
         try:
             return f(*a, **kw)
         except py4j.protocol.Py4JJavaError as e:
-            cls, msg = e.java_exception.toString().split(': ', 1)
-            if cls == 'org.apache.spark.sql.AnalysisException':
-                raise AnalysisException(msg)
+            s = e.java_exception.toString()
+            if s.startswith('org.apache.spark.sql.AnalysisException: '):
+                raise AnalysisException(s.split(': ', 1)[1])
             raise
     return deco

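For context, a short usage sketch that mirrors the new test_column_name_with_non_ascii test; the SparkContext/SQLContext setup here is assumed, not part of this commit:

    # -*- coding: utf-8 -*-
    from pyspark import SparkContext
    from pyspark.sql import SQLContext

    sc = SparkContext("local", "non_ascii_columns")  # assumed local setup
    sqlCtx = SQLContext(sc)

    # Non-ASCII column names now work end-to-end: schema, repr, dtypes, and
    # selection both by name and by Column.
    df = sqlCtx.createDataFrame([(1,)], ["数量"])
    assert str(df) == "DataFrame[数量: bigint]"
    assert df.dtypes == [("数量", 'bigint')]
    assert df.select("数量").first()[0] == 1
    assert df.select(df["数量"]).first()[0] == 1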
