@@ -198,8 +198,8 @@ def _inferSchema(self, rdd, samplingRatio=None):
198198 raise ValueError ("The first row in RDD is empty, "
199199 "can not infer schema" )
200200 if type (first ) is dict :
201- warnings .warn ("Using RDD of dict to inferSchema is deprecated, "
202- "please use pyspark.sql.Row instead" , DeprecationWarning )
201+ warnings .warn ("Using RDD of dict to inferSchema is deprecated. "
202+ "Use pyspark.sql.Row instead" )
203203
204204 if samplingRatio is None :
205205 schema = _infer_schema (first )
@@ -222,8 +222,7 @@ def inferSchema(self, rdd, samplingRatio=None):
222222 """
223223 .. note:: Deprecated in 1.3, use :func:`createDataFrame` instead.
224224 """
225- warnings .warn (
226- "inferSchema is deprecated, please use createDataFrame instead." , DeprecationWarning )
225+ warnings .warn ("inferSchema is deprecated, please use createDataFrame instead." )
227226
228227 if isinstance (rdd , DataFrame ):
229228 raise TypeError ("Cannot apply schema to DataFrame" )
@@ -235,8 +234,7 @@ def applySchema(self, rdd, schema):
235234 """
236235 .. note:: Deprecated in 1.3, use :func:`createDataFrame` instead.
237236 """
238- warnings .warn (
239- "applySchema is deprecated, please use createDataFrame instead" , DeprecationWarning )
237+ warnings .warn ("applySchema is deprecated, please use createDataFrame instead" )
240238
241239 if isinstance (rdd , DataFrame ):
242240 raise TypeError ("Cannot apply schema to DataFrame" )
@@ -369,8 +367,11 @@ def parquetFile(self, *paths):
369367 """Loads a Parquet file, returning the result as a :class:`DataFrame`.
370368
371369 .. note:: Deprecated in 1.4, use :func:`DataFrameReader.parquet` instead.
370+
371+ >>> sqlContext.parquetFile('python/test_support/sql/parquet_partitioned').dtypes
372+ [('name', 'string'), ('year', 'int'), ('month', 'int'), ('day', 'int')]
372373 """
373- warnings .warn ("parquetFile is deprecated. Use read.parquet() instead." , DeprecationWarning )
374+ warnings .warn ("parquetFile is deprecated. Use read.parquet() instead." )
374375 gateway = self ._sc ._gateway
375376 jpaths = gateway .new_array (gateway .jvm .java .lang .String , len (paths ))
376377 for i in range (0 , len (paths )):
@@ -382,8 +383,11 @@ def jsonFile(self, path, schema=None, samplingRatio=1.0):
382383 """Loads a text file storing one JSON object per line as a :class:`DataFrame`.
383384
384385 .. note:: Deprecated in 1.4, use :func:`DataFrameReader.json` instead.
386+
387+ >>> sqlContext.jsonFile('python/test_support/sql/people.json').dtypes
388+ [('age', 'bigint'), ('name', 'string')]
385389 """
386- warnings .warn ("jsonFile is deprecated. Use read.json() instead." , DeprecationWarning )
390+ warnings .warn ("jsonFile is deprecated. Use read.json() instead." )
387391 if schema is None :
388392 df = self ._ssql_ctx .jsonFile (path , samplingRatio )
389393 else :
@@ -440,7 +444,7 @@ def load(self, path=None, source=None, schema=None, **options):
440444
441445 .. note:: Deprecated in 1.4, use :func:`DataFrameReader.load` instead.
442446 """
443- warnings .warn ("load is deprecated. Use read.load() instead." , DeprecationWarning )
447+ warnings .warn ("load is deprecated. Use read.load() instead." )
444448 return self .read .load (path , source , schema , ** options )
445449
446450 @since (1.3 )
@@ -621,10 +625,14 @@ def register(self, name, f, returnType=StringType()):
621625
622626
623627def _test ():
628+ import os
624629 import doctest
625630 from pyspark .context import SparkContext
626631 from pyspark .sql import Row , SQLContext
627632 import pyspark .sql .context
633+
634+ os .chdir (os .environ ["SPARK_HOME" ])
635+
628636 globs = pyspark .sql .context .__dict__ .copy ()
629637 sc = SparkContext ('local[4]' , 'PythonTest' )
630638 globs ['sc' ] = sc