#python #pandas #apache-spark #pyspark
Question:
I have built a Spark application that uses a pandas_udf (grouped map) to produce a pandas DataFrame for each group. The job runs, and after that stage I printed a few records from the result; everything looks fine.
The code is shown below:
import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType

@pandas_udf(df.schema, PandasUDFType.GROUPED_MAP)
def forecast(ts):
    ## Some time-series forecasting operation goes here and returns the
    ## forecast for the next 7 days as a DataFrame, as below
    return pd.DataFrame({'date_key': forecasted_dates,
                         'keyword': key,
                         'hit_numbers': predictions})
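As an aside: judging from the statsmodels frames in the traceback further down, the elided body fits an ARIMA model per keyword. A hypothetical sketch of what such a body might look like (the model order, daily frequency, and variable names are illustrative assumptions, not the original code):

import pandas as pd
from statsmodels.tsa.arima.model import ARIMA  # the module seen in the traceback

@pandas_udf(df.schema, PandasUDFType.GROUPED_MAP)
def forecast(ts):
    ts = ts.sort_values('date_key')
    # Fit on the observed series and forecast the next 7 days
    # (order=(1, 1, 1) is a placeholder assumption).
    fitted = ARIMA(ts['hit_numbers'].values.astype(float), order=(1, 1, 1)).fit()
    predictions = fitted.forecast(7)
    forecasted_dates = pd.date_range(ts['date_key'].max(), periods=8, freq='D')[1:]
    return pd.DataFrame({'date_key': forecasted_dates,
                         'keyword': ts['keyword'].iloc[0],
                         'hit_numbers': predictions})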
I called the function above as follows:
res = df.groupby('keyword').apply(forecast)
and printed a few rows of the result:
res.show(5)
+-------------------+-------+-----------+
|           date_key|keyword|hit_numbers|
+-------------------+-------+-----------+
|2020-10-01 00:00:00|alquran|        158|
|2020-10-02 00:00:00|alquran|        149|
|2020-10-03 00:00:00|alquran|         81|
|2020-10-04 00:00:00|alquran|         94|
|2020-10-05 00:00:00|alquran|        150|
+-------------------+-------+-----------+
only showing top 5 rows
But when I try to perform any other operation, such as counting the distinct values of a column or converting the result (a PySpark DataFrame) to a pandas DataFrame, it throws the error below:
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
<ipython-input-46-353422df5245> in <module>
----> 1 res.select('keyword').distinct().count()
/usr/lib/spark/python/pyspark/sql/dataframe.py in count(self)
522 2
523 """
--> 524 return int(self._jdf.count())
525
526 @ignore_unicode_prefix
/usr/lib/spark/python/lib/py4j-0.10.7-src.zip/py4j/java_gateway.py in __call__(self, *args)
1255 answer = self.gateway_client.send_command(command)
1256 return_value = get_return_value(
-> 1257 answer, self.gateway_client, self.target_id, self.name)
1258
1259 for temp_arg in temp_args:
/usr/lib/spark/python/pyspark/sql/utils.py in deco(*a, **kw)
61 def deco(*a, **kw):
62 try:
---> 63 return f(*a, **kw)
64 except py4j.protocol.Py4JJavaError as e:
65 s = e.java_exception.toString()
/usr/lib/spark/python/lib/py4j-0.10.7-src.zip/py4j/protocol.py in get_return_value(answer, gateway_client, target_id, name)
326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.n".
--> 328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
Py4JJavaError: An error occurred while calling o374.count.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 112 in stage 29.0 failed 4 times, most recent failure: Lost task 112.3 in stage 29.0 (TID 1846, ashwanth-pi-scaling-w-1.c.toped-ds-sandbox.internal, executor 111): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
process()
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/serializers.py", line 290, in dump_stream
for series in iterator:
File "<string>", line 1, in <lambda>
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 113, in wrapped
result = f(pd.concat(value_series, axis=1))
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<ipython-input-32-6e45198643a7>", line 17, in forecast
File "/opt/conda/anaconda/lib/python3.6/site-packages/statsmodels/tsa/arima/model.py", line 345, in fit
cov_type=cov_type, cov_kwds=cov_kwds, **method_kwargs)
File "/opt/conda/anaconda/lib/python3.6/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 643, in fit
start_params = self.start_params
File "/opt/conda/anaconda/lib/python3.6/site-packages/statsmodels/tsa/statespace/sarimax.py", line 956, in start_params
warning_description='ARMA and trend')
File "/opt/conda/anaconda/lib/python3.6/site-packages/statsmodels/tsa/statespace/sarimax.py", line 843, in _conditional_sum_squares
Y = endog[r:]
IndexError: too many indices for array: array is 0-dimensional, but 1 were indexed
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
at org.apache.spark.sql.execution.python.ArrowPythonRunner$anon$1.read(ArrowPythonRunner.scala:172)
at org.apache.spark.sql.execution.python.ArrowPythonRunner$anon$1.read(ArrowPythonRunner.scala:122)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage3.agg_doAggregateWithKeys_0$(Unknown Source)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage3.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$anonfun$13$anon$1.hasNext(WholeStageCodegenExec.scala:636)
at scala.collection.Iterator$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:127)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.run(Task.scala:123)
at org.apache.spark.executor.Executor$TaskRunner$anonfun$10.apply(Executor.scala:408)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$failJobAndIndependentStages(DAGScheduler.scala:1926)
at org.apache.spark.scheduler.DAGScheduler$anonfun$abortStage$1.apply(DAGScheduler.scala:1914)
at org.apache.spark.scheduler.DAGScheduler$anonfun$abortStage$1.apply(DAGScheduler.scala:1913)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1913)
at org.apache.spark.scheduler.DAGScheduler$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:948)
at org.apache.spark.scheduler.DAGScheduler$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:948)
at scala.Option.foreach(Option.scala:257)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:948)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2147)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2096)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2085)
at org.apache.spark.util.EventLoop$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:759)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2061)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2082)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2101)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2126)
at org.apache.spark.rdd.RDD$anonfun$collect$1.apply(RDD.scala:990)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:385)
at org.apache.spark.rdd.RDD.collect(RDD.scala:989)
at org.apache.spark.sql.execution.SparkPlan.executeCollect(SparkPlan.scala:299)
at org.apache.spark.sql.Dataset$anonfun$count$1.apply(Dataset.scala:2836)
at org.apache.spark.sql.Dataset$anonfun$count$1.apply(Dataset.scala:2835)
at org.apache.spark.sql.Dataset$anonfun$52.apply(Dataset.scala:3370)
at org.apache.spark.sql.execution.SQLExecution$anonfun$withNewExecutionId$1.apply(SQLExecution.scala:80)
at org.apache.spark.sql.execution.SQLExecution$.withSQLConfPropagated(SQLExecution.scala:127)
at org.apache.spark.sql.execution.SQLExecution$.withNewExecutionId(SQLExecution.scala:75)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$withAction(Dataset.scala:3369)
at org.apache.spark.sql.Dataset.count(Dataset.scala:2835)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.GatewayConnection.run(GatewayConnection.java:238)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 377, in main
process()
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 372, in process
serializer.dump_stream(func(split_index, iterator), outfile)
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/serializers.py", line 290, in dump_stream
for series in iterator:
File "<string>", line 1, in <lambda>
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/worker.py", line 113, in wrapped
result = f(pd.concat(value_series, axis=1))
File "/usr/lib/spark/python/lib/pyspark.zip/pyspark/util.py", line 99, in wrapper
return f(*args, **kwargs)
File "<ipython-input-32-6e45198643a7>", line 17, in forecast
File "/opt/conda/anaconda/lib/python3.6/site-packages/statsmodels/tsa/arima/model.py", line 345, in fit
cov_type=cov_type, cov_kwds=cov_kwds, **method_kwargs)
File "/opt/conda/anaconda/lib/python3.6/site-packages/statsmodels/tsa/statespace/mlemodel.py", line 643, in fit
start_params = self.start_params
File "/opt/conda/anaconda/lib/python3.6/site-packages/statsmodels/tsa/statespace/sarimax.py", line 956, in start_params
warning_description='ARMA and trend')
File "/opt/conda/anaconda/lib/python3.6/site-packages/statsmodels/tsa/statespace/sarimax.py", line 843, in _conditional_sum_squares
Y = endog[r:]
IndexError: too many indices for array: array is 0-dimensional, but 1 were indexed
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
at org.apache.spark.sql.execution.python.ArrowPythonRunner$anon$1.read(ArrowPythonRunner.scala:172)
at org.apache.spark.sql.execution.python.ArrowPythonRunner$anon$1.read(ArrowPythonRunner.scala:122)
at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
at scala.collection.Iterator$anon$12.hasNext(Iterator.scala:440)
at scala.collection.Iterator$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage3.agg_doAggregateWithKeys_0$(Unknown Source)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage3.processNext(Unknown Source)
at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
at org.apache.spark.sql.execution.WholeStageCodegenExec$anonfun$13$anon$1.hasNext(WholeStageCodegenExec.scala:636)
at scala.collection.Iterator$anon$11.hasNext(Iterator.scala:409)
at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:127)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:99)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:55)
at org.apache.spark.scheduler.Task.run(Task.scala:123)
at org.apache.spark.executor.Executor$TaskRunner$anonfun$10.apply(Executor.scala:408)
at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:414)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
... 1 more
What could be the problem here?
Comments:
1. This is clearly a Python exception, an IndexError.
2. Hmm... But what is the problem, then? When I run it on a smaller dataset, I don't run into this issue.
3. Maybe check for null values (see the diagnostic sketch after these comments).
4. Alternatively, try the following:
from pyspark.sql.functions import countDistinct
df.select(countDistinct("keyword")).show()
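Following up on comment 3, a quick diagnostic is to look for keywords with null values or very short histories, since those are plausible triggers for the IndexError (the 10-row threshold below is an arbitrary assumption):

from pyspark.sql import functions as F

# Keywords whose series contain nulls
df.filter(F.col('hit_numbers').isNull()).select('keyword').distinct().show()

# Keywords with very few observations, which ARIMA may be unable to fit
df.groupBy('keyword').count().filter(F.col('count') < 10).show()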
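A likely explanation for the overall behaviour: Spark evaluates lazily, and show(5) typically computes only as many partitions as it needs to produce 5 rows, so the groups that break ARIMA were never executed at that point. count() and toPandas() recompute res from scratch (it is not cached) and force every group through the UDF, which surfaces the failure. A hedged sketch of a defensive wrapper (the 10-point threshold and the empty-frame fallback are assumptions; depending on the Arrow version, the empty frame may need explicit dtypes):

import pandas as pd
from pyspark.sql.functions import pandas_udf, PandasUDFType

@pandas_udf(df.schema, PandasUDFType.GROUPED_MAP)
def safe_forecast(ts):
    empty = pd.DataFrame(columns=['date_key', 'keyword', 'hit_numbers'])
    # Groups with too few (or all-null) observations are the usual trigger
    # for 0-dimensional-array errors inside SARIMAX, so skip them up front.
    if ts['hit_numbers'].dropna().shape[0] < 10:
        return empty
    try:
        return forecast.func(ts)  # .func unwraps the pandas_udf-decorated function
    except (IndexError, ValueError):
        # Skip groups the model cannot fit instead of failing the whole job.
        return empty

res = df.groupby('keyword').apply(safe_forecast)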