From 6c9f1149a45a7fdd346b3d00e0c18ff5f5a00aac Mon Sep 17 00:00:00 2001
From: fly6516
Date: Tue, 22 Apr 2025 06:28:02 +0000
Subject: [PATCH] fix: error.txt

---
 error.txt | 160 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 160 insertions(+)

diff --git a/error.txt b/error.txt
index e69de29..256b7dc 100644
--- a/error.txt
+++ b/error.txt
@@ -0,0 +1,160 @@
+/usr/bin/python3 /root/PycharmProjects/als_movie/collab_filter.py
+25/04/22 06:25:24 WARN NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
+Using Spark's default log4j profile: org/apache/spark/log4j-defaults.properties
+Setting default log level to "WARN".
+To adjust logging level use sc.setLogLevel(newLevel). For SparkR, use setLogLevel(newLevel).
+Training: 3563128, validation: 1189844, test: 1188989
+
+[(21708, 165, 3.5), (21708, 168, 1.0), (21708, 208, 2.0)]
+[(21708, 110, 4.5), (21708, 1641, 1.5), (21708, 1682, 4.5)]
+[(21708, 95, 3.5), (21708, 153, 1.5), (21708, 161, 4.0)]
+1189844
+25/04/22 06:25:52 ERROR TaskSetManager: Task 0 in stage 17.0 failed 4 times; aborting job
+Traceback (most recent call last):
+  File "/root/PycharmProjects/als_movie/collab_filter.py", line 54, in <module>
+    lambda_=regularizationParameter)
+  File "/usr/local/bin/python3.6/lib/python3.6/site-packages/pyspark/mllib/recommendation.py", line 274, in train
+    lambda_, blocks, nonnegative, seed)
+  File "/usr/local/bin/python3.6/lib/python3.6/site-packages/pyspark/mllib/common.py", line 130, in callMLlibFunc
+    return callJavaFunc(sc, api, *args)
+  File "/usr/local/bin/python3.6/lib/python3.6/site-packages/pyspark/mllib/common.py", line 123, in callJavaFunc
+    return _java2py(sc, func(*args))
+  File "/usr/local/bin/python3.6/lib/python3.6/site-packages/py4j/java_gateway.py", line 1257, in __call__
+    answer, self.gateway_client, self.target_id, self.name)
+  File "/usr/local/bin/python3.6/lib/python3.6/site-packages/py4j/protocol.py", line 328, in get_return_value
+    format(target_id, ".", name), value)
+py4j.protocol.Py4JJavaError: An error occurred while calling o135.trainALSModel.
+: org.apache.spark.SparkException: Job aborted due to stage failure: Task 0 in stage 17.0 failed 4 times, most recent failure: Lost task 0.3 in stage 17.0 (TID 17, 100.64.0.10, executor 0): org.apache.spark.api.python.PythonException: Traceback (most recent call last):
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 364, in main
+    func, profiler, deserializer, serializer = read_command(pickleSer, infile)
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 69, in read_command
+    command = serializer._read_with_length(file)
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 173, in _read_with_length
+    return self.loads(obj)
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 587, in loads
+    return pickle.loads(obj, encoding=encoding)
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/mllib/__init__.py", line 28, in <module>
+    import numpy
+ModuleNotFoundError: No module named 'numpy'
+
+	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
+	at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:592)
+	at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:575)
+	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
+	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
+	at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
+	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:390)
+	at scala.collection.Iterator$class.foreach(Iterator.scala:891)
+	at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
+	at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59)
+	at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:104)
+	at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:48)
+	at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:310)
+	at scala.collection.AbstractIterator.to(Iterator.scala:1334)
+	at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:302)
+	at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1334)
+	at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:289)
+	at scala.collection.AbstractIterator.toArray(Iterator.scala:1334)
+	at org.apache.spark.rdd.RDD$$anonfun$take$1$$anonfun$31.apply(RDD.scala:1409)
+	at org.apache.spark.rdd.RDD$$anonfun$take$1$$anonfun$31.apply(RDD.scala:1409)
+	at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2107)
+	at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2107)
+	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
+	at org.apache.spark.scheduler.Task.run(Task.scala:123)
+	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:411)
+	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
+	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:417)
+	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
+	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
+	at java.lang.Thread.run(Thread.java:748)
+
+Driver stacktrace:
+	at org.apache.spark.scheduler.DAGScheduler.org$apache$spark$scheduler$DAGScheduler$$failJobAndIndependentStages(DAGScheduler.scala:1925)
+	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1913)
+	at org.apache.spark.scheduler.DAGScheduler$$anonfun$abortStage$1.apply(DAGScheduler.scala:1912)
+	at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
+	at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
+	at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:1912)
+	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:948)
+	at org.apache.spark.scheduler.DAGScheduler$$anonfun$handleTaskSetFailed$1.apply(DAGScheduler.scala:948)
+	at scala.Option.foreach(Option.scala:257)
+	at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:948)
+	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2146)
+	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2095)
+	at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2084)
+	at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
+	at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:759)
+	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2067)
+	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2088)
+	at org.apache.spark.SparkContext.runJob(SparkContext.scala:2107)
+	at org.apache.spark.rdd.RDD$$anonfun$take$1.apply(RDD.scala:1409)
+	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
+	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
+	at org.apache.spark.rdd.RDD.withScope(RDD.scala:385)
+	at org.apache.spark.rdd.RDD.take(RDD.scala:1382)
+	at org.apache.spark.rdd.RDD$$anonfun$isEmpty$1.apply$mcZ$sp(RDD.scala:1517)
+	at org.apache.spark.rdd.RDD$$anonfun$isEmpty$1.apply(RDD.scala:1517)
+	at org.apache.spark.rdd.RDD$$anonfun$isEmpty$1.apply(RDD.scala:1517)
+	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
+	at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
+	at org.apache.spark.rdd.RDD.withScope(RDD.scala:385)
+	at org.apache.spark.rdd.RDD.isEmpty(RDD.scala:1516)
+	at org.apache.spark.mllib.recommendation.ALS.run(ALS.scala:240)
+	at org.apache.spark.mllib.api.python.PythonMLLibAPI.trainALSModel(PythonMLLibAPI.scala:488)
+	at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
+	at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
+	at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
+	at java.lang.reflect.Method.invoke(Method.java:498)
+	at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
+	at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
+	at py4j.Gateway.invoke(Gateway.java:282)
+	at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
+	at py4j.commands.CallCommand.execute(CallCommand.java:79)
+	at py4j.GatewayConnection.run(GatewayConnection.java:238)
+	at java.lang.Thread.run(Thread.java:748)
+Caused by: org.apache.spark.api.python.PythonException: Traceback (most recent call last):
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 364, in main
+    func, profiler, deserializer, serializer = read_command(pickleSer, infile)
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/worker.py", line 69, in read_command
+    command = serializer._read_with_length(file)
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 173, in _read_with_length
+    return self.loads(obj)
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/serializers.py", line 587, in loads
+    return pickle.loads(obj, encoding=encoding)
+  File "/opt/module/spark-2.4.8-bin-hadoop2.7/python/lib/pyspark.zip/pyspark/mllib/__init__.py", line 28, in <module>
+    import numpy
+ModuleNotFoundError: No module named 'numpy'
+
+	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.handlePythonException(PythonRunner.scala:456)
+	at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:592)
+	at org.apache.spark.api.python.PythonRunner$$anon$1.read(PythonRunner.scala:575)
+	at org.apache.spark.api.python.BasePythonRunner$ReaderIterator.hasNext(PythonRunner.scala:410)
+	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
+	at scala.collection.Iterator$$anon$12.hasNext(Iterator.scala:440)
+	at scala.collection.Iterator$$anon$10.hasNext(Iterator.scala:390)
+	at scala.collection.Iterator$class.foreach(Iterator.scala:891)
+	at scala.collection.AbstractIterator.foreach(Iterator.scala:1334)
+	at scala.collection.generic.Growable$class.$plus$plus$eq(Growable.scala:59)
+	at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:104)
+	at scala.collection.mutable.ArrayBuffer.$plus$plus$eq(ArrayBuffer.scala:48)
+	at scala.collection.TraversableOnce$class.to(TraversableOnce.scala:310)
+	at scala.collection.AbstractIterator.to(Iterator.scala:1334)
+	at scala.collection.TraversableOnce$class.toBuffer(TraversableOnce.scala:302)
+	at scala.collection.AbstractIterator.toBuffer(Iterator.scala:1334)
+	at scala.collection.TraversableOnce$class.toArray(TraversableOnce.scala:289)
+	at scala.collection.AbstractIterator.toArray(Iterator.scala:1334)
+	at org.apache.spark.rdd.RDD$$anonfun$take$1$$anonfun$31.apply(RDD.scala:1409)
+	at org.apache.spark.rdd.RDD$$anonfun$take$1$$anonfun$31.apply(RDD.scala:1409)
+	at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2107)
+	at org.apache.spark.SparkContext$$anonfun$runJob$5.apply(SparkContext.scala:2107)
+	at org.apache.spark.scheduler.ResultTask.runTask(ResultTask.scala:90)
+	at org.apache.spark.scheduler.Task.run(Task.scala:123)
+	at org.apache.spark.executor.Executor$TaskRunner$$anonfun$10.apply(Executor.scala:411)
+	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:1360)
+	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:417)
+	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
+	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
+	... 1 more
+
+
+Process finished with exit code 1