[DNM] Bump Spark to 3.5.4 #14850

Triggered via pull request January 23, 2025 07:31
Status: Success
Total duration: 34s

clickhouse_be_trigger.yml

on: pull_request_target
add-comment (25s)

Annotations

3 errors and 1 warning
VeloxTPCHIcebergSuite.iceberg transformer exists: org/apache/gluten/execution/VeloxTPCHIcebergSuite#L1
Job aborted due to stage failure: Task 0 in stage 710.0 failed 1 times, most recent failure: Lost task 0.0 in stage 710.0 (TID 191) (05e73e3d8203 executor driver): java.lang.IndexOutOfBoundsException: index: 0, length: 40000 (expected: range(0, 0))
	at org.apache.arrow.memory.ArrowBuf.checkIndex(ArrowBuf.java:701)
	at org.apache.arrow.memory.ArrowBuf.setBytes(ArrowBuf.java:829)
	at org.apache.iceberg.arrow.vectorized.parquet.VectorizedParquetDefinitionLevelReader.setNextNValuesInVector(VectorizedParquetDefinitionLevelReader.java:795)
	at org.apache.iceberg.arrow.vectorized.parquet.VectorizedParquetDefinitionLevelReader.access$000(VectorizedParquetDefinitionLevelReader.java:37)
	at org.apache.iceberg.arrow.vectorized.parquet.VectorizedParquetDefinitionLevelReader$NumericBaseReader.nextBatch(VectorizedParquetDefinitionLevelReader.java:67)
	at org.apache.iceberg.arrow.vectorized.parquet.VectorizedPageIterator$LongPageReader.nextVal(VectorizedPageIterator.java:235)
	at org.apache.iceberg.arrow.vectorized.parquet.VectorizedPageIterator$BasePageReader.nextBatch(VectorizedPageIterator.java:187)
	at org.apache.iceberg.arrow.vectorized.parquet.VectorizedColumnIterator$LongBatchReader.nextBatchOf(VectorizedColumnIterator.java:129)
	at org.apache.iceberg.arrow.vectorized.parquet.VectorizedColumnIterator$BatchReader.nextBatch(VectorizedColumnIterator.java:77)
	at org.apache.iceberg.arrow.vectorized.VectorizedArrowReader.read(VectorizedArrowReader.java:170)
	at org.apache.iceberg.spark.data.vectorized.ColumnarBatchReader$ColumnBatchLoader.readDataToColumnVectors(ColumnarBatchReader.java:123)
	at org.apache.iceberg.spark.data.vectorized.ColumnarBatchReader$ColumnBatchLoader.loadDataToColumnBatch(ColumnarBatchReader.java:98)
	at org.apache.iceberg.spark.data.vectorized.ColumnarBatchReader.read(ColumnarBatchReader.java:72)
	at org.apache.iceberg.spark.data.vectorized.ColumnarBatchReader.read(ColumnarBatchReader.java:44)
	at org.apache.iceberg.parquet.VectorizedParquetReader$FileIterator.next(VectorizedParquetReader.java:147)
	at org.apache.iceberg.spark.source.BaseReader.next(BaseReader.java:138)
	at org.apache.spark.sql.execution.datasources.v2.PartitionIterator.hasNext(DataSourceRDD.scala:120)
	at org.apache.spark.sql.execution.datasources.v2.MetricsIterator.hasNext(DataSourceRDD.scala:158)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.$anonfun$hasNext$1(DataSourceRDD.scala:63)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.$anonfun$hasNext$1$adapted(DataSourceRDD.scala:63)
	at scala.Option.exists(Option.scala:406)
	at org.apache.spark.sql.execution.datasources.v2.DataSourceRDD$$anon$1.hasNext(DataSourceRDD.scala:63)
	at org.apache.spark.InterruptibleIterator.hasNext(InterruptibleIterator.scala:37)
	at scala.collection.Iterator$$anon$9.hasNext(Iterator.scala:576)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.columnartorow_nextBatch_0$(Unknown Source)
	at org.apache.spark.sql.catalyst.expressions.GeneratedClass$GeneratedIteratorForCodegenStage1.processNext(Unknown Source)
	at org.apache.spark.sql.execution.BufferedRowIterator.hasNext(BufferedRowIterator.java:43)
	at org.apache.spark.sql.execution.WholeStageCodegenEvaluatorFactory$WholeStageCodegenPartitionEvaluator$$anon$1.hasNext(WholeStageCodegenEvaluatorFactory.scala:43)
	at scala.collection.Iterator$$anon$9.hasNext(Iterator.scala:576)
	at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:168)
	at org.apache.spark.shuffle.ShuffleWriteProcessor.write(ShuffleWriteProcessor.scala:59)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:104)
	at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:54)
	at org.apache.spark.TaskContext.runTaskWithListeners(TaskContext.scala:166)
	at org.apache.spark.scheduler.Task.run(Task.scala:141)
	at org.apache.spark.executor.Executor$TaskRunner.$anonfun$run$4(Executor.scala:620)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally(SparkErrorUtils.scala:64)
	at org.apache.spark.util.SparkErrorUtils.tryWithSafeFinally$(SparkErrorUtils.scala:61)
	at org.apache.spark.util.Utils$.tryWithSafeFinally(Utils.scala:94)
	at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:623)
	at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
	at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
	at java.lang.Thread.run(Thread.java:748)
Driver stacktrace:
GlutenSQLQueryTestSuite.show-create-table.sql: org/apache/spark/sql/GlutenSQLQueryTestSuite#L346
show-create-table.sql
Result did not match for query #7: SHOW CREATE TABLE tbl
Expected "...uet LOCATION 'file:/[//]path/to/table'", but got "...uet LOCATION 'file:/[]path/to/table'"
add-comment
ubuntu-latest pipelines will use ubuntu-24.04 soon. For more details, see https://github.com/actions/runner-images/issues/10636