【发布时间】:2020-04-06 14:31:14
【问题描述】:
import org.apache.spark.SparkContext
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{StructType, StructField, StringType, IntegerType};
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.types.DataType
import org.apache.spark.sql._
// Build the SQL and Hive contexts on top of the pre-existing SparkContext (sc).
// The HiveContext is the one actually used below to create and write the DataFrame.
val sqlContext = new org.apache.spark.sql.SQLContext(sc)
val hiveContext = new org.apache.spark.sql.hive.HiveContext(sc)
// Read the pipe-delimited source file; the first line is a header row that
// must be excluded from the data.
val rdd =
sc.textFile("/user/scvappdf/hdpcf/SIT/df/meta/1576373/SDM_Data/RDM/rdm_all_ctry_cd_ver1.txt")
val header = rdd.first()
val rdd1 = rdd.filter(x => x != header)
// BUG FIX: String.split(regex) discards trailing empty strings, so any record
// whose last columns are empty yields an array with fewer than 27 elements,
// and the positional accesses p(0)..p(26) then throw
// java.lang.ArrayIndexOutOfBoundsException (the error reported when writing
// the ORC output). Passing limit = -1 preserves trailing empty fields so all
// 27 positions exist. Records that genuinely contain fewer than 27 delimited
// fields would still fail here, which is the desired loud behavior.
val rowRDD = rdd1
  .map(_.split("\\|", -1))
  .map(p => Row((0 to 26).map(i => p(i).trim): _*))
// Schema for the 27 pipe-delimited columns. Every column is typed as a
// string; only "rowid" is declared nullable, exactly as in the original
// hand-chained StructField list.
val innerStruct = {
  val columnNames = Seq(
    "rowid", "s_startdt", "s_starttime", "s_enddt", "s_endtime",
    "s_deleted_flag", "ctry_cd", "ctry_cd_desc", "intrnl_ctry_rgn_cd",
    "intrnl_ctry_rgn_desc", "iso_ind", "iso_alp2_ctry_cd", "iso_alp3_ctry_cd",
    "iso_num_cd", "npc_ind", "cal_type_cd", "hemisphere_cd", "hemisphere_desc",
    "ref_tbl_id", "eff_strt_dt", "eff_end_dt", "ent_st", "vers",
    "crtd_tmst", "mod_tmst", "src_id", "ods"
  )
  // Index 0 ("rowid") is the only nullable field.
  StructType(columnNames.zipWithIndex.map { case (name, i) =>
    StructField(name, StringType, nullable = i == 0)
  })
}
// Assemble the DataFrame from the parsed rows plus the explicit schema, then
// persist it as ORC, replacing any existing output under the target
// partition-style path.
val peopleSchemaRDD = hiveContext.createDataFrame(rowRDD, innerStruct)
val orcWriter = peopleSchemaRDD.write.mode("overwrite").format("orc")
orcWriter.save("/sit/regenv/hdata/DATAFABRIC/reg_tb_prd_dfab/stg_rdm_all_ctry_cd_ver1/ods=2019-12-06")
在以 ORC 格式保存数据帧 peopleSchemaRDD 时,我收到错误“java.lang.ArrayIndexOutOfBoundsException: 3”,其外层异常为 org.apache.spark.SparkException:写入行时任务失败(Task failed while writing rows)。
路径本身没有问题:输出文件夹已被创建,只是没有任何数据写入。我使用的是 Spark 1.6.3 版本。
【问题讨论】:
标签: apache-spark apache-spark-sql