如果您有 PySpark 数据框,可以使用 withColumnRenamed 函数来重命名列。下面是我实际运行过的示例,您可以参考并根据自己的需求进行调整。
>>> l=[('some value1','some value2','some value 3'),('some value4','some value5','some value 6')]
>>> l_schema = StructType([StructField("col1.some valwith(in)and{around}",StringType(),True),StructField("col2.some valwith()and{}",StringType(),True),StructField("col3 some()valwith.and{}",StringType(),True)])
>>> reps = ('.','_'),(' ','_'),('(',''),(')',''),('{',''),('}','')
>>> rdd = sc.parallelize(l)
>>> df = sqlContext.createDataFrame(rdd,l_schema)
>>> df.printSchema()
root
|-- col1.some valwith(in)and{around}: string (nullable = true)
|-- col2.some valwith()and{}: string (nullable = true)
|-- col3 some()valwith.and{}: string (nullable = true)
>>> df.show()
+------------------------+------------------------+------------------------+
|col1.some valwith(in)and{around}|col2.some valwith()and{}|col3 some()valwith.and{}|
+------------------------+------------------------+------------------------+
| some value1| some value2| some value 3|
| some value4| some value5| some value 6|
+------------------------+------------------------+------------------------+
>>> def colrename(x):
... return reduce(lambda a,kv : a.replace(*kv),reps,x)
>>> for i in df.schema.names:
... df = df.withColumnRenamed(i,colrename(i))
>>> df.printSchema()
root
|-- col1_some_valwithinandaround: string (nullable = true)
|-- col2_some_valwithand: string (nullable = true)
|-- col3_somevalwith_and: string (nullable = true)
>>> df.show()
+--------------------+--------------------+--------------------+
|col1_some_valwithinandaround|col2_some_valwithand|col3_somevalwith_and|
+--------------------+--------------------+--------------------+
| some value1| some value2| some value 3|
| some value4| some value5| some value 6|
+--------------------+--------------------+--------------------+