I am trying to do something very simple: update the value of a nested column. However, I cannot figure out how.
Environment:
from pyspark.sql.types import StructType, StructField, StringType, IntegerType
from pyspark.sql.functions import lit

dataDF = [
    (('Jon', '', 'Smith'), '1580-01-06', 'M', 3000)
]
schema = StructType([
    StructField('name', StructType([
        StructField('firstname', StringType(), True),
        StructField('middlename', StringType(), True),
        StructField('lastname', StringType(), True)
    ])),
    StructField('dob', StringType(), True),
    StructField('gender', StringType(), True),
    StructField('salary', IntegerType(), True)
])
df = spark.createDataFrame(data = dataDF, schema = schema)
df = df.withColumn("name.firstname", lit('John'))
df.printSchema()
df.show()
# Results
# I get a new column instead of an update to the nested field
root
 |-- name: struct (nullable = true)
 |    |-- firstname: string (nullable = true)
 |    |-- middlename: string (nullable = true)
 |    |-- lastname: string (nullable = true)
 |-- dob: string (nullable = true)
 |-- gender: string (nullable = true)
 |-- salary: integer (nullable = true)
 |-- name.firstname: string (nullable = false)
+--------------+----------+------+------+--------------+
|          name|       dob|gender|salary|name.firstname|
+--------------+----------+------+------+--------------+
|[Jon, , Smith]|1580-01-06|     M|  3000|          John|
+--------------+----------+------+------+--------------+
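Note that withColumn took "name.firstname" literally, so the result is a new top-level column whose name happens to contain a dot; you can reference it by quoting the name in backticks. A quick check, assuming the DataFrame built above:

# backticks quote a column name that literally contains a dot
df.select("`name.firstname`").show()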
To update the field inside the struct itself, you need to wrangle with the column a bit, as below: flatten the struct's fields, overwrite the one you want, rebuild the struct, and drop the temporary columns:
import pyspark.sql.functions as F

# Flatten the struct fields alongside the top-level columns, overwrite the target
# field, rebuild the struct from the flattened columns, then drop the temporary columns.
df2 = df.select('*', 'name.*') \
    .withColumn('firstname', F.lit('newname')) \
    .withColumn('name', F.struct(*[F.col(c) for c in df.select('name.*').columns])) \
    .drop(*df.select('name.*').columns)
df2.show()
+------------------+----------+------+------+
|              name|       dob|gender|salary|
+------------------+----------+------+------+
|[newname, , Smith]|1580-01-06|     M|  3000|
+------------------+----------+------+------+
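If you are on Spark 3.1 or later, Column.withField can update a single struct field without rebuilding the whole struct; a minimal sketch against the same df:

# Spark 3.1+: replace one field inside the struct in place
df3 = df.withColumn('name', F.col('name').withField('firstname', F.lit('newname')))
df3.show()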