# Return a new DataFrame with duplicate rows removed.
# dropDuplicates() with no arguments compares all columns; passing a list of
# column names deduplicates on that subset only (which surviving row is kept
# among duplicates is arbitrary).

from pyspark.sql import Row

# NOTE(review): assumes an active SparkContext bound to `sc` (e.g. a pyspark
# shell session) — confirm in the surrounding context.
df = sc.parallelize([
    Row(name='Alice', age=5, height=80),
    Row(name='Alice', age=5, height=80),
    Row(name='Alice', age=10, height=80)]).toDF()

# Full-row deduplication: the two identical (Alice, 5, 80) rows collapse.
df.dropDuplicates().show()
# +---+------+-----+
# |age|height| name|
# +---+------+-----+
# |  5|    80|Alice|
# | 10|    80|Alice|
# +---+------+-----+

# Subset deduplication on ('name', 'height'): all three rows share that pair,
# so only one row survives.
df.dropDuplicates(['name', 'height']).show()
# +---+------+-----+
# |age|height| name|
# +---+------+-----+
# |  5|    80|Alice|
# +---+------+-----+