import pandas as pd

# Load the employee records from disk.
# NOTE(review): assumes "employees.csv" exists in the working directory
# and has a "First Name" column — confirm against the data source.
data = pd.read_csv("employees.csv")

# Sort by first name so the printed output groups identical names together
# (drop_duplicates itself does not require sorted input).
data.sort_values("First Name", inplace=True)

# keep=False removes EVERY row whose "First Name" occurs more than once —
# no "first" or "last" survivor is kept, unlike the default keep="first".
data.drop_duplicates(subset="First Name", keep=False, inplace=True)

# Show the remaining (unique-first-name) rows.
print(data)
# Demonstrate DataFrame.dropDuplicates: return a new DataFrame with
# duplicate rows removed, optionally comparing only a subset of columns.
#
# NOTE(review): `sc` (a SparkContext) is assumed to already exist in this
# session, e.g. inside a pyspark shell — it is not created here; confirm
# before running standalone.

from pyspark.sql import Row

df = sc.parallelize([
    Row(name='Alice', age=5, height=80),
    Row(name='Alice', age=5, height=80),
    Row(name='Alice', age=10, height=80),
]).toDF()

# With no subset argument, rows must match on EVERY column to count as
# duplicates, so only the exact repeat of the (Alice, 5, 80) row is dropped.
df.dropDuplicates().show()
# +---+------+-----+
# |age|height| name|
# +---+------+-----+
# |  5|    80|Alice|
# | 10|    80|Alice|
# +---+------+-----+

# Restricting the comparison to name + height makes the age-5 and age-10
# rows duplicates of each other; one arbitrary survivor is kept.
df.dropDuplicates(['name', 'height']).show()
# +---+------+-----+
# |age|height| name|
# +---+------+-----+
# |  5|    80|Alice|
# +---+------+-----+