# A regex-based tokenizer that extracts tokens by splitting the text on a
# regex pattern (or by matching the pattern when gaps=False).
from pyspark.sql import SparkSession
from pyspark.ml.feature import RegexTokenizer

spark = SparkSession.builder.getOrCreate()

df = spark.createDataFrame([("A B c",)], ["text"])
reTokenizer = RegexTokenizer(inputCol="text", outputCol="words")
reTokenizer.transform(df).head()
# Row(text='A B c', words=['a', 'b', 'c'])
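
# (Illustrative addition, assuming the standard RegexTokenizer params: pattern,
# gaps, and minTokenLength can be set explicitly. With gaps=False the pattern
# matches tokens instead of splitting on it. matchTokenizer is a hypothetical
# name used only for this sketch.)
matchTokenizer = RegexTokenizer(inputCol="text", outputCol="matched",
                                pattern="\\w+", gaps=False, minTokenLength=1)
matchTokenizer.transform(df).head()
# Row(text='A B c', matched=['a', 'b', 'c'])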

# Change a parameter; setParams() updates the tokenizer in place and returns it.
reTokenizer.setParams(outputCol="tokens").transform(df).head()
# Row(text='A B c', tokens=['a', 'b', 'c'])

# Temporarily modify a parameter by passing an extra param map to transform().
reTokenizer.transform(df, {reTokenizer.outputCol: "words"}).head()
# Row(text='A B c', words=['a', 'b', 'c'])
reTokenizer.transform(df).head()
# Row(text='A B c', tokens=['a', 'b', 'c'])
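
# (Extra check, assuming the standard param getter: the override above applied
# only to that single transform() call, so the tokenizer's outputCol still
# holds the value set with setParams.)
reTokenizer.getOutputCol()
# 'tokens'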

# Params must be specified as keyword arguments; positional args raise an error.
reTokenizer.setParams("text")
# Traceback (most recent call last):
# ...
# TypeError: Method setParams forces keyword arguments.
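
# (Follow-up sketch, assuming the standard keyword form: the same param is
# accepted when passed by name.)
reTokenizer.setParams(inputCol="text").getInputCol()
# 'text'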

# Save the tokenizer to disk and load it back.
import tempfile
temp_path = tempfile.mkdtemp()  # temporary directory assumed for this example
regexTokenizerPath = temp_path + "/regex-tokenizer"
reTokenizer.save(regexTokenizerPath)
loadedReTokenizer = RegexTokenizer.load(regexTokenizerPath)
loadedReTokenizer.getMinTokenLength() == reTokenizer.getMinTokenLength()
# True
loadedReTokenizer.getGaps() == reTokenizer.getGaps()
# True
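
# (Additional round-trip check, assuming the standard RegexTokenizer getters:
# the regex pattern and the updated output column are also preserved, and the
# loaded tokenizer can be used directly.)
loadedReTokenizer.getPattern() == reTokenizer.getPattern()
# True
loadedReTokenizer.transform(df).head()
# Row(text='A B c', tokens=['a', 'b', 'c'])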