repl.it
@TonySiu/

Dictionaries

Python

No description

fork
loading
Files
  • main.py
main.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
# Install all required CRAN packages.
# NOTE(review): fixed typo "randomGorest" -> "randomForest" (the actual
# CRAN package name); the original call would fail to locate the package.
install.packages(c("ggplot2", "e1071", "caret", "quanteda", "irlba", "randomForest"))

# Bootstrap pacman, then install/load the qdap family from GitHub.
if (!require("pacman")) install.packages("pacman")
pacman::p_load_gh(
  "trinker/qdapDictionaries",
  "trinker/qdapRegex",
  "trinker/qdapTools",
  "trinker/qdap"
)

# Load up the .CSV data and explore in RStudio.
Tony.raw <- read.csv("denver_listings.csv", stringsAsFactors = FALSE)
View(Tony.raw)

# NOTE(review): removed leftover experiments that broke the script:
#   - a second read.csv("stackoverflow.csv") that clobbered the listings
#     data whose columns are used below,
#   - `df[...]` lines referencing undefined `df`/`row_sub`, one ending in a
#     stray `.` (a syntax error),
#   - `Tony.raw[!is.na(Tony.raw)]`, which is not valid row filtering on a
#     data frame,
#   - a single-column subset that collapsed the data frame to a vector, and
#   - assigning a shorter complete-cases vector back into one column, which
#     errors on length mismatch.

# Keep only the columns we need for the text analysis.
Tony.raw <- Tony.raw[, c("description", "neighbourhood", "neighborhood_overview")]

# Drop rows with any missing values; complete.cases() works row-wise on the
# whole data frame.
Tony.raw <- Tony.raw[complete.cases(Tony.raw), ]
head(Tony.raw$neighborhood_overview)

# Keep the class label (neighbourhood) and the text (description), in that
# order so the names below line up.
# NOTE(review): the original took columns 1:2 (description, neighbourhood)
# and named them c("Label", "Text"), which swapped the label and the text.
Tony.raw <- Tony.raw[, c("neighbourhood", "description")]
names(Tony.raw) <- c("Label", "Text")
View(Tony.raw)
# Check data to see if there are missing values.
length(which(!complete.cases(Tony.raw)))

# Convert our class label into a factor.
# NOTE(review): dropped the duplicate conversion of Tony.raw$neighbourhood —
# that column no longer exists after the rename to Label/Text above, so the
# original line errored; Label is the surviving class-label column.
Tony.raw$Label <- as.factor(Tony.raw$Label)

# The first step, as always, is to explore the data.
# First, take a look at the distribution of the class labels.
prop.table(table(Tony.raw$Label))

# Next, get a feel for the distribution of text lengths by adding a new
# feature for the length of each text.
# NOTE(review): dropped the duplicate nchar(Tony.raw$description) — the
# description column was renamed to Text above.
Tony.raw$TextLength <- nchar(Tony.raw$Text)
summary(Tony.raw$TextLength)


#Visualize distribution with ggplot2, adding segmentation for ham/spam
library(ggplot2)

ggplot(Tony.raw, aes(x=TextLength, fill = neighbourhood)) +
  theme_bw() +
  geom_histogram(binwidth = 5) +
  labs(y = "Text Count", x = "Length of Text",
       title = "Distribution of Text Lengths with class Labels")

# At a minimum we need to split our data into a training set and a test set.
# In a true project we would want a three-way split: training, validation,
# and test.
#
# Because the class distribution is imbalanced, use the caret package to
# create a random stratified split so the train/test label proportions
# match the full data set.
library(caret)
help(package = "caret")

# Create a 70%/30% stratified split; fix the random seed so the partition
# is reproducible.
set.seed(32984)
indexes <- createDataPartition(
  Tony.raw$Label,
  times = 1,
  p = 0.7,
  list = FALSE
)

train <- Tony.raw[indexes, ]
test <- Tony.raw[-indexes, ]

# Verify that the class proportions survived the split.
prop.table(table(train$Label))
prop.table(table(test$Label))

# Plot the 10 most frequent terms in the text.
# NOTE(review): the neighborhood_overview column was dropped during the
# cleaning steps above; the surviving text column is Text.
library(qdap)
frequent_terms <- freq_terms(Tony.raw$Text, 10)
plot(frequent_terms)