parallelisation works, output now in a data frame

This commit is contained in:
2015-02-22 03:05:56 +01:00
parent b3658e282a
commit a121c1baf1
3 changed files with 802 additions and 365 deletions
+359 -359
@@ -1,380 +1,276 @@
for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(as.character(curdate),"\n")
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# Select tweet's text and remove hashtag indicators (#); case is handled via ignore.case when matching
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
sink("log.txt", append=TRUE)
as.character(drange[i])
w <- sample(1:2, 1)
Sys.sleep(w)
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
stopCluster(cl)
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
sink("log.txt", append=TRUE)
as.character(drange[i])
}
print(Sys.time()-strt)
stopCluster(cl)
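This setup/loop/teardown cycle repeats throughout the history below with small variations. A minimal sketch of the skeleton these experiments converge on (drange is the date range defined later in this history; 3 workers as in the runs here):
library(foreach)
library(doParallel)
cl <- makeCluster(3)
registerDoParallel(cl)
strt <- Sys.time()
writeLines(c(""), "log.txt")   # truncate the log before the run
res <- foreach(i = 1:length(drange)) %dopar% {
  # workers cannot share the master console, so log to a file instead
  cat(paste("Starting iteration", i, "\n"), file = "log.txt", append = TRUE)
  as.character(drange[i])      # the last expression becomes element i of res
}
print(Sys.time() - strt)
stopCluster(cl)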
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
sink("log.txt", append=TRUE)
cat(as.character(drange[i]))
}
# Match current tweet with tag. Tags of >= 5 letters tolerate 1 changed letter (Levenshtein distance)
tags_found <- NULL
tags_found <- smartPatternMatch(curtext, curtag, curchars, curacro)
if(tags_found == 1) {
# Raise number of findings on this day for this issue by 1
issues[d,curissue] <- issues[d,curissue] + 1
# Add issue and first matched tag of tweet to tweets-DF
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curtag), curfile, append = TRUE)
break
print(Sys.time()-strt)
stopCluster(cl)
writeLines(c(""), "log.txt")
cat(as.character(drange[i]))
writeLines(c(""), "log.txt")
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
sink("log.txt", append=TRUE)
cat(as.character(drange[i]),"\n")
}
else {
#cat("Nothing found\n")
print(Sys.time()-strt)
stopCluster(cl)
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
sink("log.txt", append=TRUE)
cat(as.character(drange[i]),"\n")
w <- sample(1:3, 1)
Sys.sleep(w)
as.character(drange[i])
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
warnings()
tags_found <- NULL
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curchars, curacro)
print(Sys.time()-strt)
stopCluster(cl)
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
sink("log.txt", append=TRUE)
cat(as.character(drange[i]),"\n")
# w <- sample(1:3, 1)
# Sys.sleep(w)
as.character(drange[i])
}
tags_found
curtext
curtag
any(tags_found)
print(Sys.time()-strt)
stopCluster(cl)
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
sink("log.txt", append=TRUE)
cat(as.character(drange[i]),"\n")
# w <- sample(1:3, 1)
# Sys.sleep(w)
as.character(drange[i])
}
print(Sys.time()-strt)
stopCluster(cl)
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
cat(paste("\n","Starting iteration",i,"\n"), file="log.txt", append=TRUE)
as.character(drange[i])
}
print(Sys.time()-strt)
stopCluster(cl)
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
w <- sample(1:3, 1)
Sys.sleep(w)
cat(paste("\n","Starting iteration",i,"\n"), file="log.txt", append=TRUE)
as.character(drange[i])
}
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
w <- sample(1:10, 1)
Sys.sleep(w)
cat(paste("\n","Starting iteration",i,"\n"), file="log.txt", append=TRUE)
as.character(drange[i])
}
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
w <- sample(1:10, 1)
#Sys.sleep(w)
cat(paste("\n","Starting iteration",i,"\n"), file="log.txt", append=TRUE)
as.character(drange[i])
}
print(Sys.time()-strt)
stopCluster(cl)
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
ls<-foreach(i = 1:length(drange)) %dopar% {
w <- sample(1:10, 1)
#Sys.sleep(w)
cat(paste("\n","Starting iteration",i,"\n"), file="log.txt", append=TRUE)
as.character(drange[i])
}
print(Sys.time()-strt)
stopCluster(cl)
View(data)
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
data<-foreach(i = 1:length(drange)) %dopar% {
w <- sample(1:10, 1)
#Sys.sleep(w)
cat(paste("\n","Starting iteration",i,"\n"), file="log.txt", append=TRUE)
as.character(drange[i])
}
print(Sys.time()-strt)
stopCluster(cl)
rm(ls)
data
#import packages
library(foreach)
library(doParallel)
#setup parallel backend to use 3 processors
cl<-makeCluster(3)
registerDoParallel(cl)
#start time
strt<-Sys.time()
writeLines(c(""), "log.txt")
#loop
df<-foreach(i = 1:length(drange)) %dopar% {
w <- sample(1:10, 1)
#Sys.sleep(w)
cat(paste("\n","Starting iteration",i,"\n"), file="log.txt", append=TRUE)
as.character(drange[i])
}
print(Sys.time()-strt)
stopCluster(cl)
df
view(df)
View(df)
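df is still a plain list here: foreach() collects one result per iteration and returns a list unless told otherwise. Turning the output into an actual data frame needs a .combine function, which is what a later revision adds. A sketch of the difference, assuming a doParallel cluster is registered as above:
# default: a list with one element per day
lst <- foreach(i = 1:length(drange)) %dopar% as.character(drange[i])
# .combine = rbind stitches the per-iteration results together; the result
# is a data frame when each iteration returns a one-row data frame
df <- foreach(i = 1:length(drange), .combine = rbind) %dopar% {
  data.frame(date = as.character(drange[i]))
}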
# Parallelisation
writeLines(c(""), "log.txt")
cl<-makeCluster(3)
registerDoParallel(cl)
# MATCH TWEETS ------------------------------------------------------------
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
issues <- data.frame(date = drange)
issuelist <- xmlToList("issues.xml")
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
tagexpand <- c("", "s", "n", "en")
for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(as.character(curdate),"\n")
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# Select tweet's text and remove hashtag indicators (#); case is handled via ignore.case when matching
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Match current tweet with tag. Tags of >= 5 letters tolerate 1 changed letter (Levenshtein distance)
tags_found <- NULL
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curchars, curacro)
}
tags_found <- any(tags_found)
if(tags_found == TRUE) {
# Raise number of findings on this day for this issue by 1
issues[d,curissue] <- issues[d,curissue] + 1
# Add issue and first matched tag of tweet to tweets-DF
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curtag), curfile, append = TRUE)
break
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
curtag
curtag <- curtag[1]
curtag
# MATCH TWEETS ------------------------------------------------------------
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
issues <- data.frame(date = drange)
issuelist <- xmlToList("issues.xml")
issueheads <- names(issuelist)
issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
tagexpand <- c("", "s", "n", "en")
for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(as.character(curdate),"\n")
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# Select tweet's text and remove hashtag indicators (#); case is handled via ignore.case when matching
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Match current tweet with tag. Tags of >= 5 letters tolerate 1 changed letter (Levenshtein distance)
tags_found <- NULL
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curchars, curacro)
}
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found == TRUE) {
# Raise number of findings on this day for this issue by 1
issues[d,curissue] <- issues[d,curissue] + 1
# Add issue and first matched tag of tweet to tweets-DF
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curtag), curfile, append = TRUE)
break
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
View(tweets)
require(jsonlite)
require(stringr)
require(devtools)
require(RTwitterAPI)
acc_df <- read.csv("MdB-twitter.csv")
delrow <- NULL
for(r in 1:nrow(acc_df)) {
acc <- as.character(acc_df$twitter_acc[r])
if(!nzchar(acc)) {
delrow <- c(delrow, r)
}
}
acc_df <- acc_df[-delrow, ]
rm(delrow, r, acc)
acc_df$row.names <- NULL
row.names(acc_df) <- NULL
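The delrow loop drops accounts without a Twitter handle; a vectorized sketch of the same cleanup (same result, assuming twitter_acc is a character or factor column):
acc_df <- read.csv("MdB-twitter.csv")
acc_df <- acc_df[nzchar(as.character(acc_df$twitter_acc)), ]   # keep non-empty handles
row.names(acc_df) <- NULL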
View(acc_df)
acc_df(acc_df$party == "linke")
acc_df[acc_df$party == "linke"]
acc_df[, acc_df$party == "linke"]
acc_df[acc_df$party == "linke", ]
length(acc_df[acc_df$party == "linke", ])
nrow(acc_df[acc_df$party == "linke", ])
nrow(acc_df[acc_df$party == "linke", ]) / 280
nrow(acc_df[acc_df$party == "gruene", ]) / 280
nrow(acc_df[acc_df$party == "cducsu", ]) / 280
nrow(acc_df[acc_df$party == "spd", ]) / 280
test <- c("linke", "gruene")
nrow(acc_df[acc_df$party == sprintf("%s", test), ]) / 280
test
nrow(acc_df[acc_df$party == sprintf("%s", test), ]) / 280
acc_parties <- c("cducsu", "spd", "linke", "gruene")
acc_parties <- data.frame(party = c("cducsu", "spd", "linke", "gruene"))
View(acc_parties)
acc_parties$btw13 <- c(41.5, 25.7, 8.6, 8.4)
View(acc_parties)
acc_parties$twitter <- 0
View(acc_parties)
for(p in 1:length(acc_parties)) {
acc_parties$twitter[p] <- as.numeric(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280)
}
View(acc_parties)
as.numeric(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100)
round(14.64282, digits = 1)
round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280), digits=1)
nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280)
nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280
nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100
round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100)
round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100, digits=1)
for(p in 1:length(acc_parties)) {
acc_parties$twitter[p] <- round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100, digits=1)
}
View(acc_parties)
View(acc_parties)
acc_parties$twitter <- 0
for(p in 1:length(acc_parties)) {
acc_parties$twitter[p] <- round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100, digits=1)
}
View(acc_parties)
nrow(acc_df[acc_df$party == "gruene", ]) / 280
as.character(acc_parties$party[4])
acc_parties <- data.frame(party = c("cducsu", "spd", "linke", "gruene"))
acc_parties$btw13 <- c(41.5, 25.7, 8.6, 8.4)
acc_parties$twitter <- 0
for(p in 1:length(acc_parties)) {
acc_parties$twitter[p] <- round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100)
}
View(acc_parties)
round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100)
p
acc_parties
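The stray zero in acc_parties$twitter comes from the loop range: length() of a data frame is its number of columns (3 here), not its number of rows (4), so the loop above never reaches the gruene row. A minimal illustration; the corrected runs below switch to nrow():
df <- data.frame(a = 1:4, b = 5:8, c = 9:12)
length(df)   # 3 -- number of columns
nrow(df)     # 4 -- number of rows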
acc_parties <- data.frame(party = c("cducsu", "spd", "linke", "gruene"))
acc_parties$btw13 <- c(41.5, 25.7, 8.6, 8.4)
acc_parties$twitter <- 0
for(p in 1:nrow(acc_parties)) {
acc_parties$twitter[p] <- round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100)
}
View(acc_parties)
acc_parties <- data.frame(party = c("cducsu", "spd", "linke", "gruene"))
acc_parties$btw13 <- c(49.3, 30.6, 10.1, 10.0)
acc_parties$twitter <- 0
for(p in 1:nrow(acc_parties)) {
acc_parties$twitter[p] <- round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100)
}
View(acc_parties)
pie(acc_parties$btw13)
pie(acc_parties$btw13, col=c("black", "red", "purple", "green"))
pie(acc_parties$btw13, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"))
pie(acc_parties$twitter, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"))
pie(acc_parties$twitter, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"), clockwise = T)
pie(acc_btw13$twitter, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"), clockwise = T)
pie(acc_parties$btw13, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"), clockwise = T)
acc_parties <- data.frame(party = c("cducsu", "spd", "linke", "gruene"))
acc_parties$btw13 <- c(49.3, 30.6, 10.1, 10.0) # seats of party / 631 seats
acc_parties$twitter <- 0
for(p in 1:nrow(acc_parties)) {
acc_parties$twitter[p] <- round(nrow(acc_df[acc_df$party == as.character(acc_parties$party[p]), ]) / 280 * 100)
}
pie(acc_parties$btw13, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"), clockwise = T)
pie(acc_parties$twitter, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"), clockwise = T)
pie(acc_parties$btw13, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"), clockwise = T,
main = "Seats of parties in the parliament")
pie(acc_parties$twitter, col=c("black", "red", "purple", "green"), labels = c("CDU/CSU", "SPD", "Die LINKE", "Bündnis 90/Grüne"), clockwise = T,
main = "Percentage of parties' MdBs of all Twitter accounts")
rm(acc_parties)
require(lubridate)
require(XML)
require(ggplot2)
require(reshape2)
require(stringr)
source("issuecomp-functions.R")
curchars
curchars <- 7
curchars >= 5 && curchars <= 7
curchars <- 10
curchars >= 5 && curchars <= 7
curchars <- 4
curchars >= 5 && curchars <= 7
if(curchars <= 4) {
curdistance <- 0
}
else if {curchars >= 5} {
curdistance <- 1
}
if(curchars <= 4) {
curdistance <- 0
} else if {curchars >= 5} {
curdistance <- 1
}
if(curchars <= 4) {
curdistance <- 0
} else {
curdistance <- 1
}
curdistance
source("issuecomp-functions.R")
smartPatternMatch("bla bla Tomate bla", "tomaten", 0, F)
smartPatternMatch("bla bla Tomate bla", "tomaten", 1, F)
smartPatternMatch("bla bla Tomate bla", "tomatens", 1, F)
smartPatternMatch("bla bla Tomate bla", "tomatens", 2, F)
rm(list=ls())
require(lubridate)
require(XML)
require(ggplot2)
require(reshape2)
require(stringr)
source("issuecomp-functions.R")
load(file = "tweets_untagged.RData")
date_start <- as.Date("2014-01-01")
date_end <- as.Date("2014-12-31")
drange <- as.integer(date_end - date_start)
drange <- date_start + days(0:drange)
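The two-step construction of drange is equivalent to a single seq() call over Date objects; a sketch with the same endpoints:
drange <- seq(as.Date("2014-01-01"), as.Date("2014-12-31"), by = "day")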
# MATCH TWEETS ------------------------------------------------------------
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
issues <- data.frame(date = drange)
issuelist <- xmlToList("issues.xml")
issueheads <- names(issuelist)
issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
tagexpand <- c("", "s", "n", "en")
for(d in 1:nrow(issues)) {
tagexpand <- c("", "s", "n", "en", "er")
# Parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl<-makeCluster(3)
registerDoParallel(cl)
df<-foreach(d = 1:nrow(issues) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
sink("log.txt", append=TRUE)
cat(as.character(curdate),"\n")
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
cat("Starting tweet", t, "of",as.character(curdate),"\n")
# Select tweet's text and remove hashtag indicators (#); case is handled via ignore.case when matching
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
@@ -424,6 +320,7 @@ oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curtag), curfile, append = TRUE)
cat("Match!\n")
break
}
else {
@@ -433,24 +330,34 @@ else {
} # /for issuelist
} # /for tweets_curday
} # /for drange
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
# MATCH TWEETS ------------------------------------------------------------
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
issues <- data.frame(date = drange)
issuelist <- xmlToList("issues.xml")
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
tagexpand <- c("", "s", "n", "en")
for(d in 1:nrow(issues)) {
tagexpand <- c("", "s", "n", "en", "er")
# Parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl<-makeCluster(3)
registerDoParallel(cl)
df<-foreach(d = 1:nrow(issues)) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
sink("log.txt", append=TRUE)
cat(as.character(curdate),"\n")
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
cat("Starting tweet", t, "of",as.character(curdate),"\n")
# Select tweet's text and remove hashtag indicators (#); case is handled via ignore.case when matching
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
@@ -500,6 +407,7 @@ oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curtag), curfile, append = TRUE)
cat("Match!\n")
break
}
else {
@@ -509,4 +417,96 @@ else {
} # /for issuelist
} # /for tweets_curday
} # /for drange
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
# MATCH TWEETS ------------------------------------------------------------
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
issues <- data.frame(date = drange)
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
tagexpand <- c("", "s", "n", "en", "er")
# Parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl<-makeCluster(3)
registerDoParallel(cl)
df<-foreach(d = 1:nrow(issues), .packages = c("stringr")) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
sink("log.txt", append=TRUE)
cat(as.character(curdate),"\n")
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
cat("Starting tweet", t, "of",as.character(curdate),"\n")
# Select tweet's text and remove hashtag indicators (#); case is handled via ignore.case when matching
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Set Levenshtein distance depending on char length
if(curchars <= 4) {
curdistance <- 0
} else {
curdistance <- 1
}
# Match current tweet with tag. Tags of >= 5 letters tolerate 1 changed letter (Levenshtein distance)
tags_found <- NULL
# Match the tweet with each variation of tagexpand
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
}
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found == TRUE) {
# Raise number of findings on this day for this issue by 1
issues[d,curissue] <- issues[d,curissue] + 1
# Add issue and first matched tag of tweet to tweets-DF
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curtag), curfile, append = TRUE)
cat("Match!\n")
break
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
stopCluster(cl)
View(issues)
cl
df
View(data)
stopCluster(cl)
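One thing the runs above expose: inside %dopar% every worker operates on its own copy of issues and tweets, so in-place assignments like issues[d, curissue] <- ... never reach the master session. The per-day counts have to be returned from the loop body and combined afterwards, which is what the revision in the next file starts doing with .combine = rbind. A sketch of that shape (count_day() is a hypothetical stand-in for the matching logic above):
df <- foreach(d = 1:nrow(issues), .packages = c("stringr"),
              .combine = rbind) %dopar% {
  counts <- count_day(issues$date[d], tweets, issuelist)   # hypothetical helper
  data.frame(date = issues$date[d], t(counts))             # one row per day
}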
+7 -6
@@ -3,6 +3,8 @@ require(XML)
require(ggplot2)
require(reshape2)
require(stringr)
library(foreach)
library(doParallel)
source("issuecomp-functions.R")
@@ -34,22 +36,21 @@ tweets$tags <- ""
tagexpand <- c("", "s", "n", "en", "er")
# Parallelisation
writeLines(c(""), "log.txt")
writeLines(c(""), "issuecomp-analysis.log")
cl<-makeCluster(3)
registerDoParallel(cl)
df<-foreach(d = 1:nrow(issues) %dopar% {
df<-foreach(d = 1:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
sink("log.txt", append=TRUE)
cat(as.character(curdate),"\n")
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
cat("Starting tweet", t, "of",as.character(curdate),"\n")
cat(paste("Starting tweet", t, "of",as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Select tweet's text and remove hashtag indicators (#); case is handled via ignore.case when matching
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
@@ -122,7 +123,7 @@ df<-foreach(d = 1:nrow(issues) %dopar% {
} # /for drange
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
stopCluster(cl)
# SAVING ------------------------------------------------------------------
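The same diff swaps sink() for direct cat(..., file =) calls. sink() redirects a worker's entire output stream and stays active until sink() is called again without arguments, which is easy to get wrong across three workers; appending straight to the file needs no cleanup. A sketch of the safer idiom (log_msg is a hypothetical helper, not part of the commit):
log_msg <- function(...) {
  cat(paste(..., "\n"), file = "issuecomp-analysis.log", append = TRUE)
}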
+436
@@ -0,0 +1,436 @@
2014-01-01
2014-01-02
2014-01-03
Starting tweet 1 of 2014-01-02
Starting tweet 1 of 2014-01-01
Starting tweet 1 of 2014-01-03
Starting tweet 2 of 2014-01-01
Starting tweet 2 of 2014-01-03
Starting tweet 2 of 2014-01-02
Starting tweet 3 of 2014-01-01
Starting tweet 3 of 2014-01-03
Starting tweet 3 of 2014-01-02
Starting tweet 4 of 2014-01-01
Starting tweet 4 of 2014-01-03
Starting tweet 4 of 2014-01-02
Starting tweet 5 of 2014-01-01
Starting tweet 5 of 2014-01-03
Starting tweet 6 of 2014-01-01
Starting tweet 5 of 2014-01-02
Starting tweet 6 of 2014-01-03
Starting tweet 6 of 2014-01-02
Starting tweet 7 of 2014-01-01
Starting tweet 7 of 2014-01-03
Starting tweet 8 of 2014-01-01
Starting tweet 7 of 2014-01-02
Starting tweet 8 of 2014-01-03
Starting tweet 9 of 2014-01-01
Starting tweet 8 of 2014-01-02
Starting tweet 9 of 2014-01-03
Starting tweet 9 of 2014-01-02
Starting tweet 10 of 2014-01-01
Starting tweet 10 of 2014-01-03
Starting tweet 10 of 2014-01-02
Starting tweet 11 of 2014-01-01
Starting tweet 11 of 2014-01-02
Starting tweet 11 of 2014-01-03
Starting tweet 12 of 2014-01-01
Starting tweet 12 of 2014-01-02
Starting tweet 12 of 2014-01-03
Starting tweet 13 of 2014-01-01
Starting tweet 13 of 2014-01-02
Starting tweet 13 of 2014-01-03
Starting tweet 14 of 2014-01-01
Starting tweet 14 of 2014-01-02
Starting tweet 15 of 2014-01-01
Starting tweet 14 of 2014-01-03
Starting tweet 15 of 2014-01-03
Starting tweet 15 of 2014-01-02
Starting tweet 16 of 2014-01-01
Starting tweet 16 of 2014-01-02
Starting tweet 16 of 2014-01-03
Starting tweet 17 of 2014-01-01
Starting tweet 17 of 2014-01-03
Starting tweet 17 of 2014-01-02
Starting tweet 18 of 2014-01-03
Starting tweet 18 of 2014-01-01
Starting tweet 18 of 2014-01-02
Starting tweet 19 of 2014-01-03
Starting tweet 19 of 2014-01-01
Starting tweet 20 of 2014-01-03
Starting tweet 19 of 2014-01-02
Starting tweet 21 of 2014-01-03
Starting tweet 20 of 2014-01-01
Starting tweet 20 of 2014-01-02
Starting tweet 22 of 2014-01-03
Starting tweet 21 of 2014-01-01
Starting tweet 21 of 2014-01-02
Starting tweet 22 of 2014-01-01
Starting tweet 22 of 2014-01-02
Starting tweet 23 of 2014-01-03
Starting tweet 23 of 2014-01-01
Starting tweet 23 of 2014-01-02
Starting tweet 24 of 2014-01-03
Starting tweet 24 of 2014-01-02
Starting tweet 24 of 2014-01-01
Starting tweet 25 of 2014-01-03
Starting tweet 25 of 2014-01-02
Starting tweet 25 of 2014-01-01
Starting tweet 26 of 2014-01-03
Starting tweet 26 of 2014-01-02
Starting tweet 26 of 2014-01-01
Starting tweet 27 of 2014-01-03
Starting tweet 27 of 2014-01-02
Starting tweet 27 of 2014-01-01
Starting tweet 28 of 2014-01-03
Starting tweet 28 of 2014-01-02
Starting tweet 28 of 2014-01-01
Starting tweet 29 of 2014-01-03
Starting tweet 29 of 2014-01-01
Starting tweet 29 of 2014-01-02
Starting tweet 30 of 2014-01-03
Starting tweet 30 of 2014-01-01
Starting tweet 30 of 2014-01-02
Starting tweet 31 of 2014-01-03
Starting tweet 31 of 2014-01-02
Starting tweet 31 of 2014-01-01
Starting tweet 32 of 2014-01-02
Starting tweet 32 of 2014-01-03
Starting tweet 32 of 2014-01-01
Starting tweet 33 of 2014-01-02
Starting tweet 33 of 2014-01-03
Starting tweet 33 of 2014-01-01
Starting tweet 34 of 2014-01-02
Starting tweet 34 of 2014-01-03
Starting tweet 34 of 2014-01-01
Starting tweet 35 of 2014-01-02
Starting tweet 35 of 2014-01-03
Starting tweet 35 of 2014-01-01
Starting tweet 36 of 2014-01-02
Starting tweet 36 of 2014-01-01
Starting tweet 36 of 2014-01-03
Starting tweet 37 of 2014-01-01
Starting tweet 37 of 2014-01-02
Starting tweet 37 of 2014-01-03
Starting tweet 38 of 2014-01-01
Starting tweet 38 of 2014-01-02
Starting tweet 39 of 2014-01-01
Starting tweet 38 of 2014-01-03
Starting tweet 39 of 2014-01-02
Starting tweet 39 of 2014-01-03
Starting tweet 40 of 2014-01-01
Starting tweet 40 of 2014-01-02
Starting tweet 41 of 2014-01-01
Starting tweet 40 of 2014-01-03
Starting tweet 41 of 2014-01-02
Starting tweet 42 of 2014-01-01
Starting tweet 41 of 2014-01-03
Starting tweet 43 of 2014-01-01
Starting tweet 42 of 2014-01-02
Starting tweet 44 of 2014-01-01
Starting tweet 42 of 2014-01-03
Starting tweet 43 of 2014-01-02
Starting tweet 43 of 2014-01-03
Starting tweet 45 of 2014-01-01
Starting tweet 44 of 2014-01-02
Starting tweet 44 of 2014-01-03
Starting tweet 46 of 2014-01-01
Starting tweet 45 of 2014-01-02
Starting tweet 45 of 2014-01-03
Starting tweet 47 of 2014-01-01
Starting tweet 46 of 2014-01-02
Starting tweet 46 of 2014-01-03
Starting tweet 48 of 2014-01-01
Starting tweet 47 of 2014-01-02
Starting tweet 47 of 2014-01-03
Starting tweet 48 of 2014-01-02
Starting tweet 49 of 2014-01-01
Starting tweet 48 of 2014-01-03
Starting tweet 49 of 2014-01-02
Starting tweet 50 of 2014-01-01
Starting tweet 49 of 2014-01-03
Starting tweet 50 of 2014-01-02
Starting tweet 51 of 2014-01-01
Starting tweet 50 of 2014-01-03
Starting tweet 52 of 2014-01-01
Starting tweet 51 of 2014-01-02
Starting tweet 51 of 2014-01-03
Starting tweet 52 of 2014-01-02
Starting tweet 53 of 2014-01-01
Starting tweet 52 of 2014-01-03
Starting tweet 53 of 2014-01-02
Starting tweet 54 of 2014-01-01
Starting tweet 53 of 2014-01-03
Starting tweet 54 of 2014-01-02
Starting tweet 55 of 2014-01-01
Starting tweet 54 of 2014-01-03
Starting tweet 56 of 2014-01-01
Starting tweet 55 of 2014-01-02
Starting tweet 55 of 2014-01-03
Starting tweet 56 of 2014-01-02
Starting tweet 57 of 2014-01-01
Starting tweet 56 of 2014-01-03
Starting tweet 57 of 2014-01-02
Starting tweet 58 of 2014-01-01
Starting tweet 57 of 2014-01-03
Starting tweet 58 of 2014-01-02
Starting tweet 58 of 2014-01-03
Starting tweet 59 of 2014-01-01
Starting tweet 60 of 2014-01-01
Starting tweet 59 of 2014-01-03
Starting tweet 59 of 2014-01-02
Starting tweet 60 of 2014-01-03
Starting tweet 61 of 2014-01-01
Starting tweet 60 of 2014-01-02
Starting tweet 61 of 2014-01-03
Starting tweet 62 of 2014-01-01
Starting tweet 61 of 2014-01-02
Starting tweet 62 of 2014-01-03
Starting tweet 63 of 2014-01-01
Starting tweet 62 of 2014-01-02
Starting tweet 63 of 2014-01-03
Starting tweet 64 of 2014-01-01
Starting tweet 63 of 2014-01-02
Starting tweet 64 of 2014-01-03
Starting tweet 65 of 2014-01-01
Starting tweet 64 of 2014-01-02
Starting tweet 65 of 2014-01-03
Starting tweet 66 of 2014-01-01
Starting tweet 66 of 2014-01-03
Starting tweet 65 of 2014-01-02
2014-01-04
Starting tweet 1 of 2014-01-04
Starting tweet 67 of 2014-01-03
Starting tweet 66 of 2014-01-02
Starting tweet 2 of 2014-01-04
Starting tweet 67 of 2014-01-02
Starting tweet 68 of 2014-01-03
Starting tweet 3 of 2014-01-04
Starting tweet 68 of 2014-01-02
Starting tweet 69 of 2014-01-03
Starting tweet 4 of 2014-01-04
Starting tweet 70 of 2014-01-03
Starting tweet 69 of 2014-01-02
Starting tweet 5 of 2014-01-04
Starting tweet 71 of 2014-01-03
Starting tweet 70 of 2014-01-02
Starting tweet 72 of 2014-01-03
Starting tweet 6 of 2014-01-04
Starting tweet 71 of 2014-01-02
Starting tweet 73 of 2014-01-03
Starting tweet 7 of 2014-01-04
Starting tweet 74 of 2014-01-03
Starting tweet 72 of 2014-01-02
Starting tweet 75 of 2014-01-03
Starting tweet 8 of 2014-01-04
Starting tweet 73 of 2014-01-02
Starting tweet 76 of 2014-01-03
Starting tweet 9 of 2014-01-04
Starting tweet 74 of 2014-01-02
Starting tweet 77 of 2014-01-03
Starting tweet 75 of 2014-01-02
Starting tweet 10 of 2014-01-04
Starting tweet 78 of 2014-01-03
Starting tweet 76 of 2014-01-02
Starting tweet 11 of 2014-01-04
Starting tweet 77 of 2014-01-02
Starting tweet 79 of 2014-01-03
Starting tweet 80 of 2014-01-03
Starting tweet 12 of 2014-01-04
Starting tweet 78 of 2014-01-02
Starting tweet 79 of 2014-01-02
Starting tweet 13 of 2014-01-04
Starting tweet 81 of 2014-01-03
Starting tweet 80 of 2014-01-02
Starting tweet 14 of 2014-01-04
Starting tweet 82 of 2014-01-03
Starting tweet 81 of 2014-01-02
Starting tweet 15 of 2014-01-04
Starting tweet 83 of 2014-01-03
Starting tweet 82 of 2014-01-02
Starting tweet 84 of 2014-01-03
Starting tweet 16 of 2014-01-04
Starting tweet 83 of 2014-01-02
Starting tweet 17 of 2014-01-04
Starting tweet 85 of 2014-01-03
Starting tweet 84 of 2014-01-02
Starting tweet 18 of 2014-01-04
Starting tweet 86 of 2014-01-03
Starting tweet 85 of 2014-01-02
Starting tweet 19 of 2014-01-04
Starting tweet 87 of 2014-01-03
Starting tweet 88 of 2014-01-03
Starting tweet 86 of 2014-01-02
Starting tweet 20 of 2014-01-04
Starting tweet 89 of 2014-01-03
Starting tweet 87 of 2014-01-02
Starting tweet 21 of 2014-01-04
Starting tweet 90 of 2014-01-03
Starting tweet 88 of 2014-01-02
Starting tweet 22 of 2014-01-04
Starting tweet 91 of 2014-01-03
Starting tweet 89 of 2014-01-02
Starting tweet 23 of 2014-01-04
Starting tweet 92 of 2014-01-03
Starting tweet 24 of 2014-01-04
Starting tweet 90 of 2014-01-02
Starting tweet 93 of 2014-01-03
Starting tweet 91 of 2014-01-02
Starting tweet 25 of 2014-01-04
Starting tweet 94 of 2014-01-03
Starting tweet 92 of 2014-01-02
Starting tweet 26 of 2014-01-04
Starting tweet 95 of 2014-01-03
Starting tweet 93 of 2014-01-02
Starting tweet 96 of 2014-01-03
Starting tweet 27 of 2014-01-04
Starting tweet 94 of 2014-01-02
Starting tweet 28 of 2014-01-04
Starting tweet 97 of 2014-01-03
Starting tweet 95 of 2014-01-02
Starting tweet 29 of 2014-01-04
Starting tweet 98 of 2014-01-03
Starting tweet 96 of 2014-01-02
Starting tweet 30 of 2014-01-04
Starting tweet 99 of 2014-01-03
Starting tweet 97 of 2014-01-02
Starting tweet 31 of 2014-01-04
Starting tweet 100 of 2014-01-03
Starting tweet 98 of 2014-01-02
Starting tweet 32 of 2014-01-04
Starting tweet 101 of 2014-01-03
Starting tweet 99 of 2014-01-02
Starting tweet 33 of 2014-01-04
Starting tweet 102 of 2014-01-03
Starting tweet 100 of 2014-01-02
Starting tweet 34 of 2014-01-04
Starting tweet 103 of 2014-01-03
Starting tweet 101 of 2014-01-02
Starting tweet 35 of 2014-01-04
Starting tweet 104 of 2014-01-03
Starting tweet 102 of 2014-01-02
Starting tweet 36 of 2014-01-04
Starting tweet 105 of 2014-01-03
Starting tweet 103 of 2014-01-02
Starting tweet 37 of 2014-01-04
Starting tweet 106 of 2014-01-03
Starting tweet 104 of 2014-01-02
Starting tweet 38 of 2014-01-04
Starting tweet 107 of 2014-01-03
Starting tweet 105 of 2014-01-02
Starting tweet 108 of 2014-01-03
Starting tweet 39 of 2014-01-04
Starting tweet 106 of 2014-01-02
Starting tweet 107 of 2014-01-02
Starting tweet 109 of 2014-01-03
Starting tweet 40 of 2014-01-04
Starting tweet 108 of 2014-01-02
Starting tweet 110 of 2014-01-03
Starting tweet 41 of 2014-01-04
Starting tweet 109 of 2014-01-02
Starting tweet 110 of 2014-01-02
Starting tweet 111 of 2014-01-03
Starting tweet 42 of 2014-01-04
Starting tweet 111 of 2014-01-02
Starting tweet 112 of 2014-01-03
Starting tweet 43 of 2014-01-04
Starting tweet 113 of 2014-01-03
Starting tweet 112 of 2014-01-02
Starting tweet 44 of 2014-01-04
Starting tweet 114 of 2014-01-03
Starting tweet 113 of 2014-01-02
Starting tweet 45 of 2014-01-04
Starting tweet 115 of 2014-01-03
Starting tweet 114 of 2014-01-02
Starting tweet 46 of 2014-01-04
Starting tweet 115 of 2014-01-02
Starting tweet 116 of 2014-01-03
Starting tweet 47 of 2014-01-04
Starting tweet 117 of 2014-01-03
Starting tweet 116 of 2014-01-02
Starting tweet 118 of 2014-01-03
Starting tweet 48 of 2014-01-04
Starting tweet 117 of 2014-01-02
Starting tweet 119 of 2014-01-03
Starting tweet 118 of 2014-01-02
Starting tweet 49 of 2014-01-04
Starting tweet 120 of 2014-01-03
Starting tweet 119 of 2014-01-02
Starting tweet 50 of 2014-01-04
Starting tweet 121 of 2014-01-03
Starting tweet 120 of 2014-01-02
Starting tweet 51 of 2014-01-04
Starting tweet 122 of 2014-01-03
Starting tweet 121 of 2014-01-02
Starting tweet 52 of 2014-01-04
Starting tweet 123 of 2014-01-03
Starting tweet 122 of 2014-01-02
Starting tweet 53 of 2014-01-04
Starting tweet 123 of 2014-01-02
Starting tweet 124 of 2014-01-03
Starting tweet 54 of 2014-01-04
Starting tweet 124 of 2014-01-02
Starting tweet 125 of 2014-01-03
Starting tweet 55 of 2014-01-04
Starting tweet 125 of 2014-01-02
Starting tweet 126 of 2014-01-03
Starting tweet 56 of 2014-01-04
Starting tweet 126 of 2014-01-02
Starting tweet 57 of 2014-01-04
Starting tweet 127 of 2014-01-03
Starting tweet 127 of 2014-01-02
Starting tweet 58 of 2014-01-04
Starting tweet 128 of 2014-01-03
Starting tweet 128 of 2014-01-02
Starting tweet 129 of 2014-01-03
Starting tweet 59 of 2014-01-04
Starting tweet 129 of 2014-01-02
Starting tweet 60 of 2014-01-04
Starting tweet 130 of 2014-01-03
Starting tweet 130 of 2014-01-02
Starting tweet 61 of 2014-01-04
Starting tweet 131 of 2014-01-03
Starting tweet 131 of 2014-01-02
Starting tweet 132 of 2014-01-03
Starting tweet 62 of 2014-01-04
Starting tweet 63 of 2014-01-04
Starting tweet 132 of 2014-01-02
Starting tweet 133 of 2014-01-03
Starting tweet 64 of 2014-01-04
Starting tweet 133 of 2014-01-02
Starting tweet 134 of 2014-01-03
Starting tweet 65 of 2014-01-04
Starting tweet 135 of 2014-01-03
Starting tweet 134 of 2014-01-02
Starting tweet 66 of 2014-01-04
Starting tweet 136 of 2014-01-03
Starting tweet 135 of 2014-01-02
Starting tweet 137 of 2014-01-03
Starting tweet 67 of 2014-01-04
Starting tweet 136 of 2014-01-02
Starting tweet 138 of 2014-01-03
Starting tweet 68 of 2014-01-04
Starting tweet 137 of 2014-01-02
Starting tweet 69 of 2014-01-04
Starting tweet 138 of 2014-01-02
Starting tweet 70 of 2014-01-04
Starting tweet 139 of 2014-01-02
Starting tweet 71 of 2014-01-04
Starting tweet 140 of 2014-01-02
Starting tweet 72 of 2014-01-04
Starting tweet 141 of 2014-01-02
Starting tweet 73 of 2014-01-04
Starting tweet 74 of 2014-01-04
Starting tweet 75 of 2014-01-04
Starting tweet 76 of 2014-01-04
Starting tweet 77 of 2014-01-04
Starting tweet 78 of 2014-01-04
Starting tweet 79 of 2014-01-04
Starting tweet 80 of 2014-01-04
Starting tweet 81 of 2014-01-04
Starting tweet 82 of 2014-01-04
Starting tweet 83 of 2014-01-04
Starting tweet 84 of 2014-01-04
Starting tweet 85 of 2014-01-04
Starting tweet 86 of 2014-01-04