Import issue CSV files into the issues and tweets DFs
@@ -1,93 +1,3 @@
require(stringr)
library(foreach)
library(doParallel)
# MATCH TWEETS ------------------------------------------------------------
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
issues <- data.frame(date = drange)
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
issues[issueheads] <- 0
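# Editorial note: issues.xml is not included in this commit. For the code
# above to work, xmlToList() must yield a named list with one element per
# issue, each holding that issue's tag strings; a hypothetical minimal file:
# <issues>
#   <!-- transport -->  (comment lines are removed by the str_replace_all above)
#   <i10.trans>
#     <tag1>bahn</tag1>
#     <tag2>autobahn</tag2>
#   </i10.trans>
# </issues>
# names(issuelist) is then e.g. "i10.trans", which also names the per-issue
# CSV files (matched-ids/i10.trans.csv) imported further down.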
tweets$issue <- ""
tweets$tags <- ""
tagexpand <- c("", "s", "n", "en", "er", "e")
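# Editorial sketch of the suffix expansion used in the loop below: one tag is
# turned into its candidate inflected forms (the word "steuer" is hypothetical).
str_c("steuer", tagexpand)
# [1] "steuer"  "steuers" "steuern" "steueren" "steuerer" "steuere"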
# Parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl <- makeCluster(4)
registerDoParallel(cl)
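# Editorial note: under %dopar% every worker operates on its own copy of the
# data, so in-place updates to `tweets` or `issues` inside the loop would be
# lost; matches are therefore written to per-issue CSV files and merged back
# in after the cluster is stopped. Only a worker's last expression is
# returned, combined via .combine, e.g.:
foreach(i = 1:3, .combine = c) %dopar% i^2  # c(1, 4, 9)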
foreach(d = 1:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# cat(paste("Starting tweet", t, "of",as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Select tweet's text and remove hashtag indicators (#); case is handled inside smartPatternMatch
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Set Levenshtein distance depending on char length
if(curchars <= 4) {
curdistance <- 0
} else {
curdistance <- 1
}
# Match current tweet with tag: tags of 4 chars or fewer must match exactly, longer ones allow a Levenshtein distance of 1
tags_found <- NULL
# Match the tweet with each variation of tagexpand
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
}
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found == TRUE) {
# # Raise number of findings on this day for this issue by 1
# issues[d,curissue] <- issues[d,curissue] + 1
#
# # Add issue and first matched tag of tweet to tweets-DF
# oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
# tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
# oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
# tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curissue,";",curtag), curfile, append = TRUE)
cat(paste("Match!\n"), file="issuecomp-analysis.log", append=TRUE)
# data.frame(date=curdate, issue=curissue)
break # next issue, no more tags from same issue
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
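# Editorial sketch: smartPatternMatch() and checkAcronym() are defined in
# issuecomp-functions.R, which this commit does not show. Judging from the
# call sites, smartPatternMatch() behaves roughly like base R's agrepl(),
# i.e. matching within a maximum Levenshtein distance, with case-sensitivity
# switched on for acronyms. A hypothetical stand-in, not the actual
# implementation:
smartPatternMatchSketch <- function(text, tag, distance, acro) {
  agrepl(tag, text, max.distance = distance, ignore.case = !acro, fixed = TRUE)
}
smartPatternMatchSketch("Die Steuern steigen", "steuern", 1, FALSE)  # TRUE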
require(lubridate)
require(XML)
require(ggplot2)
@@ -95,10 +5,19 @@ require(reshape2)
require(stringr)
library(foreach)
library(doParallel)
# MATCH TWEETS ------------------------------------------------------------
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
source("issuecomp-functions.R")
getwd()
setwd("~/Dokumente/Uni/Aktuell/BA-Arbeit/uni-ba-issuecomp")
getwd()
list.files()
list.files("matched-ids/")
load(file = "tweets_untagged.RData")
issues <- data.frame(date = drange)
# Create date range
date_start <- as.Date("2014-01-01")
date_end <- as.Date("2014-12-31")
drange <- as.integer(date_end - date_start)
drange <- date_start + days(0:drange)
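# Quick check (editorial): drange now holds one Date per day of 2014.
length(drange)  # 365
head(drange, 2) # "2014-01-01" "2014-01-02"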
issues <- data.frame(date = drange)
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
@@ -107,86 +26,148 @@ issueheads <- names(issuelist)
issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
tagexpand <- c("", "s", "n", "en", "er", "e")
# Parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl <- makeCluster(4)
registerDoParallel(cl)
foreach(d = 1:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# cat(paste("Starting tweet", t, "of",as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Select tweet's text and remove hashtag indicators (#); case is handled inside smartPatternMatch
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Set Levenshtein distance depending on char length
if(curchars <= 4) {
curdistance <- 0
} else {
curdistance <- 1
}
# Match current tweet with tag: tags of 4 chars or fewer must match exactly, longer ones allow a Levenshtein distance of 1
tags_found <- NULL
# Match the tweet with each variation of tagexpand
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
}
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found == TRUE) {
# # Raise number of findings on this day for this issue by 1
# issues[d,curissue] <- issues[d,curissue] + 1
#
# # Add issue and first matched tag of tweet to tweets-DF
# oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
# tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
# oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
# tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curissue,";",curtag), curfile, append = TRUE)
# cat(paste("Match!\n"), file="issuecomp-analysis.log", append=TRUE)
# data.frame(date=curdate, issue=curissue)
break # next issue, no more tags from same issue
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
stopCluster(cl)
drange
drange[40]
drange[50]
View(issues)
list.files("matched-ids/")
results <- list.files("matched-ids/")
results
read.csv("matched-ids/i10.trans.csv")
read.csv("matched-ids/i10.trans.csv", sep=";")
read.csv("matched-ids/i10.trans.csv", sep=";", stringsAsFactors=F)
read.csv("matched-ids/i10.trans.csv", sep=";", stringsAsFactors=T)
reesult_files <- read.csv("matched-ids/i10.trans.csv", sep=";", stringsAsFactors=F)
View(reesult_files)
result_files <- read.csv("matched-ids/i10.trans.csv", sep=";", colClasses=c("date", "character", "character", "character"))
result_files <- read.csv("matched-ids/i10.trans.csv", sep=";", colClasses=c("character", "character", "character", "character"))
rm(reesult_files)
View(result_files)
nrow(result_files)
result_files <- result_files(!duplicated(result_files))
result_files <- result_files[!duplicated(result_files)]
result_files <- result_files[!duplicated(result_files), ]
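# Editorial note on the three attempts above: duplicated() on a data frame
# flags repeated rows, and rows must be dropped with a row subset, i.e.
# df[!duplicated(df), ] -- so only the last of the three lines is valid R.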
nrow(result_files)
result_files <- read.csv("matched-ids/i10.trans.csv", sep=";", colClasses=c("character", "character", "character", "character"), header=F)
View(result_files)
read.results
results
setwd("matched-ids/")
list.files("")
getwd()
list.files()
results <- list.files()
results
results_cat <- read.csv(results, sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results_cat <- read.csv(results[1], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results_cat
View(results_cat)
source("issuecomp-functions.R")
setwd("~/Dokumente/Uni/Aktuell/BA-Arbeit/uni-ba-issuecomp")
source("issuecomp-functions.R")
insertRow
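# Editorial note: insertRow() comes from issuecomp-functions.R (not shown in
# this commit). From its use below it simply appends rows to a data frame;
# a minimal stand-in would be:
# insertRow <- function(existingDF, newrows) rbind(existingDF, newrows)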
results_temp <- read.csv(results[2], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
setwd("matched-ids/")
results_temp <- read.csv(results[2], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
rm(result_files)
insertRow(existingDF = results_cat, results_temp)
rm(results_cat)
for(r in 1:length(results)) {
if(r == 1) {
results_cat <- read.csv(results[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
insertRow(results_cat, results_temp)
}
}
for(r in 1:length(results)) {
if(r == 1) {
results_cat <- read.csv(results[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results_cat insertRow(results_cat, results_temp)
}
}
for(r in 1:length(results)) {
if(r == 1) {
results_cat <- read.csv(results[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results_cat <- insertRow(results_cat, results_temp)
}
}
View(results_cat)
results_cat[20000]
results_cat[20000, ]
rm(r, results_temp)
results_cat <- results_cat[!duplicated(results_cat), ]
View(results_cat)
rm(results, results_cat)
results_files <- list.files()
for(r in 1:length(results)) {
if(r == 1) {
results <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results <- insertRow(results_cat, results_temp)
}
}
rm(r, results_temp)
results <- results[!duplicated(results), ]
results_files <- list.files()
for(r in 1:length(results_files)) {
if(r == 1) {
results <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results <- insertRow(results, results_temp)
}
}
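# Editorial sketch: assuming insertRow() simply rbind()s (see note above),
# the final import loop can be condensed to a one-liner with the same effect
# (working directory assumed to be matched-ids/):
results <- do.call(rbind, lapply(list.files(), read.csv, sep = ";",
                                 colClasses = rep("character", 4), header = FALSE))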
rm(r, results_temp)
results <- results[!duplicated(results), ]
View(results)
View(issues)
row.names(results) <- NULL
View(results)
rownames(results)
row.names(results)
names(results)
View(tweets)
View(tweets)
names(results) <- c("date", "id_str", "issue", "tags")
View(results)
results_test <- results[order(results$id_str)]
results_test <- results[order(results$id_str), ]
View(results_test)
results_files <- list.files()
for(r in 1:length(results_files)) {
if(r == 1) {
results <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results <- insertRow(results, results_temp)
}
}
rm(r, results_temp)
rm(r, results_temp, results_files)
results <- results[!duplicated(results), ]
names(results)
names(results) <- c("date", "id_str", "issue", "tags")
View(results)
results_test <- results[order(results$id_str), ]
row.names(results) <- NULL
results <- results[order(results$id_str), ]
row.names(results) <- NULL
View(results)
rm(results_test)
View(issues)
as.character(results$date[2])
class(results$date)
class(issues$date)
View(issues)
as.character(issues$date[2])
issues$date[2]
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
require(lubridate)
require(XML)
require(ggplot2)
@@ -194,319 +175,68 @@ require(reshape2)
require(stringr)
library(foreach)
library(doParallel)
drange[70]
drange[80]
drange[90]
cl <- makeCluster(4)
registerDoParallel(cl)
foreach(d = 51:90, .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# cat(paste("Starting tweet", t, "of",as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Select tweet's text and remove hashtag indicators (#); case is handled inside smartPatternMatch
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
setwd("~/Dokumente/Uni/Aktuell/BA-Arbeit/uni-ba-issuecomp")
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
issues[issueheads] <- 0
curdate <- as.character(results$date[3])
curissue <- as.character(results$issue[3])
curdate
curissue
issues[curdate, curissue] <- issues[curdate, curissue] + 1
View(issues)
issues <- data.frame(date = drange)
issues[issueheads] <- 0
View(issues)
issues[issues[, "date"] == curdate, curissue] <- issues[issues[, "date"] == curdate, curissue] + 1
View(issues)
for(r in 1:nrow(results)) {
curdate <- as.character(results$date[r])
curissue <- as.character(results$issue[r])
issues[issues[, "date"] == curdate, curissue] <- issues[issues[, "date"] == curdate, curissue] + 1
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
View(issues)
issues[issueheads] <- 0
View(issues)
for(r in 1:nrow(results)) {
curdate <- as.character(results$date[r])
curid <- as.character(results$id_str[r])
curissue <- as.character(results$issue[r])
curtag <- as.character(results$tags[r])
# Update issue counter (date and issue)
issues[issues[, "date"] == curdate, curissue] <- issues[issues[, "date"] == curdate, curissue] + 1
# Update tweet dataframe (id, issue and tags)
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ",")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ",")
}
View(tweets)
tweets$issue <- ""
tweets$tags <- ""
View(tweets)
issues[issueheads] <- 0
for(r in 1:nrow(results)) {
curdate <- as.character(results$date[r])
curid <- as.character(results$id_str[r])
curissue <- as.character(results$issue[r])
curtag <- as.character(results$tags[r])
cat("Sorting match", r, "from", nrow(results), "\n")
# Update issue counter (date and issue)
issues[issues[, "date"] == curdate, curissue] <- issues[issues[, "date"] == curdate, curissue] + 1
# Update tweet dataframe (id, issue and tags)
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ",")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ",")
}
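# Editorial note: after this import loop a matched tweet carries
# comma-separated (and comma-terminated) issue and tag lists, e.g.
# (hypothetical values):
# tweets[tweets$id_str == curid, c("issue", "tags")]
# #      issue        tags
# # 42   "i10.trans," "bahn,"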
# Set Levenshtein distance depending on char length
if(curchars <= 4) {
curdistance <- 0
} else {
curdistance <- 1
}
# Match current tweet with tag: tags of 4 chars or fewer must match exactly, longer ones allow a Levenshtein distance of 1
tags_found <- NULL
# Match the tweet with each variation of tagexpand
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
}
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found == TRUE) {
# # Raise number of findings on this day for this issue by 1
# issues[d,curissue] <- issues[d,curissue] + 1
#
# # Add issue and first matched tag of tweet to tweets-DF
# oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
# tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
# oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
# tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curissue,";",curtag), curfile, append = TRUE)
# cat(paste("Match!\n"), file="issuecomp-analysis.log", append=TRUE)
# data.frame(date=curdate, issue=curissue)
break # next issue, no more tags from same issue
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
stopCluster(cl)
drange[121]
cl <- makeCluster(4)
registerDoParallel(cl)
foreach(d = 91:120, .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# cat(paste("Starting tweet", t, "of",as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Select tweet's text and remove hashtag indicators (#); case is handled inside smartPatternMatch
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Set Levenshtein distance depending on char length
if(curchars <= 4) {
curdistance <- 0
} else {
curdistance <- 1
}
# Match current tweet with tag: tags of 4 chars or fewer must match exactly, longer ones allow a Levenshtein distance of 1
tags_found <- NULL
# Match the tweet with each variation of tagexpand
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
}
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found == TRUE) {
# # Raise number of findings on this day for this issue by 1
# issues[d,curissue] <- issues[d,curissue] + 1
#
# # Add issue and first matched tag of tweet to tweets-DF
# oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
# tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
# oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
# tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curissue,";",curtag), curfile, append = TRUE)
# cat(paste("Match!\n"), file="issuecomp-analysis.log", append=TRUE)
# data.frame(date=curdate, issue=curissue)
break # next issue, no more tags from same issue
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
stopCluster(cl)
drange[102]
require(lubridate)
require(XML)
require(ggplot2)
require(reshape2)
require(stringr)
library(foreach)
library(doParallel)
cl <- makeCluster(4)
registerDoParallel(cl)
foreach(d = 101:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# cat(paste("Starting tweet", t, "of",as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Select tweet's text and remove hashtag indicators (#); case is handled inside smartPatternMatch
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Set Levenshtein distance depending on char length
if(curchars <= 4) {
curdistance <- 0
} else {
curdistance <- 1
}
# Match current tweet with tag: tags of 4 chars or fewer must match exactly, longer ones allow a Levenshtein distance of 1
tags_found <- NULL
# Match the tweet with each variation of tagexpand
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
}
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found == TRUE) {
# # Raise number of findings on this day for this issue by 1
# issues[d,curissue] <- issues[d,curissue] + 1
#
# # Add issue and first matched tag of tweet to tweets-DF
# oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
# tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
# oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
# tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curissue,";",curtag), curfile, append = TRUE)
# cat(paste("Match!\n"), file="issuecomp-analysis.log", append=TRUE)
# data.frame(date=curdate, issue=curissue)
break # next issue, no more tags from same issue
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
require(lubridate)
require(XML)
require(ggplot2)
require(reshape2)
require(stringr)
library(foreach)
library(doParallel)
cl <- makeCluster(3)
registerDoParallel(cl)
foreach(d = 101:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Put all tweets from specific day in a temporary DF
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# cat(paste("Starting tweet", t, "of",as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Select tweet's text and remove hashtag indicators (#); case is handled inside smartPatternMatch
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "#", "")
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
curfile <- str_c(id_folder,"/",curissue,".csv")
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
if(curchars <= 4) {
curacro <- checkAcronym(string = curtag, chars = curchars)
} else {
curacro <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms
if(!curacro) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Set Levenshtein distance depending on char length
if(curchars <= 4) {
curdistance <- 0
} else {
curdistance <- 1
}
# Match current tweet with tag: tags of 4 chars or fewer must match exactly, longer ones allow a Levenshtein distance of 1
tags_found <- NULL
# Match the tweet with each variation of tagexpand
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
}
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found == TRUE) {
# # Raise number of findings on this day for this issue by 1
# issues[d,curissue] <- issues[d,curissue] + 1
#
# # Add issue and first matched tag of tweet to tweets-DF
# oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
# tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
# oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
# tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curissue,";",curtag), curfile, append = TRUE)
# cat(paste("Match!\n"), file="issuecomp-analysis.log", append=TRUE)
# data.frame(date=curdate, issue=curissue)
break # next issue, no more tags from same issue
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
stopCluster(cl)
drange[200]
drange[300]
drange[280]
drange[270]
drange[259]
View(issues)
View(tweets)
View(tweets)
save(tweets, file="tweets_tagged.RData")
@@ -7,6 +7,7 @@ library(foreach)
library(doParallel)

source("issuecomp-functions.R")
setwd("~/Dokumente/Uni/Aktuell/BA-Arbeit/uni-ba-issuecomp")


load(file = "tweets_untagged.RData")
@@ -17,13 +18,8 @@ date_end <- as.Date("2014-12-31")
drange <- as.integer(date_end - date_start)
drange <- date_start + days(0:drange)


# MATCH TWEETS ------------------------------------------------------------

id_folder <- "matched-ids"
#unlink(id_folder, recursive = TRUE)
#dir.create(id_folder)

# Import issues and prepare everything
# Will only be filled after the large categorisation loop
issues <- data.frame(date = drange)
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
@@ -33,15 +29,24 @@ issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""


# MATCH TWEETS ------------------------------------------------------------

# Create folder where all results will be saved (safer for backup and import)
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)

# Tag expansion for plural, genitive etc.
tagexpand <- c("", "s", "n", "en", "er", "e")

# Parallelisation
# Parameters for parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl <- makeCluster(4)
registerDoParallel(cl)

foreach(d = 260:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# START CAT LOOP
foreach(d = 1:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
@@ -125,12 +130,58 @@ foreach(d = 260:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar%
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
stopCluster(cl)



# IMPORT RESULTS ----------------------------------------------------------

# Import all files which have been generated in the categorisation run above.
setwd("matched-ids/")
results_files <- list.files()
for(r in 1:length(results_files)) {
if(r == 1) {
results <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results <- insertRow(results, results_temp)
}
}
rm(r, results_temp, results_files)
# Remove duplicates, sort chronologically
results <- results[!duplicated(results), ]
names(results) <- c("date", "id_str", "issue", "tags")
results <- results[order(results$id_str), ]
row.names(results) <- NULL


# Now import all results in the dataframes "issues" and "tweets"
# (which wasn't possible in the categorisation process because of parallelisation)

# Reset issues counter
# issues[issueheads] <- 0

for(r in 1:nrow(results)) {
curdate <- as.character(results$date[r])
curid <- as.character(results$id_str[r])
curissue <- as.character(results$issue[r])
curtag <- as.character(results$tags[r])

cat("Sorting match", r, "of 62827 \n")

# Update issue counter (date and issue)
issues[issues[, "date"] == curdate, curissue] <- issues[issues[, "date"] == curdate, curissue] + 1

# Update tweet dataframe (id, issue and tags)
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ",")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ",")
}



# SAVING ------------------------------------------------------------------

row.names(tweets) <- NULL
write.csv(tweets, "tweets.csv")
save(tweets, file="tweets.RData")
save(tweets, file="tweets_tagged.RData")