require(lubridate)
require(XML)
require(ggplot2)
require(reshape2)
require(stringr)
require(foreach)
require(doParallel)
source("issuecomp-functions.R")
setwd("~/Dokumente/Uni/Aktuell/BA-Arbeit/uni-ba-issuecomp")
load(file = "tweets_untagged.RData")
# Create date range
date_start <- as.Date("2014-01-01")
date_end <- as.Date("2014-12-31")
drange <- as.integer(date_end - date_start)
drange <- date_start + days(0:drange)
# Import issues and prepare everything
# Will only be filled after the large categorisation loop
issues <- data.frame(date = drange)
issuelist <- readLines("issues-v3.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
issues[issueheads] <- 0
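# "issues" now holds one row per day of 2014 and one zero-initialised
# count column per issue category taken from the XML file.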
tweets$issue <- ""
tweets$tags <- ""
# MATCH TWEETS ------------------------------------------------------------
# Create folder where all results will be saved (safer for backup and import)
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
# Tag expansion for plural, genitive etc.
tagexpand <- c("", "s", "n", "en", "er", "e")
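# e.g. a tag "X" will later also be tried as "Xs", "Xn", "Xen", "Xer" and "Xe"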
# Parameters for parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl <- makeCluster(4)
registerDoParallel(cl)
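# Each worker writes its matches to per-day CSV files (see below) instead of
# updating "issues"/"tweets" directly, since %dopar% workers run in separate
# processes and cannot safely modify the shared data frames.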
# START CATEGORISATION LOOP
foreach(d = 1:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Put all tweets from the current day into a temporary data frame
tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
for(t in 1:nrow(tweets_curday)){
# cat(paste("Starting tweet", t, "of",as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
# Select the tweet's text, replace hyphens with spaces, strip all non-alphanumeric characters (this also removes the '#' and '@' of hashtags and mentions) and collapse multiple spaces
curtext <- as.character(tweets_curday$text[t])
curtext <- str_replace_all(curtext, "-", " ")
curtext <- str_replace_all(curtext, "[^[:alnum:] ]", "")
curtext <- str_replace_all(curtext, " ", " ") # remove double spaces
curid <- as.character(tweets_curday$id_str[t])
# Now test each single issue (not tag!)
for(i in 1:length(issueheads)) {
curissue <- issueheads[i]
curtags <- as.character(issuelist[[curissue]])
# curfile <- str_c(id_folder,"/",curissue,".csv")
curfile <- str_c(id_folder,"/",curdate,".csv") # Possible solution to avoid buggy files when using many processes
# Now test all tags of a single issue
for(s in 1:length(curtags)) {
curtag <- curtags[s]
curchars <- nchar(curtag, type = "chars")
# Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
curacro <- checkAcronym(string = curtag)
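# checkAcronym() comes from issuecomp-functions.R; presumably it returns TRUE
# for all-uppercase tags (assumption), which are then matched case-sensitively
# and without any Levenshtein tolerance.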
# Check if tag is some kind of specific hashtag. If so, do not handle as acronym, but don't expand it either
if(str_detect(curtag, "^#")) {
curacro <- FALSE # hashtags like #WM2014 are also written as #wm2014, so we need case-insensitivity
curhash <- TRUE # But mark it as a hashtag, so it doesn't get expanded or matched with a Levenshtein distance > 0
curtag <- str_replace(curtag, "#", "")
curchars <- curchars - 1
} else {
curhash <- FALSE
}
# Now expand the current tag by possible suffixes that may be plural forms.
# Only do this if it isn't an acronym or a specific hashtag
if(!curacro && !curhash) {
for(e in 1:length(tagexpand)) {
curtag[e] <- str_c(curtag[1], tagexpand[e])
}
}
# Set Levenshtein distance depending on char length, acronym and hashtag status
if(curchars <= 7 || curacro || curhash) { # Exact match for short tags, acronyms and hashtags
curdistance <- 0
} else {
curdistance <- 1
}
# Match the current tweet with the tag.
# Allow a Levenshtein distance of 1 if the tag has 8 or more characters and is neither a hashtag nor an acronym.
# Make the match case-sensitive if the tag is an acronym.
tags_found <- NULL
# Match the tweet with each variation of tagexpand
for(e in 1:length(curtag)) {
tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
}
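# smartPatternMatch() is defined in issuecomp-functions.R (not shown here);
# presumably it performs a fuzzy, case-aware match of the tag against the
# tweet text, roughly like this (assumption only):
#   agrepl(curtag[e], curtext, max.distance = curdistance, ignore.case = !curacro)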
tags_found <- any(tags_found)
curtag <- curtag[1]
if(tags_found) {
# # Raise number of findings on this day for this issue by 1
# issues[d,curissue] <- issues[d,curissue] + 1
#
# # Add issue and first matched tag of tweet to tweets-DF
# oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
# tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
# oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
# tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
# Add information to file for function viewPatternMatching
write(str_c(curdate,";\"",curid,"\";",curissue,";",curtag), curfile, append = TRUE)
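# Each line has four ";"-separated fields (date, quoted tweet id, issue, tag),
# matching the four character columns expected by read.csv() further below.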
# cat(paste("Match!\n"), file="issuecomp-analysis.log", append=TRUE)
# data.frame(date=curdate, issue=curissue)
break # next issue, no more tags from same issue
}
else {
#cat("Nothing found\n")
}
} # /for curtags
} # /for issuelist
} # /for tweets_curday
} # /for drange
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
stopCluster(cl)
# IMPORT RESULTS ----------------------------------------------------------
# Import all files which have been generated by the categorisation run above.
results_files <- list.files("matched-ids/", full.names = TRUE)
for(r in 1:length(results_files)) {
if(r == 1) {
results <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results <- insertRow(results, results_temp)
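# insertRow() comes from issuecomp-functions.R; presumably it appends the rows
# of results_temp to results, similar to rbind() (assumption).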
}
}
rm(r, results_temp, results_files)
# Remove duplicates, sort chronologically
results <- results[!duplicated(results), ]
names(results) <- c("date", "id_str", "issue", "tags")
results <- results[order(results$id_str), ]
row.names(results) <- NULL
# Now import all results in the dataframes "issues" and "tweets"
# (which wasn't possible in the categorisation process because of parallelisation)
# Reset issues counter
#issues[issueheads] <- 0
nrow_results <- nrow(results)
for(r in 1:nrow_results) {
curdate <- as.character(results$date[r])
curid <- as.character(results$id_str[r])
curissue <- as.character(results$issue[r])
curtag <- as.character(results$tags[r])
cat("Sorting match", r, "of", nrow_results, "\n")
# Update issue counter (date and issue)
issues[issues[, "date"] == curdate, curissue] <- issues[issues[, "date"] == curdate, curissue] + 1
# Update tweet dataframe (id, issue and tags)
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ",")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ",")
}
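# Tweets that match several issues end up with comma-separated lists in their
# "issue" and "tags" columns.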
# SAVING ------------------------------------------------------------------
save(tweets, file="tweets_tagged.RData")
write.csv(tweets, file="tweets.csv")
save(issues, file="issues.RData")