# uni-ba-socialagenda/issuecomp-2-analysis-EXT.R
require(lubridate)
require(XML)
require(stringr)
require(foreach)
require(doParallel)
source("issuecomp-functions.R")
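# The helpers used below (checkAcronym, smartPatternMatch) come from
# issuecomp-functions.R. As a fallback for reading this script standalone, here
# is a minimal sketch of what they are assumed to do; the real implementations
# from issuecomp-functions.R take precedence.
if (!exists("checkAcronym")) {
  # Assumption: a tag written entirely in capitals/digits (e.g. "NSA") is an acronym
  checkAcronym <- function(string) {
    str_detect(string, "^[A-Z0-9]{2,}$")
  }
}
if (!exists("smartPatternMatch")) {
  # Assumption: approximate match allowing `distance` Levenshtein edits,
  # case-sensitive only for acronyms
  smartPatternMatch <- function(text, pattern, distance, acronym) {
    agrepl(pattern, text, max.distance = distance, ignore.case = !acronym)
  }
}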
setwd("E:/max.mehl")
load(file = "tweets_untagged.RData")
# Create date range
date_start <- as.Date("2014-01-01")
date_end <- as.Date("2014-12-31")
drange <- as.integer(date_end - date_start)
drange <- date_start + days(0:drange)
# Import issues and prepare the results data frame
# (its counters will only be filled after the large categorisation loop)
issues <- data.frame(date = drange)
issuelist <- readLines("issues-v2.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
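# Assumed shape of issues-v2.xml (issue and tag names here are hypothetical):
#   <issues>
#     <issue.one>
#       <tag>steuer</tag>
#       <tag>NSA</tag>
#       <tag>#wm2014</tag>
#     </issue.one>
#     ...
#   </issues>
# xmlToList() then yields a named list with one element per issue, each holding
# that issue's tags.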
issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
# MATCH TWEETS ------------------------------------------------------------
# Create folder where all results will be saved (safer for backup and import)
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
# Tag expansion for plural, genitive etc.
tagexpand <- c("", "s", "n", "en", "er", "e")
# Parameters for parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl <- makeCluster(7)
registerDoParallel(cl)
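# Note: .packages = c("stringr") below loads stringr on each of the 7 workers;
# objects referenced in the loop body (tweets, issues, issuelist, ...) are
# exported to the workers by foreach automatically.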
# START CATEGORISATION LOOP
foreach(d = 1:nrow(issues), .packages = c("stringr"), .combine = rbind) %dopar% {
  # Go through every day
  curdate <- issues$date[d]
  cat(paste(as.character(curdate), "\n"), file = "issuecomp-analysis.log", append = TRUE)
  # Put all tweets from the specific day in a temporary DF
  tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
  for (t in seq_len(nrow(tweets_curday))) { # seq_len() avoids the 1:0 trap on days without tweets
    # cat(paste("Starting tweet", t, "of", as.character(curdate), "\n"), file = "issuecomp-analysis.log", append = TRUE)
    # Select the tweet's text and remove hashtag indicators (#)
    curtext <- as.character(tweets_curday$text[t])
    curtext <- str_replace_all(curtext, "#", "")
    curid <- as.character(tweets_curday$id_str[t])
    # Now test each single issue (not tag!)
    for (i in seq_along(issueheads)) {
      curissue <- issueheads[i]
      curtags <- as.character(issuelist[[curissue]])
      curfile <- str_c(id_folder, "/", curissue, ".csv")
      # Now test all tags of a single issue
      for (s in seq_along(curtags)) {
        curtag <- curtags[s]
        curchars <- nchar(curtag, type = "chars")
        # Check if the tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
        curacro <- checkAcronym(string = curtag)
        # Check if the tag is a specific hashtag. If so, do not handle it as an acronym, but don't expand it either
        if (str_detect(curtag, "^#")) {
          curacro <- FALSE # hashtags like #WM2014 are also written as #wm2014, so we need case-insensitivity
          curhash <- TRUE  # but mark it as a hashtag, so it gets neither suffix expansion nor a Levenshtein distance > 0
          curtag <- str_replace(curtag, "#", "")
          curchars <- curchars - 1
        } else {
          curhash <- FALSE
        }
        # Now expand the current tag by possible suffixes that may be plural or inflected forms
        # Only do this if it is neither an acronym nor a specific hashtag
        if (!curacro && !curhash) {
          for (e in seq_along(tagexpand)) {
            curtag[e] <- str_c(curtag[1], tagexpand[e])
          }
        }
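        # e.g. a (hypothetical) tag "steuer" becomes
        # c("steuer", "steuers", "steuern", "steueren", "steuerer", "steuere")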
        # Set the Levenshtein distance depending on char length, acronym and hashtag status
        if (curchars <= 6 || curacro || curhash) {
          curdistance <- 0
        } else {
          curdistance <- 1 # distance = 1 only for regular tags of 7 chars or more
        }
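        # e.g. (hypothetical tags): "NSA" (acronym) -> 0, "umwelt" (6 chars) -> 0, "bildung" (7 chars) -> 1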
        # Match the current tweet with the tag:
        # allow a Levenshtein distance of 1 if the tag has 7 chars or more and is neither a hashtag nor an acronym;
        # make it case-sensitive if the tag is an acronym
        tags_found <- NULL
        # Match the tweet with each variation of tagexpand
        for (e in seq_along(curtag)) {
          tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
        }
        tags_found <- any(tags_found)
        curtag <- curtag[1]
        if (tags_found == TRUE) {
          # # Raise the number of findings on this day for this issue by 1
          # issues[d, curissue] <- issues[d, curissue] + 1
          #
          # # Add issue and first matched tag of the tweet to the tweets-DF
          # oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
          # tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
          # oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
          # tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")
          # Add information to a file for the function viewPatternMatching
          write(str_c(curdate, ";\"", curid, "\";", curissue, ";", curtag), curfile, append = TRUE)
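          # The resulting CSV line looks like (hypothetical values):
          #   2014-01-01;"123456789";issue.one;steuer
          # i.e. columns: date, tweet ID, issue name, matched tag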
# cat(paste("Match!\n"), file="issuecomp-analysis.log", append=TRUE)
# data.frame(date=curdate, issue=curissue)
break # next issue, no more tags from same issue
}
else {
#cat("Nothing found\n")
}
      } # /for curtags
    } # /for issuelist
  } # /for tweets_curday
} # /foreach drange
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
stopCluster(cl)
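# The commented-out aggregation inside the %dopar% loop above cannot work as-is:
# the workers run in separate processes and cannot write back into the master's
# `issues`/`tweets` objects, which is why every match is appended to a per-issue
# CSV file instead. A minimal sketch (assuming the write() format above:
# date;"id";issue;tag) of how those files could be read back in to fill `issues`:
for (curissue in issueheads) {
  curfile <- str_c(id_folder, "/", curissue, ".csv")
  if (!file.exists(curfile)) next
  matches <- read.csv2(curfile, header = FALSE, colClasses = "character",
                       col.names = c("date", "id_str", "issue", "tag"))
  counts <- table(matches$date) # matches per day for this issue
  issues[match(names(counts), as.character(issues$date)), curissue] <- as.integer(counts)
}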