Import issue CSV files into issues and tweets DF

2015-02-24 18:19:46 +01:00
parent f9757f1a0a
commit 6af9dfb513
3 changed files with 281 additions and 500 deletions
+65 -14
@@ -7,6 +7,7 @@ library(foreach)
library(doParallel)
source("issuecomp-functions.R")
setwd("~/Dokumente/Uni/Aktuell/BA-Arbeit/uni-ba-issuecomp")
load(file = "tweets_untagged.RData")
@@ -17,13 +18,8 @@ date_end <- as.Date("2014-12-31")
drange <- as.integer(date_end - date_start)
drange <- date_start + days(0:drange)
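# drange is now a vector of consecutive Date values covering the whole sample
# (days() assumed to come from lubridate), e.g.:
# as.Date("2014-01-01") + days(0:2)  ->  "2014-01-01" "2014-01-02" "2014-01-03"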
# MATCH TWEETS ------------------------------------------------------------
id_folder <- "matched-ids"
#unlink(id_folder, recursive = TRUE)
#dir.create(id_folder)
# Import issues and prepare everything
# Will only be filled after the large categorisation loop
issues <- data.frame(date = drange)
issuelist <- readLines("issues.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
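# The pattern above strips XML comments ("<!-- ... -->") from issues.xml, so
# only the actual category/tag markup is left for parsing.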
@@ -33,15 +29,24 @@ issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
# MATCH TWEETS ------------------------------------------------------------
# Create folder where all results will be saved (safer for backup and import)
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)
# Tag expansion for plural, genitive etc.
tagexpand <- c("", "s", "n", "en", "er", "e")
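# Minimal sketch of what the suffix list does to a single (hypothetical) tag,
# using stringr's vectorised str_c:
# str_c("Steuer", tagexpand)
# -> "Steuer" "Steuers" "Steuern" "Steueren" "Steuerer" "Steuere"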
# Parallelisation
# Parameters for parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl<-makeCluster(4)
registerDoParallel(cl)
foreach(d = 260:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
#for(d in 1:nrow(issues)) {
# START CAT LOOP
foreach(d = 1:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar% {
# Go through every day
curdate <- issues$date[d]
cat(paste(as.character(curdate),"\n"), file="issuecomp-analysis.log", append=TRUE)
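# Parallel workers do not share the master's console, which is why progress is
# appended to the log file; passing outfile = "issuecomp-analysis.log" to
# makeCluster() would be an alternative way to capture worker output.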
@@ -125,12 +130,58 @@ foreach(d = 260:nrow(issues), .packages = c("stringr"), .combine=rbind) %dopar%
#rm(tweets_curday,curacro, curchars, curdate,curfile,curid,curissue,curtag,curtags,curtext,d,date_end,date_start,i,id_folder,oldissue,oldtag,s,t,tags_found)
stopCluster(cl)
# IMPORT RESULTS ----------------------------------------------------------
# Import all files generated during the categorisation run above.
setwd("matched-ids/")
results_files <- list.files()
for(r in 1:length(results_files)) {
if(r == 1) {
results <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
} else {
results_temp <- read.csv(results_files[r], sep=";", colClasses=c("character", "character", "character", "character"), header=F)
results <- insertRow(results, results_temp)
}
}
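# Equivalent one-step import under the same assumptions (four character
# columns, ";" separator, no header); a sketch, not the commit's code:
# results <- do.call(rbind, lapply(results_files, read.csv, sep = ";",
#                                  colClasses = rep("character", 4), header = FALSE))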
rm(r, results_temp, results_files)
# Remove duplicates, sort chronologically
results <- results[!duplicated(results), ]
names(results) <- c("date", "id_str", "issue", "tags")
results <- results[order(results$id_str), ]
row.names(results) <- NULL
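# Ordering by id_str is effectively chronological: Twitter's snowflake IDs
# grow over time, and as equal-length digit strings their lexicographic order
# matches their numeric order.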
# Now import all results in the dataframes "issues" and "tweets"
# (which wasn't possible in the categorisation process because of parallelisation)
# Reset issues counter
# issues[issueheads] <- 0
for(r in 1:nrow(results)) {
curdate <- as.character(results$date[r])
curid <- as.character(results$id_str[r])
curissue <- as.character(results$issue[r])
curtag <- as.character(results$tags[r])
cat("Sorting match", r, "of 62827 \n")
# Update issue counter (date and issue)
issues[issues[, "date"] == curdate, curissue] <- issues[issues[, "date"] == curdate, curissue] + 1
# Update tweet dataframe (id, issue and tags)
oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ",")
oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ",")
}
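# Note that str_c(..., ",") leaves a trailing comma in both columns
# (e.g. "s01,s02,"); if clean values are needed later, a sketch like
# tweets$issue <- str_replace(tweets$issue, ",$", "")
# would remove it (not part of this commit).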
# SAVING ------------------------------------------------------------------
row.names(tweets) <- NULL
write.csv(tweets, "tweets.csv")
save(tweets, file="tweets.RData")
save(tweets, file="tweets_tagged.RData")