Bachelor thesis: "The influence of sensational issues on political agenda-setting in social media"

issuecomp-2-analysis.R

require(lubridate)
require(XML)
require(ggplot2)
require(reshape2)
require(stringr)
require(foreach)
require(doParallel)
source("issuecomp-functions.R")
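# NOTE: smartPatternMatch() and checkAcronym() are defined in
# issuecomp-functions.R and not shown here. Judging only from how they are
# called below, they might look roughly like this (hypothetical sketch,
# not the actual implementation):
#
#   checkAcronym <- function(string) {
#     # Treat tags written entirely in upper case/digits (e.g. "NATO") as acronyms
#     grepl("^[A-Z0-9]+$", string)
#   }
#   smartPatternMatch <- function(text, tag, distance, acronym) {
#     # Approximate match with a maximum Levenshtein distance;
#     # acronyms are matched case-sensitively
#     agrepl(tag, text, max.distance = distance, ignore.case = !acronym)
#   }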
setwd("~/Dokumente/Uni/Aktuell/BA-Arbeit/uni-ba-issuecomp")
load(file = "tweets_untagged.RData")

# Create date range
date_start <- as.Date("2014-01-01")
date_end <- as.Date("2014-12-31")
drange <- as.integer(date_end - date_start)
drange <- date_start + days(0:drange)

# Import issues and prepare everything
# Will only be filled after the large categorisation loop
issues <- data.frame(date = drange)
issuelist <- readLines("issues-v3.xml")
issuelist <- str_replace_all(string = issuelist, pattern = ".*<!-- .+ -->", "")
issuelist <- xmlToList(issuelist)
issueheads <- names(issuelist)
issues[issueheads] <- 0
tweets$issue <- ""
tweets$tags <- ""
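# For reference: issues-v3.xml is expected to contain one element per issue,
# whose children hold that issue's search tags. A hypothetical fragment
# (the real file is not shown here; only "#WM2014" appears in the comments below):
#
#   <issues>
#     <issue.example>
#       <tag>Beispieltag</tag>
#       <tag>#WM2014</tag>
#       <tag>ABC</tag>
#     </issue.example>
#   </issues>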
# MATCH TWEETS ------------------------------------------------------------

# Create folder where all results will be saved (safer for backup and import)
id_folder <- "matched-ids"
unlink(id_folder, recursive = TRUE)
dir.create(id_folder)

# Tag expansion for plural, genitive etc.
tagexpand <- c("", "s", "n", "en", "er", "e")

# Parameters for parallelisation
writeLines(c(""), "issuecomp-analysis.log")
cl <- makeCluster(4)
registerDoParallel(cl)
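# NOTE: the cluster size is hard-coded to 4 workers above; an alternative
# would be to size it to the machine at hand, e.g.:
#   cl <- makeCluster(max(1, parallel::detectCores() - 1))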
# START CAT LOOP
# Note: nothing is actually returned to .combine; every match is written to
# a per-day CSV file instead and imported again below.
foreach(d = 1:nrow(issues), .packages = c("stringr"), .combine = rbind) %dopar% {
  # Go through every day
  curdate <- issues$date[d]
  cat(paste(as.character(curdate), "\n"), file = "issuecomp-analysis.log", append = TRUE)

  # Put all tweets from the specific day in a temporary DF
  tweets_curday <- tweets[tweets[, "created_at"] == curdate, ]
  for (t in 1:nrow(tweets_curday)) {
    # cat(paste("Starting tweet", t, "of", as.character(curdate), "\n"), file = "issuecomp-analysis.log", append = TRUE)

    # Select the tweet's text, replace hyphens by spaces and strip all
    # characters that are neither alphanumeric nor a space
    curtext <- as.character(tweets_curday$text[t])
    curtext <- str_replace_all(curtext, "-", " ")
    curtext <- str_replace_all(curtext, "[^[:alnum:] ]", "")
    curtext <- str_replace_all(curtext, " +", " ") # collapse multiple spaces
    curid <- as.character(tweets_curday$id_str[t])

    # Now test each single issue (not tag!)
    for (i in 1:length(issueheads)) {
      curissue <- issueheads[i]
      curtags <- as.character(issuelist[[curissue]])
      # curfile <- str_c(id_folder, "/", curissue, ".csv")
      curfile <- str_c(id_folder, "/", curdate, ".csv") # one file per day avoids corrupt files when using many processes

      # Now test all tags of a single issue
      for (s in 1:length(curtags)) {
        curtag <- curtags[s]
        curchars <- nchar(curtag, type = "chars")

        # Check if tag is an acronym. If so, ignore.case will be deactivated in smartPatternMatch
        curacro <- checkAcronym(string = curtag)

        # Check if tag is some kind of specific hashtag. If so, do not handle as acronym, but don't expand it either
        if (str_detect(curtag, "^#")) {
          curacro <- FALSE # hashtags like #WM2014 are also written as #wm2014, so we need case-insensitivity
          curhash <- TRUE  # but mark it as a hashtag, so it gets neither expanded nor a Levenshtein distance > 0
          curtag <- str_replace(curtag, "#", "")
          curchars <- curchars - 1
        } else {
          curhash <- FALSE
        }

        # Now expand the current tag by possible suffixes that may be plural forms.
        # Only do so if it is neither an acronym nor a specific hashtag
        if (!curacro && !curhash) {
          for (e in 1:length(tagexpand)) {
            curtag[e] <- str_c(curtag[1], tagexpand[e])
          }
        }

        # Set Levenshtein distance depending on char length, acronym and hashtag status:
        # 0 for short tags (7 chars or fewer), acronyms and hashtags; 1 otherwise
        if (curchars <= 7 || curacro || curhash) {
          curdistance <- 0
        } else {
          curdistance <- 1
        }

        # Match current tweet with tag:
        # allow a Levenshtein distance of 1 if the tag is 8 chars or longer and neither hashtag nor acronym;
        # match case-sensitively if the tag is an acronym
        tags_found <- NULL
        # Match the tweet with each variation of tagexpand
        for (e in 1:length(curtag)) {
          tags_found[e] <- smartPatternMatch(curtext, curtag[e], curdistance, curacro)
        }
        tags_found <- any(tags_found)
        curtag <- curtag[1]

        if (tags_found) {
          # # Raise number of findings on this day for this issue by 1
          # issues[d, curissue] <- issues[d, curissue] + 1
          #
          # # Add issue and first matched tag of tweet to tweets-DF
          # oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
          # tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ";")
          # oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
          # tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ";")

          # Add information to file for function viewPatternMatching
          write(str_c(curdate, ";\"", curid, "\";", curissue, ";", curtag), curfile, append = TRUE)
          # cat(paste("Match!\n"), file = "issuecomp-analysis.log", append = TRUE)
          # data.frame(date = curdate, issue = curissue)
          break # next issue, no more tags from same issue
        } else {
          # cat("Nothing found\n")
        }
      } # /for curtags
    } # /for issueheads
  } # /for tweets_curday
} # /foreach issues
# rm(tweets_curday, curacro, curchars, curdate, curfile, curid, curissue, curtag, curtags, curtext, d, date_end, date_start, i, id_folder, oldissue, oldtag, s, t, tags_found)
stopCluster(cl)
# IMPORT RESULTS ----------------------------------------------------------
# Import all files which have been generated in the categorisation run above.
results_files <- list.files("matched-ids/", full.names = TRUE)
for (r in 1:length(results_files)) {
  if (r == 1) {
    results <- read.csv(results_files[r], sep = ";", colClasses = c("character", "character", "character", "character"), header = FALSE)
  } else {
    results_temp <- read.csv(results_files[r], sep = ";", colClasses = c("character", "character", "character", "character"), header = FALSE)
    results <- insertRow(results, results_temp)
  }
}
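# An equivalent, more compact way to read and combine all per-day files
# (assuming insertRow() from issuecomp-functions.R simply row-binds):
#   results <- do.call(rbind, lapply(results_files, read.csv, sep = ";",
#                      colClasses = rep("character", 4), header = FALSE))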
rm(r, results_temp, results_files)

# Remove duplicates, sort chronologically
results <- results[!duplicated(results), ]
names(results) <- c("date", "id_str", "issue", "tags")
results <- results[order(results$id_str), ]
row.names(results) <- NULL
# Now import all results into the data frames "issues" and "tweets"
# (which wasn't possible during the categorisation process because of the parallelisation)

# Reset issues counter
# issues[issueheads] <- 0
nrow_results <- nrow(results)
for (r in 1:nrow_results) {
  curdate <- as.character(results$date[r])
  curid <- as.character(results$id_str[r])
  curissue <- as.character(results$issue[r])
  curtag <- as.character(results$tags[r])
  cat("Sorting match", r, "of", nrow_results, "\n")

  # Update issue counter (date and issue)
  issues[issues[, "date"] == curdate, curissue] <- issues[issues[, "date"] == curdate, curissue] + 1

  # Update tweet dataframe (id, issue and tags)
  oldissue <- tweets[tweets[, "id_str"] == curid, "issue"]
  tweets[tweets[, "id_str"] == curid, "issue"] <- str_c(oldissue, curissue, ",")
  oldtag <- tweets[tweets[, "id_str"] == curid, "tags"]
  tweets[tweets[, "id_str"] == curid, "tags"] <- str_c(oldtag, curtag, ",")
}
# SAVING ------------------------------------------------------------------
save(tweets, file = "tweets_tagged.RData")
write.csv(tweets, file = "tweets.csv")
save(issues, file = "issues.RData")
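# ggplot2 and reshape2 are loaded at the top but not used in this script.
# A minimal, hypothetical sketch of how the finished "issues" data frame
# could be inspected afterwards (not part of the original analysis):
issues_long <- melt(issues, id.vars = "date", variable.name = "issue", value.name = "count")
ggplot(issues_long, aes(x = date, y = count, colour = issue)) +
  geom_line() +
  labs(x = "Date", y = "Matched tweets per day")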