# uni-ba-socialagenda/issuecomp-1-scraping.R


# PREPARATIONS ------------------------------------------------------------
require(jsonlite)
require(stringr)
require(devtools)
require(RTwitterAPI)
setwd("~/Dokumente/Uni/Aktuell/BA-Arbeit/uni-ba-issuecomp")
source("issuecomp-functions.R")
#acc_url <- "http://www.bundestwitter.de/api/politiker"
#acc_df <- fromJSON(acc_url)
acc_df <- read.csv("MdB-twitter.csv")

# Drop all MdBs without a Twitter account (empty "twitter_acc" field)
delrow <- NULL
for (r in 1:nrow(acc_df)) {
  acc <- as.character(acc_df$twitter_acc[r])
  if (!nzchar(acc)) {
    delrow <- c(delrow, r)
  }
}
if (!is.null(delrow)) {
  acc_df <- acc_df[-delrow, ]
}
rm(delrow, r, acc)
acc_df$row.names <- NULL
row.names(acc_df) <- NULL
# COLLECT ALL TWEETS ------------------------------------------------------
# http://www.joyofdata.de/blog/twitters-rest-api-v1-1-with-r-for-linux-and-windows/
# --> devtools::install_github("joyofdata/RTwitterAPI")
# https://dev.twitter.com/rest/reference/get/statuses/user_timeline
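# twitter_api_call() from RTwitterAPI handles the OAuth 1.0a signing: it
# presumably fills the NA placeholders (nonce, timestamp) below on each
# request, computes the HMAC-SHA1 signature and returns the raw JSON response.
# Hypothetical example call:
# json <- twitter_api_call(api_url, c(screen_name = "example", count = "1"), api_params)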
# Read the OAuth credentials once; the file is expected to hold the consumer
# key (line 2), consumer secret (line 3), access token (line 4) and token
# secret (line 5), matching the indices below
credentials <- readLines("twitter-api-credentials.txt")
api_params <- c(
  "oauth_consumer_key" = credentials[2],
  "oauth_nonce" = NA,
  "oauth_signature_method" = "HMAC-SHA1",
  "oauth_timestamp" = NA,
  "oauth_token" = credentials[4],
  "oauth_version" = "1.0",
  "consumer_secret" = credentials[3],
  "oauth_token_secret" = credentials[5]
)
#api_url2 <- "https://api.twitter.com/1.1/statuses/show.json"
#id2="498492933922754560" # 499533113676931073(\" ), 325320073906622464(\\>), 498492933922754560(\"W)
#query2 <- c(id=id2, trim_user="true", include_entities="false")
#current2 <- twitter_api_call(api_url2, query2, api_params)
api_url <- "https://api.twitter.com/1.1/statuses/user_timeline.json";
max_count <- "200"
keep <- c("created_at", "id_str", "text", "retweet_count")
# tweets_complete: all tweets of all users
# tweets_full: all tweets of the current user
# tweets_temp: the current batch of (max.) 200 tweets of the current user
tweets_full <- data.frame(user = character(), name = character(),
                          created_at = character(), id_str = character(),
                          text = character(), retweet_count = character())
tweets_complete <- tweets_full
for (a in 1:nrow(acc_df)) {
  user <- as.character(acc_df$twitter_acc[a])
  name <- as.character(acc_df$name[a])
  max_id <- "999999999999999999" # start above any real tweet ID, so the first page holds the newest tweets
  loop <- 1
  error <- 0
  repeat {
    # Define the query for the current page of this user's timeline
    query <- c(include_rts = "1", exclude_replies = "true", trim_user = "true",
               include_entities = "false", screen_name = user,
               count = max_count, max_id = max_id)

    # At first, work with a temporary tweet DB
    current <- twitter_api_call(api_url, query, api_params)
    if (exists("tweets_temp")) rm(tweets_temp)
    tweets_temp <- fromJSON(correctJSON(current))

    ## START ERROR HANDLING ##
    # Empty API output
    status <- errorEmptyAPI(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
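
    # Return convention of the error helpers, as inferred from their use here:
    # 1 = retry the same request after a short pause, 2 = give up on this user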
# Contains "error" column
status <- errorErrorColumn(tweets_temp)
if(status == 1) { Sys.sleep(3);error <- error + 1;next}
if(status == 2) {break}
    # Check if a Twitter error code exists
    code <- errorCheckCode(tweets_temp) # 0 if no error
    if (code == 34) { # error 34: page does not exist
      status <- errorCode34()
      if (status == 1) { Sys.sleep(3); error <- error + 1; next }
      if (status == 2) { break }
    }
    if (code == 88) { # error 88: rate limit exceeded
      wait <- errorCode88() # presumably the number of seconds to wait
      Sys.sleep(wait)
      next
    }
    ## END ERROR HANDLING ##

    # Delete unnecessary columns and add username and real name to the data frame
    tweets_temp <- tweets_temp[keep]
    tweets_temp <- cbind(user = user, name = name, tweets_temp)
    # Progress output, then sleep 2 seconds to dodge the 300 queries/15 min limit
    cat("[", a, "/", nrow(acc_df), "] ", sep = "")
    cat("User: ", user, " in loop: ", loop, ". \n", sep = "")
    Sys.sleep(2)

    # If the new page ends with the tweet that already ends tweets_full, the
    # timeline is exhausted (check nrow first to avoid indexing an empty frame)
    if (nrow(tweets_full) > 0 &&
        tweets_full$id_str[nrow(tweets_full)] == tweets_temp$id_str[nrow(tweets_temp)]) {
      cat("[INFO] Last tweet of temp is last tweet of full. Abort loop and begin with next user.\n")
      break
    }
    ## CHECK if we need another loop
    # Extract the year of the last (oldest) tweet in tweets_temp
    year_last <- as.numeric(str_extract(tweets_temp$created_at[nrow(tweets_temp)], "\\d{4}$"))
    # Is the last tweet earlier than 2014? If so, break the loop
    if (year_last < 2014) {
      # Is even the first tweet older than 2014?
      year_first <- as.numeric(str_extract(tweets_temp$created_at[1], "\\d{4}$"))
      if (year_first < 2014) {
        cat("[INFO] Timeline does not contain a single tweet from 2014\n")
      }
      tweets_full <- insertRow(tweets_full, tweets_temp)
      break # end the loop because 2013 is reached
    } else {
      # The last tweet is from 2014 or newer, so we need another loop:
      # set max_id to gather the next 200 tweets
      max_id <- tweets_temp$id_str[nrow(tweets_temp)]
      loop <- loop + 1 # just for stats
      tweets_full <- insertRow(tweets_full, tweets_temp)
    }
  } # /repeat
  # Move the current user's tweets over to the complete set
  tweets_complete <- insertRow(tweets_complete, tweets_full)
  tweets_full <- tweets_full[0, ] # empty tweets_full for the next user
  cat("User:", user, "finished after", loop, "loops. Total tweets now:", nrow(tweets_complete), "\n")
  # Write a checkpoint after every user, so a crash does not lose all tweets
  write.csv(tweets_complete, "tweets_complete.csv")
  # Every tweet from 2014 or newer of user[a] is downloaded; continue with the next user
}
rm(a, code, current, error, loop, max_id, max_count, year_first, year_last, name, query, status, user, wait, tweets_full, tweets_temp)
# CLEAR DATAFRAME ---------------------------------------------------------
save(tweets_complete, file="tweets_complete.RData")
# Remove duplicates (inclusive max_id paging fetches each page-boundary tweet twice)
tweets <- tweets_complete[!duplicated(tweets_complete), ]
tweets <- na.omit(tweets)
rm(tweets_complete)
# Format dates in data frame
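# Twitter's created_at looks like "Mon Dec 01 09:00:00 +0000 2014"; the "C"
# time locale below makes the English month and day names parse on non-English systems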
Sys.setlocale("LC_TIME", "C")
tweets$created_at <- as.POSIXct(tweets$created_at, format = "%a %b %d %H:%M:%S %z %Y")
tweets <- tweets[order(tweets$created_at), ]
# Finally delete every tweet not from 2014 (2013 or 2015)
delrow <- NULL
for (r in 1:nrow(tweets)) {
  # Mark tweets outside 2014 for deletion
  if (format(tweets$created_at[r], "%Y") != "2014") {
    delrow <- c(delrow, r)
  }
  # Replace URLs with a plain "URL" token; the "$" substitution appends a
  # trailing space so the pattern also matches a URL at the end of the text
  curtext <- as.character(tweets$text[r])
  curtext <- str_replace_all(curtext, "$", " ")
  curtext <- str_replace_all(curtext, "http://.+?\\s", "URL ")
  tweets$text[r] <- curtext
}
if (!is.null(delrow)) {
  tweets <- tweets[-delrow, ]
}
rm(delrow, r)
# Reduce dates to day precision; the exact time of day is not needed
tweets$created_at <- format(tweets$created_at, "%Y-%m-%d")
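
# Optional sanity check: all remaining tweets should now be from 2014
# table(substr(tweets$created_at, 1, 4))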
save(tweets, file="tweets_untagged.RData")