cat("[INFO] Last tweet of temp is last tweet of full. Abort loop and begin with next user.\n")
#break
}
# Interactive sanity checks after the (aborted) run above:
tweets_full$id_str[nrow(tweets_full)]  # id of the last tweet accumulated for the current user
tweets_temp$id_str[nrow(tweets_temp)]  # id of the last tweet in the most recent API page
tweets_complete$user[20674]  # which account owns row 20674 of the overall collection
a  # account index the loop stopped at (used to pick the restart index below)
current  # raw API response of the last call, kept for manual inspection
# Per-user accumulator; flushed into tweets_complete and emptied after each
# account. Tweet ids are kept as character strings to avoid 64-bit precision loss.
tweets_full <- data.frame(user=character(), name=character(), created_at=character(), id_str=character(), text=character(), retweet_count=character())

# Download every 2014 tweet for each account in acc_df, paging backwards
# through the timeline via max_id until a pre-2014 tweet is reached.
# Relies on globals defined elsewhere: acc_df, max_count, api_url, api_params,
# keep, tweets_complete, and helpers twitter_api_call, correctJSON,
# errorEmptyAPI, errorErrorColumn, errorCheckCode, errorCode34, errorCode88,
# insertRow (plus stringr::str_detect).
for (a in seq_len(nrow(acc_df))) {
  user <- as.character(acc_df$screenname[a])
  name <- as.character(acc_df$name[a])
  max_id <- "999999999999999999"  # larger than any real tweet id -> start at newest
  loop <- 1   # page counter, logging only
  error <- 0  # retry counter, informational only
  repeat {
    # Define specific search query
    query <- c(include_rts = 1, exclude_replies = "true", trim_user = "true",
               include_entities = "false",
               screen_name = user,
               count = max_count,
               max_id = max_id)
    # At first, work with a temporary tweet frame
    current <- twitter_api_call(api_url, query, api_params)
    if (exists("tweets_temp")) rm(tweets_temp)  # FIX: bare rm() warns on the very first pass
    tweets_temp <- fromJSON(correctJSON(current))
    ## START ERROR HANDLING ##
    # Empty API output
    status <- errorEmptyAPI(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Contains "error" column
    # FIX: was errorEmptyAPI() a second time (copy/paste duplicate); the
    # intended check -- used by the later revisions in this file -- is
    # errorErrorColumn().
    status <- errorErrorColumn(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Check if an error code exists (0 means no error)
    code <- errorCheckCode(tweets_temp)
    if (code == 34) {  # page does not exist
      # FIX: call the handler; bare `errorCode34` assigned the function object,
      # making both comparisons below fail.
      status <- errorCode34()
      if (status == 1) { Sys.sleep(3); error <- error + 1; next }
      if (status == 2) { break }
    }
    if (code == 88) {  # rate limit exceeded
      wait <- errorCode88()
      Sys.sleep(wait)
      next
    }
    ## END ERROR HANDLING ##
    # Delete unnecessary columns and add username and real name to dataframe
    tweets_temp <- tweets_temp[keep]
    tweets_temp <- cbind(user = user, name = name, tweets_temp)
    # Sleep to stay under the 300 queries / 15 min limit
    cat("[", a, "/", nrow(acc_df), "] ", sep = "")
    cat("User: ", user, " in loop: ", loop, ". \n", sep = "")
    Sys.sleep(2)
    # FIX: test nrow() first -- on an empty tweets_full the id comparison
    # yields logical(0) and the `if` errors out. Also actually break here
    # (the break was commented out), otherwise a repeated page loops forever.
    if (nrow(tweets_full) > 0 &&
        tweets_full$id_str[nrow(tweets_full)] == tweets_temp$id_str[nrow(tweets_temp)]) {
      cat("[INFO] Last tweet of temp is last tweet of full. Abort loop and begin with next user.\n")
      break
    }
    # Is the last tweet of this page still from 2014? (isTRUE makes NA safe)
    status <- str_detect(tweets_temp$created_at[nrow(tweets_temp)], "2014$")
    if (!isTRUE(status)) {
      # Final page reached: count and strip everything older than 2014
      old <- 0
      for (r in seq_len(nrow(tweets_temp))) {
        status <- str_detect(tweets_temp$created_at[r], "2014$")
        if (is.na(status)) {
          status <- FALSE  # FIX: NA would crash the `if` below; treat as "not from 2014"
          cat("[INFO] NA-Status in Tweet", r)
        }
        if (!status) {  # tweet not from 2014
          old <- old + 1
        }
      }
      if (old > 0) {
        old <- old - 1
        # If even the first entry isn't from 2014, the whole page is old
        status <- str_detect(tweets_temp$created_at[1], "2014$")
        if (!isTRUE(status)) {
          old <- nrow(tweets_temp)
          cat("[INFO] Timeline enhält keinen einzigen aus 2014\n")
        }
        # delete all lines which are older than 2014
        tweets_temp <- head(tweets_temp, -old)
      }
      rm(old)
      tweets_full <- insertRow(tweets_full, tweets_temp)
      break  # End loop because pre-2014 tweets are reached
    } else {
      # The last tweet is still from 2014, so we need another loop.
      # Setting max_id to gather the next page of tweets
      max_id <- tweets_temp$id_str[nrow(tweets_temp)]
      loop <- loop + 1  # just for stats
      tweets_full <- insertRow(tweets_full, tweets_temp)
    }
  } # /repeat
  tweets_complete <- insertRow(tweets_complete, tweets_full)
  tweets_full <- head(tweets_full, -nrow(tweets_full))  # empty the per-user accumulator
  cat("User:", user, "finished after", loop, "loops. Total Tweets now:", nrow(tweets_complete), "\n")
  write.csv(tweets_complete, "tweets_complete.csv")  # checkpoint after every user
  # Every tweet from 2014 from user[a] is downloaded. Now next user in for-loop
}
# Per-user accumulator, reset before resuming the crawl at account 66.
tweets_full <- data.frame(user=character(), name=character(), created_at=character(), id_str=character(), text=character(), retweet_count=character())

# Resume the 2014-tweet download at account index 66 (earlier accounts were
# already collected by the previous run). Same logic as the loop above.
for (a in 66:nrow(acc_df)) {
  user <- as.character(acc_df$screenname[a])
  name <- as.character(acc_df$name[a])
  max_id <- "999999999999999999"  # larger than any real tweet id -> start at newest
  loop <- 1   # page counter, logging only
  error <- 0  # retry counter, informational only
  repeat {
    # Define specific search query
    query <- c(include_rts = 1, exclude_replies = "true", trim_user = "true",
               include_entities = "false",
               screen_name = user,
               count = max_count,
               max_id = max_id)
    # At first, work with a temporary tweet frame
    current <- twitter_api_call(api_url, query, api_params)
    if (exists("tweets_temp")) rm(tweets_temp)  # FIX: bare rm() warns on the very first pass
    tweets_temp <- fromJSON(correctJSON(current))
    ## START ERROR HANDLING ##
    # Empty API output
    status <- errorEmptyAPI(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Contains "error" column
    # FIX: was errorEmptyAPI() a second time (copy/paste duplicate); the
    # intended check is errorErrorColumn(), as in the later revisions.
    status <- errorErrorColumn(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Check if an error code exists (0 means no error)
    code <- errorCheckCode(tweets_temp)
    if (code == 34) {  # page does not exist
      # FIX: call the handler; bare `errorCode34` assigned the function object.
      status <- errorCode34()
      if (status == 1) { Sys.sleep(3); error <- error + 1; next }
      if (status == 2) { break }
    }
    if (code == 88) {  # rate limit exceeded
      wait <- errorCode88()
      Sys.sleep(wait)
      next
    }
    ## END ERROR HANDLING ##
    # Delete unnecessary columns and add username and real name to dataframe
    tweets_temp <- tweets_temp[keep]
    tweets_temp <- cbind(user = user, name = name, tweets_temp)
    # Sleep to stay under the 300 queries / 15 min limit
    cat("[", a, "/", nrow(acc_df), "] ", sep = "")
    cat("User: ", user, " in loop: ", loop, ". \n", sep = "")
    Sys.sleep(2)
    # FIX: test nrow() first -- on an empty tweets_full the id comparison
    # yields logical(0) and the `if` errors out. Also actually break here
    # (the break was commented out), otherwise a repeated page loops forever.
    if (nrow(tweets_full) > 0 &&
        tweets_full$id_str[nrow(tweets_full)] == tweets_temp$id_str[nrow(tweets_temp)]) {
      cat("[INFO] Last tweet of temp is last tweet of full. Abort loop and begin with next user.\n")
      break
    }
    # Is the last tweet of this page still from 2014? (isTRUE makes NA safe)
    status <- str_detect(tweets_temp$created_at[nrow(tweets_temp)], "2014$")
    if (!isTRUE(status)) {
      # Final page reached: count and strip everything older than 2014
      old <- 0
      for (r in seq_len(nrow(tweets_temp))) {
        status <- str_detect(tweets_temp$created_at[r], "2014$")
        if (is.na(status)) {
          status <- FALSE  # FIX: NA would crash the `if` below; treat as "not from 2014"
          cat("[INFO] NA-Status in Tweet", r)
        }
        if (!status) {  # tweet not from 2014
          old <- old + 1
        }
      }
      if (old > 0) {
        old <- old - 1
        # If even the first entry isn't from 2014, the whole page is old
        status <- str_detect(tweets_temp$created_at[1], "2014$")
        if (!isTRUE(status)) {
          old <- nrow(tweets_temp)
          cat("[INFO] Timeline enhält keinen einzigen aus 2014\n")
        }
        # delete all lines which are older than 2014
        tweets_temp <- head(tweets_temp, -old)
      }
      rm(old)
      tweets_full <- insertRow(tweets_full, tweets_temp)
      break  # End loop because pre-2014 tweets are reached
    } else {
      # The last tweet is still from 2014, so we need another loop.
      # Setting max_id to gather the next page of tweets
      max_id <- tweets_temp$id_str[nrow(tweets_temp)]
      loop <- loop + 1  # just for stats
      tweets_full <- insertRow(tweets_full, tweets_temp)
    }
  } # /repeat
  tweets_complete <- insertRow(tweets_complete, tweets_full)
  tweets_full <- head(tweets_full, -nrow(tweets_full))  # empty the per-user accumulator
  cat("User:", user, "finished after", loop, "loops. Total Tweets now:", nrow(tweets_complete), "\n")
  write.csv(tweets_complete, "tweets_complete.csv")  # checkpoint after every user
  # Every tweet from 2014 from user[a] is downloaded. Now next user in for-loop
}
# Interactive checks after the run above stopped:
a  # account index where the loop stopped (used to choose the restart index)
tweets_complete$user[22982]  # account at the last collected row, to confirm the restart point
# Per-user accumulator, reset before resuming the crawl at account 68.
tweets_full <- data.frame(user=character(), name=character(), created_at=character(), id_str=character(), text=character(), retweet_count=character())

# Resume the 2014-tweet download at account index 68. Same logic as the
# loops above; this revision already breaks on a repeated page.
for (a in 68:nrow(acc_df)) {
  user <- as.character(acc_df$screenname[a])
  name <- as.character(acc_df$name[a])
  max_id <- "999999999999999999"  # larger than any real tweet id -> start at newest
  loop <- 1   # page counter, logging only
  error <- 0  # retry counter, informational only
  repeat {
    # Define specific search query
    query <- c(include_rts = 1, exclude_replies = "true", trim_user = "true",
               include_entities = "false",
               screen_name = user,
               count = max_count,
               max_id = max_id)
    # At first, work with a temporary tweet frame
    current <- twitter_api_call(api_url, query, api_params)
    if (exists("tweets_temp")) rm(tweets_temp)  # FIX: bare rm() warns on the very first pass
    tweets_temp <- fromJSON(correctJSON(current))
    ## START ERROR HANDLING ##
    # Empty API output
    status <- errorEmptyAPI(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Contains "error" column
    # FIX: was errorEmptyAPI() a second time (copy/paste duplicate); the
    # intended check is errorErrorColumn(), as in the later revisions.
    status <- errorErrorColumn(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Check if an error code exists (0 means no error)
    code <- errorCheckCode(tweets_temp)
    if (code == 34) {  # page does not exist
      # FIX: call the handler; bare `errorCode34` assigned the function object.
      status <- errorCode34()
      if (status == 1) { Sys.sleep(3); error <- error + 1; next }
      if (status == 2) { break }
    }
    if (code == 88) {  # rate limit exceeded
      wait <- errorCode88()
      Sys.sleep(wait)
      next
    }
    ## END ERROR HANDLING ##
    # Delete unnecessary columns and add username and real name to dataframe
    tweets_temp <- tweets_temp[keep]
    tweets_temp <- cbind(user = user, name = name, tweets_temp)
    # Sleep to stay under the 300 queries / 15 min limit
    cat("[", a, "/", nrow(acc_df), "] ", sep = "")
    cat("User: ", user, " in loop: ", loop, ". \n", sep = "")
    Sys.sleep(2)
    # FIX: test nrow() first -- on an empty tweets_full the id comparison
    # yields logical(0) and the `if` errors out.
    if (nrow(tweets_full) > 0 &&
        tweets_full$id_str[nrow(tweets_full)] == tweets_temp$id_str[nrow(tweets_temp)]) {
      cat("[INFO] Last tweet of temp is last tweet of full. Abort loop and begin with next user.\n")
      break
    }
    # Is the last tweet of this page still from 2014? (isTRUE makes NA safe)
    status <- str_detect(tweets_temp$created_at[nrow(tweets_temp)], "2014$")
    if (!isTRUE(status)) {
      # Final page reached: count and strip everything older than 2014
      old <- 0
      for (r in seq_len(nrow(tweets_temp))) {
        status <- str_detect(tweets_temp$created_at[r], "2014$")
        if (is.na(status)) {
          status <- FALSE  # FIX: NA would crash the `if` below; treat as "not from 2014"
          cat("[INFO] NA-Status in Tweet", r)
        }
        if (!status) {  # tweet not from 2014
          old <- old + 1
        }
      }
      if (old > 0) {
        old <- old - 1
        # If even the first entry isn't from 2014, the whole page is old
        status <- str_detect(tweets_temp$created_at[1], "2014$")
        if (!isTRUE(status)) {
          old <- nrow(tweets_temp)
          cat("[INFO] Timeline enhält keinen einzigen aus 2014\n")
        }
        # delete all lines which are older than 2014
        tweets_temp <- head(tweets_temp, -old)
      }
      rm(old)
      tweets_full <- insertRow(tweets_full, tweets_temp)
      break  # End loop because pre-2014 tweets are reached
    } else {
      # The last tweet is still from 2014, so we need another loop.
      # Setting max_id to gather the next page of tweets
      max_id <- tweets_temp$id_str[nrow(tweets_temp)]
      loop <- loop + 1  # just for stats
      tweets_full <- insertRow(tweets_full, tweets_temp)
    }
  } # /repeat
  tweets_complete <- insertRow(tweets_complete, tweets_full)
  tweets_full <- head(tweets_full, -nrow(tweets_full))  # empty the per-user accumulator
  cat("User:", user, "finished after", loop, "loops. Total Tweets now:", nrow(tweets_complete), "\n")
  write.csv(tweets_complete, "tweets_complete.csv")  # checkpoint after every user
  # Every tweet from 2014 from user[a] is downloaded. Now next user in for-loop
}
# Interactive debugging of a failed iteration: inspect the raw response,
# re-parse it, and re-run the error check by hand.
status
current
tweets_temp
status
fromJSON(current)  # raw parse, without the correctJSON() cleanup
tweets_temp <- fromJSON(correctJSON(current))  # re-parse with the cleanup applied
tweets_temp
status <- errorErrorColumn(tweets_temp)  # manual re-check for an "error" column
a
View(acc_df)  # inspect the account table to find where to restart
tweets_complete$user[32539]  # account at the last collected row
# Per-user accumulator, reset before resuming the crawl at account 94.
tweets_full <- data.frame(user=character(), name=character(), created_at=character(), id_str=character(), text=character(), retweet_count=character())

# Resume the 2014-tweet download at account index 94. This revision already
# uses errorErrorColumn(); remaining fixes are annotated below.
for (a in 94:nrow(acc_df)) {
  user <- as.character(acc_df$screenname[a])
  name <- as.character(acc_df$name[a])
  max_id <- "999999999999999999"  # larger than any real tweet id -> start at newest
  loop <- 1   # page counter, logging only
  error <- 0  # retry counter, informational only
  repeat {
    # Define specific search query
    query <- c(include_rts = 1, exclude_replies = "true", trim_user = "true",
               include_entities = "false",
               screen_name = user,
               count = max_count,
               max_id = max_id)
    # At first, work with a temporary tweet frame
    current <- twitter_api_call(api_url, query, api_params)
    if (exists("tweets_temp")) rm(tweets_temp)  # FIX: bare rm() warns on the very first pass
    tweets_temp <- fromJSON(correctJSON(current))
    ## START ERROR HANDLING ##
    # Empty API output
    status <- errorEmptyAPI(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Contains "error" column
    status <- errorErrorColumn(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Check if an error code exists (0 means no error)
    code <- errorCheckCode(tweets_temp)
    if (code == 34) {  # page does not exist
      # FIX: call the handler; bare `errorCode34` assigned the function object.
      status <- errorCode34()
      if (status == 1) { Sys.sleep(3); error <- error + 1; next }
      if (status == 2) { break }
    }
    if (code == 88) {  # rate limit exceeded
      wait <- errorCode88()
      Sys.sleep(wait)
      next
    }
    ## END ERROR HANDLING ##
    # Delete unnecessary columns and add username and real name to dataframe
    tweets_temp <- tweets_temp[keep]
    tweets_temp <- cbind(user = user, name = name, tweets_temp)
    # Sleep to stay under the 300 queries / 15 min limit
    cat("[", a, "/", nrow(acc_df), "] ", sep = "")
    cat("User: ", user, " in loop: ", loop, ". \n", sep = "")
    Sys.sleep(2)
    # FIX: test nrow() first -- on an empty tweets_full the id comparison
    # yields logical(0) and the `if` errors out.
    if (nrow(tweets_full) > 0 &&
        tweets_full$id_str[nrow(tweets_full)] == tweets_temp$id_str[nrow(tweets_temp)]) {
      cat("[INFO] Last tweet of temp is last tweet of full. Abort loop and begin with next user.\n")
      break
    }
    # Is the last tweet of this page still from 2014? (isTRUE makes NA safe)
    status <- str_detect(tweets_temp$created_at[nrow(tweets_temp)], "2014$")
    if (!isTRUE(status)) {
      # Final page reached: count and strip everything older than 2014
      old <- 0
      for (r in seq_len(nrow(tweets_temp))) {
        status <- str_detect(tweets_temp$created_at[r], "2014$")
        if (is.na(status)) {
          status <- FALSE  # FIX: NA would crash the `if` below; treat as "not from 2014"
          cat("[INFO] NA-Status in Tweet", r)
        }
        if (!status) {  # tweet not from 2014
          old <- old + 1
        }
      }
      if (old > 0) {
        old <- old - 1
        # If even the first entry isn't from 2014, the whole page is old
        status <- str_detect(tweets_temp$created_at[1], "2014$")
        if (!isTRUE(status)) {
          old <- nrow(tweets_temp)
          cat("[INFO] Timeline enhält keinen einzigen aus 2014\n")
        }
        # delete all lines which are older than 2014
        tweets_temp <- head(tweets_temp, -old)
      }
      rm(old)
      tweets_full <- insertRow(tweets_full, tweets_temp)
      break  # End loop because pre-2014 tweets are reached
    } else {
      # The last tweet is still from 2014, so we need another loop.
      # Setting max_id to gather the next page of tweets
      max_id <- tweets_temp$id_str[nrow(tweets_temp)]
      loop <- loop + 1  # just for stats
      tweets_full <- insertRow(tweets_full, tweets_temp)
    }
  } # /repeat
  tweets_complete <- insertRow(tweets_complete, tweets_full)
  tweets_full <- head(tweets_full, -nrow(tweets_full))  # empty the per-user accumulator
  cat("User:", user, "finished after", loop, "loops. Total Tweets now:", nrow(tweets_complete), "\n")
  write.csv(tweets_complete, "tweets_complete.csv")  # checkpoint after every user
  # Every tweet from 2014 from user[a] is downloaded. Now next user in for-loop
}
status  # leftover inspection from the previous run
# Reset the per-user accumulator before resuming the crawl below
tweets_full <- data.frame(user=character(), name=character(), created_at=character(), id_str=character(), text=character(), retweet_count=character())
a  # confirm the account index to restart from
# Resume the 2014-tweet download at account index 346. This is the final
# revision of the loop: it uses errorErrorColumn() and errorCode34() and
# breaks on a repeated page. tweets_full must be (re)initialised before
# entering (see the data.frame() call directly above).
for (a in 346:nrow(acc_df)) {
  user <- as.character(acc_df$screenname[a])
  name <- as.character(acc_df$name[a])
  max_id <- "999999999999999999"  # larger than any real tweet id -> start at newest
  loop <- 1   # page counter, logging only
  error <- 0  # retry counter, informational only
  repeat {
    # Define specific search query
    query <- c(include_rts = 1, exclude_replies = "true", trim_user = "true",
               include_entities = "false",
               screen_name = user,
               count = max_count,
               max_id = max_id)
    # At first, work with a temporary tweet frame
    current <- twitter_api_call(api_url, query, api_params)
    if (exists("tweets_temp")) rm(tweets_temp)  # FIX: bare rm() warns on the very first pass
    tweets_temp <- fromJSON(correctJSON(current))
    ## START ERROR HANDLING ##
    # Empty API output
    status <- errorEmptyAPI(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Contains "error" column
    status <- errorErrorColumn(tweets_temp)
    if (status == 1) { Sys.sleep(3); error <- error + 1; next }
    if (status == 2) { break }
    # Check if an error code exists (0 means no error)
    code <- errorCheckCode(tweets_temp)
    if (code == 34) {  # page does not exist
      status <- errorCode34()
      if (status == 1) { Sys.sleep(3); error <- error + 1; next }
      if (status == 2) { break }
    }
    if (code == 88) {  # rate limit exceeded
      wait <- errorCode88()
      Sys.sleep(wait)
      next
    }
    ## END ERROR HANDLING ##
    # Delete unnecessary columns and add username and real name to dataframe
    tweets_temp <- tweets_temp[keep]
    tweets_temp <- cbind(user = user, name = name, tweets_temp)
    # Sleep to stay under the 300 queries / 15 min limit
    cat("[", a, "/", nrow(acc_df), "] ", sep = "")
    cat("User: ", user, " in loop: ", loop, ". \n", sep = "")
    Sys.sleep(2)
    # FIX: test nrow() first -- on an empty tweets_full the id comparison
    # yields logical(0) and the `if` errors out.
    if (nrow(tweets_full) > 0 &&
        tweets_full$id_str[nrow(tweets_full)] == tweets_temp$id_str[nrow(tweets_temp)]) {
      cat("[INFO] Last tweet of temp is last tweet of full. Abort loop and begin with next user.\n")
      break
    }
    # Is the last tweet of this page still from 2014? (isTRUE makes NA safe)
    status <- str_detect(tweets_temp$created_at[nrow(tweets_temp)], "2014$")
    if (!isTRUE(status)) {
      # Final page reached: count and strip everything older than 2014
      old <- 0
      for (r in seq_len(nrow(tweets_temp))) {
        status <- str_detect(tweets_temp$created_at[r], "2014$")
        if (is.na(status)) {
          status <- FALSE  # FIX: NA would crash the `if` below; treat as "not from 2014"
          cat("[INFO] NA-Status in Tweet", r)
        }
        if (!status) {  # tweet not from 2014
          old <- old + 1
        }
      }
      if (old > 0) {
        old <- old - 1
        # If even the first entry isn't from 2014, the whole page is old
        status <- str_detect(tweets_temp$created_at[1], "2014$")
        if (!isTRUE(status)) {
          old <- nrow(tweets_temp)
          cat("[INFO] Timeline enhält keinen einzigen aus 2014\n")
        }
        # delete all lines which are older than 2014
        tweets_temp <- head(tweets_temp, -old)
      }
      rm(old)
      tweets_full <- insertRow(tweets_full, tweets_temp)
      break  # End loop because pre-2014 tweets are reached
    } else {
      # The last tweet is still from 2014, so we need another loop.
      # Setting max_id to gather the next page of tweets
      max_id <- tweets_temp$id_str[nrow(tweets_temp)]
      loop <- loop + 1  # just for stats
      tweets_full <- insertRow(tweets_full, tweets_temp)
    }
  } # /repeat
  tweets_complete <- insertRow(tweets_complete, tweets_full)
  tweets_full <- head(tweets_full, -nrow(tweets_full))  # empty the per-user accumulator
  cat("User:", user, "finished after", loop, "loops. Total Tweets now:", nrow(tweets_complete), "\n")
  write.csv(tweets_complete, "tweets_complete.csv")  # checkpoint after every user
  # Every tweet from 2014 from user[a] is downloaded. Now next user in for-loop
}
# Persist the finished collection and spot-check a few ids.
save(tweets_complete, file="tweets_complete.RData")
tweets_complete$id_str[146982]  # spot-check a late row
class(tweets_complete$id_str[146982])  # confirm ids are stored as character (avoids 64-bit precision loss)
tweets_complete$id_str[1]  # spot-check the first row