% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stream.R
\name{stream_tweets}
\alias{stream_tweets}
\title{Collect a live stream of Twitter data}
\usage{
stream_tweets(
q = "",
timeout = 30,
parse = TRUE,
token = NULL,
file_name = NULL,
verbose = TRUE,
append = TRUE,
...
)
}
\arguments{
\item{q}{Query used to select and customize the streaming collection
method. There are four possible methods (each is sketched in the
Examples section):
\enumerate{
\item The default, \code{q = ""}, returns a small random sample of all
publicly available Twitter statuses.
\item To filter by keyword, provide a comma-separated character string with
the desired phrase(s) and keyword(s).
\item Track users by providing a comma-separated list of user IDs or
screen names.
\item Use four latitude/longitude bounding box points to stream by geo
location. These must be provided as a vector of length 4, e.g.,
\code{c(-125, 26, -65, 49)}.
}}
\item{timeout}{Integer specifying the number of seconds to stream tweets for.
Stream indefinitely with \code{timeout = Inf}.
The stream can be interrupted at any time, and \code{file_name} will still be
a valid file.}
\item{parse}{Use \code{FALSE} to opt out of parsing the tweets.}
\item{token}{Expert use only. Use this to override authentication for
a single API call. In most cases you are better off changing the
default for all calls. See \code{\link[=auth_as]{auth_as()}} for details.}
\item{file_name}{Character string with the name of the file to write to.
If not specified, tweets are written to a temporary file
\code{stream_tweets*.json}.}
\item{verbose}{If \code{TRUE}, display a progress bar.}
\item{append}{If \code{TRUE}, will append to the end of \code{file_name}; if
\code{FALSE}, will overwrite.}
\item{...}{Other arguments passed on as query parameters.}
}
\value{
A tibble with one row per tweet.
}
\description{
Streams public statuses to a file via one of the following four methods:
\enumerate{
\item Sampling a small random subset of all publicly available tweets
\item Filtering via a search-like query (up to 400 keywords)
\item Tracking via a vector of user IDs (up to 5000 user_ids)
\item Location via geo coordinates (1-360 degree location boxes)
}
Learn more in \code{vignette("stream", package = "rtweet")}.
}
\examples{
\dontrun{
# Stream tweets mentioning "#rstats" for 10 seconds
rstats1 <- stream_tweets("#rstats", timeout = 10, file_name = "rstats.json")
rstats1
# Download another 10s worth of data to the same file
rstats2 <- stream_tweets("#rstats", timeout = 10, file_name = "rstats.json",
append = TRUE)
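
# Read the combined file back in later; parse_stream() (referenced in
# the See Also section) re-parses the streamed JSON on disk into tweet data
rstats_all <- parse_stream("rstats.json")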
# Stream tweets about continental USA for 10 seconds
usa <- stream_tweets(location = lookup_coords("usa"), file_name = "usa.json",
timeout = 10)
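
# Sketches of the remaining query methods documented above, using
# placeholder inputs:

# Default random sample of all public statuses (q = "") for 10 seconds
random <- stream_tweets(timeout = 10, file_name = "sample.json")

# Track statuses from specific accounts for 10 seconds
# (the user IDs below are placeholders, not real accounts)
users <- stream_tweets("123456789,987654321", timeout = 10,
  file_name = "users.json")

# Stream by a latitude/longitude bounding box supplied directly as a
# length-4 vector (the continental-USA box from the argument docs)
box <- stream_tweets(c(-125, 26, -65, 49), timeout = 10,
  file_name = "box.json")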
}
}
\references{
Stream: \url{https://developer.twitter.com/en/docs/twitter-api/v1/tweets/sample-realtime/api-reference/get-statuses-sample}

Filter: \url{https://developer.twitter.com/en/docs/twitter-api/v1/tweets/filter-realtime/api-reference/post-statuses-filter}

Filter overview: \url{https://developer.twitter.com/en/docs/twitter-api/v1/tweets/filter-realtime/overview}
}
\seealso{
\code{\link[=parse_stream]{parse_stream()}}.
}