Skip to main content

Tweet Data

Retrieve specific tweet information, content, and engagement metrics by tweet ID or search for tweets based on criteria.

Get Tweet by ID

HTTP Request

GET /x/tweet/{tweet_id}
x-api-key: YOUR_API_KEY
Content-Type: application/json

Path Parameters

tweet_id

  • Type: String
  • Description: Unique tweet identifier
  • Example: 1234567890123456789

Query Parameters

fields

  • Type: String
  • Description: Comma-separated list of fields to include
  • Options: id, text, created_at, author_id, public_metrics, entities, attachments, geo, context_annotations
  • Default: All fields
  • Example: ?fields=id,text,public_metrics

include_user

  • Type: Boolean
  • Description: Include author information in response
  • Default: false
  • Example: ?include_user=true

Search Tweets

HTTP Request

GET /x/tweets/search
x-api-key: YOUR_API_KEY
Content-Type: application/json

Query Parameters

query

  • Type: String
  • Description: Search query string
  • Example: ?query=javascript programming

max_results

  • Type: Integer
  • Description: Maximum number of results to return
  • Default: 10
  • Maximum: 100
  • Example: ?max_results=50

sort_order

  • Type: String
  • Description: Sort order for results
  • Options: relevance, recent
  • Default: relevance
  • Example: ?sort_order=recent

lang

  • Type: String
  • Description: Language code for results
  • Example: ?lang=en

since_id

  • Type: String
  • Description: Return tweets newer than this tweet ID
  • Example: ?since_id=1234567890123456789

until_id

  • Type: String
  • Description: Return tweets older than this tweet ID
  • Example: ?until_id=1234567890123456789

Response Format

Get Tweet Response (200 OK)

{
  "data": {
    "id": "1234567890123456789",
    "text": "Just shipped a new feature! Check it out at https://example.com #programming #javascript",
    "created_at": "2024-01-15T10:30:00Z",
    "author_id": "9876543210987654321",
    "public_metrics": {
      "retweet_count": 25,
      "like_count": 150,
      "reply_count": 12,
      "quote_count": 5,
      "bookmark_count": 8,
      "impression_count": 2500
    },
    "entities": {
      "hashtags": [
        {
          "tag": "programming",
          "start": 65,
          "end": 76
        },
        {
          "tag": "javascript",
          "start": 77,
          "end": 87
        }
      ],
      "urls": [
        {
          "start": 40,
          "end": 63,
          "url": "https://t.co/abcdef123",
          "expanded_url": "https://example.com",
          "display_url": "example.com"
        }
      ],
      "mentions": []
    },
    "attachments": {
      "media_keys": ["media_1234567890123456789"]
    },
    "geo": {
      "place_id": "01a9a39529905f02"
    },
    "context_annotations": [
      {
        "domain": {
          "id": "65",
          "name": "Interests and Hobbies Vertical",
          "description": "A vertical category."
        },
        "entity": {
          "id": "848919371311001600",
          "name": "Computer programming",
          "description": "Computer programming"
        }
      }
    ]
  },
  "includes": {
    "users": [
      {
        "id": "9876543210987654321",
        "username": "example_user",
        "name": "Example User",
        "verified": true
      }
    ],
    "media": [
      {
        "media_key": "media_1234567890123456789",
        "type": "photo",
        "url": "https://pbs.twimg.com/media/1234567890.jpg"
      }
    ]
  },
  "meta": {
    "result_count": 1
  }
}

Search Response (200 OK)

{
  "data": [
    {
      "id": "1234567890123456789",
      "text": "Learning JavaScript is amazing! #javascript #programming",
      "created_at": "2024-01-15T10:30:00Z",
      "author_id": "9876543210987654321",
      "public_metrics": {
        "retweet_count": 15,
        "like_count": 89,
        "reply_count": 8,
        "quote_count": 2
      },
      "entities": {
        "hashtags": [
          {
            "tag": "javascript",
            "start": 30,
            "end": 40
          },
          {
            "tag": "programming",
            "start": 41,
            "end": 52
          }
        ]
      }
    }
  ],
  "meta": {
    "result_count": 1,
    "total_results": 150,
    "next_token": "b26v89c19zqg8o3fo3s1bp19s8f6w3g6"
  },
  "includes": {
    "users": [
      {
        "id": "9876543210987654321",
        "username": "example_user",
        "name": "Example User"
      }
    ]
  }
}

Error Response (404 Not Found)

{
  "error": "Tweet not found",
  "code": 404,
  "details": {
    "tweet_id": "1234567890123456789"
  },
  "timestamp": "2024-01-15T10:30:00Z",
  "requestId": "req_1234567890"
}

Implementation Examples

JavaScript (Node.js)

const axios = require("axios");

/**
 * Fetch a single tweet by ID from the scrape.st X API.
 *
 * @param {string} apiKey - API key sent in the `x-api-key` header.
 * @param {string} tweetId - Unique tweet identifier.
 * @param {{ fields?: string, includeUser?: boolean }} [options] - Optional
 *   query parameters: `fields` is a comma-separated field list and
 *   `includeUser` asks the API to embed author information.
 * @returns {Promise<object>} The parsed API response body.
 * @throws Re-throws any axios error after logging it.
 */
async function getTweet(apiKey, tweetId, options = {}) {
  const { fields, includeUser } = options;

  // Build the query string; omitted options are simply not sent.
  const params = new URLSearchParams();
  if (fields) params.append("fields", fields);
  if (includeUser) params.append("include_user", "true");

  try {
    const url = `https://scrape.st/x/tweet/${tweetId}?${params}`;
    const headers = {
      "x-api-key": apiKey,
      "Content-Type": "application/json",
    };
    const { data: tweetData } = await axios.get(url, { headers });

    console.log(`Retrieved tweet: ${tweetData.data.id}`);

    // Analyze tweet
    analyzeTweet(tweetData.data);

    return tweetData;
  } catch (error) {
    console.error(`Failed to get tweet ${tweetId}:`, error.response?.data || error.message);
    throw error;
  }
}

/**
 * Search tweets via the scrape.st X API.
 *
 * @param {string} apiKey - API key sent in the `x-api-key` header.
 * @param {string} query - Search query string.
 * @param {{ maxResults?: number, sortOrder?: string, lang?: string,
 *   sinceId?: string, untilId?: string }} [options] - Optional filters.
 * @returns {Promise<object>} The parsed search response body.
 * @throws Re-throws any axios error after logging it.
 */
async function searchTweets(apiKey, query, options = {}) {
  try {
    // Map camelCase option names onto the API's snake_case parameters;
    // unset options are skipped.
    const optionMap = {
      max_results: options.maxResults,
      sort_order: options.sortOrder,
      lang: options.lang,
      since_id: options.sinceId,
      until_id: options.untilId,
    };

    const params = new URLSearchParams({ query });
    for (const [name, value] of Object.entries(optionMap)) {
      if (value) params.append(name, value);
    }

    const response = await axios.get(`https://scrape.st/x/tweets/search?${params}`, {
      headers: {
        "x-api-key": apiKey,
        "Content-Type": "application/json",
      },
    });

    const searchData = response.data;
    console.log(`Found ${searchData.meta.result_count} tweets for query: ${query}`);

    return searchData;
  } catch (error) {
    console.error(`Failed to search tweets:`, error.response?.data || error.message);
    throw error;
  }
}

/**
 * Print a human-readable engagement summary for a tweet object to the console.
 *
 * @param {object} tweet - Tweet payload containing at least `id`,
 *   `created_at` (ISO-8601 string), `text`, and `public_metrics`;
 *   `entities` is optional.
 * @returns {undefined} Output goes to the console only.
 */
function analyzeTweet(tweet) {
  console.log("\n=== Tweet Analysis ===");
  console.log(`Tweet ID: ${tweet.id}`);
  console.log(`Created: ${new Date(tweet.created_at).toLocaleString()}`);

  // Only append an ellipsis when the text was actually truncated
  // (the original appended "..." unconditionally).
  const preview = tweet.text.length > 100 ? `${tweet.text.substring(0, 100)}...` : tweet.text;
  console.log(`Text: ${preview}`);

  const metrics = tweet.public_metrics;
  console.log(`\nEngagement Metrics:`);
  console.log(`Likes: ${metrics.like_count.toLocaleString()}`);
  console.log(`Retweets: ${metrics.retweet_count.toLocaleString()}`);
  console.log(`Replies: ${metrics.reply_count.toLocaleString()}`);
  console.log(`Quotes: ${metrics.quote_count.toLocaleString()}`);

  // Engagement rate = interactions / impressions. `impression_count` may be
  // absent (e.g. search results omit it); `undefined > 0` is false, so the
  // rate safely falls back to 0.
  const totalEngagement = metrics.like_count + metrics.retweet_count + metrics.reply_count + metrics.quote_count;
  const engagementRate = metrics.impression_count > 0 ? (totalEngagement / metrics.impression_count) * 100 : 0;
  console.log(`Engagement Rate: ${engagementRate.toFixed(2)}%`);

  // Entity breakdown (hashtags / URLs / mentions), when present.
  if (tweet.entities) {
    console.log(`\nEntities:`);
    if (tweet.entities.hashtags) {
      console.log(`Hashtags: ${tweet.entities.hashtags.map((h) => h.tag).join(", ")}`);
    }
    if (tweet.entities.urls) {
      console.log(`URLs: ${tweet.entities.urls.length}`);
    }
    if (tweet.entities.mentions) {
      console.log(`Mentions: ${tweet.entities.mentions.length}`);
    }
  }
}

// Usage — both helpers are async; without a .catch() a failed request
// becomes an unhandled promise rejection (fatal in recent Node versions).
getTweet("your_api_key_here", "1234567890123456789", {
  includeUser: true,
  fields: "id,text,created_at,public_metrics,entities",
}).catch(() => {
  // Error details are already logged inside getTweet.
});

searchTweets("your_api_key_here", "javascript programming", {
  maxResults: 20,
  sortOrder: "recent",
  lang: "en",
}).catch(() => {
  // Error details are already logged inside searchTweets.
});

Python

import requests
from datetime import datetime

def get_tweet(api_key, tweet_id, **options):
    """Fetch a single tweet by ID from the scrape.st X API.

    Args:
        api_key: API key sent in the ``x-api-key`` header.
        tweet_id: Unique tweet identifier.
        **options: Optional query parameters -- ``fields`` (comma-separated
            field list) and ``include_user`` (bool, embed author info).

    Returns:
        The parsed JSON response body as a dict.

    Raises:
        requests.exceptions.RequestException: Re-raised after logging.
    """
    params = {}
    if 'fields' in options:
        params['fields'] = options['fields']
    if 'include_user' in options:
        # The API expects a lowercase string literal ("true"/"false").
        params['include_user'] = str(options['include_user']).lower()

    headers = {
        "x-api-key": api_key,
        "Content-Type": "application/json"
    }

    try:
        response = requests.get(f"https://scrape.st/x/tweet/{tweet_id}",
                             params=params, headers=headers)
        response.raise_for_status()
        tweet_data = response.json()

        print(f"Retrieved tweet: {tweet_data['data']['id']}")

        # Analyze tweet
        analyze_tweet(tweet_data['data'])

        return tweet_data
    except requests.exceptions.RequestException as error:
        print(f"Failed to get tweet {tweet_id}: {error}")
        # bool(Response) is False for 4xx/5xx statuses, so a plain
        # ``if error.response:`` would skip exactly the error bodies we
        # want to show. Compare against None instead.
        if error.response is not None:
            print(f"Error details: {error.response.text}")
        raise

def search_tweets(api_key, query, **options):
    """Search tweets via the scrape.st X API.

    Args:
        api_key: API key sent in the ``x-api-key`` header.
        query: Search query string.
        **options: Optional query parameters -- ``max_results``,
            ``sort_order``, ``lang``, ``since_id``, ``until_id``.

    Returns:
        The parsed JSON response body as a dict.

    Raises:
        requests.exceptions.RequestException: Re-raised after logging.
    """
    params = {'query': query}
    # Forward only the parameters the endpoint recognizes, unchanged.
    for key in ('max_results', 'sort_order', 'lang', 'since_id', 'until_id'):
        if key in options:
            params[key] = options[key]

    headers = {
        "x-api-key": api_key,
        "Content-Type": "application/json"
    }

    try:
        response = requests.get("https://scrape.st/x/tweets/search",
                             params=params, headers=headers)
        response.raise_for_status()
        search_data = response.json()

        print(f"Found {search_data['meta']['result_count']} tweets for query: {query}")

        return search_data
    except requests.exceptions.RequestException as error:
        print(f"Failed to search tweets: {error}")
        # bool(Response) is False for 4xx/5xx statuses; test against None
        # so HTTP error bodies are actually printed.
        if error.response is not None:
            print(f"Error details: {error.response.text}")
        raise

def analyze_tweet(tweet):
    """Print a human-readable engagement summary for a tweet dict.

    Args:
        tweet: Tweet payload containing at least ``id``, ``created_at``
            (ISO-8601 string), ``text`` and ``public_metrics``;
            ``entities`` is optional.

    Returns:
        None. Output goes to stdout only.
    """
    print("\n=== Tweet Analysis ===")
    print(f"Tweet ID: {tweet['id']}")
    print(f"Created: {datetime.fromisoformat(tweet['created_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"Text: {tweet['text'][:100]}...")

    metrics = tweet['public_metrics']
    print(f"\nEngagement Metrics:")
    print(f"Likes: {metrics['like_count']:,}")
    print(f"Retweets: {metrics['retweet_count']:,}")
    print(f"Replies: {metrics['reply_count']:,}")
    print(f"Quotes: {metrics['quote_count']:,}")

    # Engagement rate = interactions / impressions. Guard against
    # impression_count being absent *or* zero -- the previous
    # presence-only check raised ZeroDivisionError on 0 (the JS example
    # already guards with `> 0`).
    total_engagement = metrics['like_count'] + metrics['retweet_count'] + \
                       metrics['reply_count'] + metrics['quote_count']
    impressions = metrics.get('impression_count', 0)
    engagement_rate = (total_engagement / impressions * 100) if impressions > 0 else 0
    print(f"Engagement Rate: {engagement_rate:.2f}%")

    # Entity breakdown (hashtags / URLs / mentions), when present.
    if 'entities' in tweet:
        print(f"\nEntities:")
        if 'hashtags' in tweet['entities']:
            hashtags = [h['tag'] for h in tweet['entities']['hashtags']]
            print(f"Hashtags: {', '.join(hashtags)}")
        if 'urls' in tweet['entities']:
            print(f"URLs: {len(tweet['entities']['urls'])}")
        if 'mentions' in tweet['entities']:
            print(f"Mentions: {len(tweet['entities']['mentions'])}")

# Usage
# NOTE: these perform live HTTP requests and print their results;
# replace the placeholder key with a real one before running.
get_tweet("your_api_key_here", "1234567890123456789",
          include_user=True,
          fields="id,text,created_at,public_metrics,entities")

search_tweets("your_api_key_here", "javascript programming",
             max_results=20,
             sort_order="recent",
             lang="en")

cURL

# Get specific tweet by its numeric ID
curl -X GET "https://scrape.st/x/tweet/1234567890123456789" \
  -H "x-api-key: YOUR_API_KEY" \
  -H "Content-Type: application/json"

# Get tweet with author information embedded in the response
curl -X GET "https://scrape.st/x/tweet/1234567890123456789?include_user=true" \
  -H "x-api-key: YOUR_API_KEY" \
  -H "Content-Type: application/json"

# Search tweets (note: spaces in the query must be URL-encoded as %20)
curl -X GET "https://scrape.st/x/tweets/search?query=javascript%20programming&max_results=20&sort_order=recent" \
  -H "x-api-key: YOUR_API_KEY" \
  -H "Content-Type: application/json"

Complex Search Queries

// Search with multiple criteria (OR combines terms; either may match)
const complexSearch = await searchTweets(apiKey, "javascript OR programming", {
  maxResults: 50,
  lang: "en",
  sortOrder: "relevance",
});

// Search with exclusions (a leading "-" removes tweets containing the term)
const exclusionSearch = await searchTweets(apiKey, "javascript -spam", {
  maxResults: 30,
  sortOrder: "recent",
});

// Search for specific user tweets ("from:" restricts to one author)
const userSearch = await searchTweets(apiKey, "from:twitter_user javascript", {
  maxResults: 20,
  sortOrder: "recent",
});

Hashtag Analysis

/**
 * Aggregate engagement statistics for a hashtag from recent tweets.
 *
 * @param {string} apiKey - API key passed through to searchTweets.
 * @param {string} hashtag - Hashtag to analyze, without the leading '#'.
 * @param {number} [maxResults=100] - Maximum number of tweets to fetch.
 * @returns {Promise<object>} Totals, averages, the ten most-engaged
 *   tweets, unique contributor count, and the observed time range.
 */
async function analyzeHashtag(apiKey, hashtag, maxResults = 100) {
  const searchQuery = `#${hashtag}`;
  const results = await searchTweets(apiKey, searchQuery, {
    maxResults,
    sortOrder: "recent",
  });

  // An empty search may omit `data` entirely; normalize so the
  // aggregation below cannot crash on undefined.
  const tweets = results.data ?? [];

  const analysis = {
    hashtag: hashtag,
    totalTweets: results.meta.result_count,
    totalLikes: 0,
    totalRetweets: 0,
    totalReplies: 0,
    topTweets: [],
    contributors: new Set(),
    timeRange: {
      earliest: null,
      latest: null,
    },
  };

  for (const tweet of tweets) {
    const metrics = tweet.public_metrics;

    analysis.totalLikes += metrics.like_count;
    analysis.totalRetweets += metrics.retweet_count;
    analysis.totalReplies += metrics.reply_count;

    analysis.contributors.add(tweet.author_id);

    // Track the earliest/latest tweet timestamps seen.
    const tweetTime = new Date(tweet.created_at);
    if (!analysis.timeRange.earliest || tweetTime < analysis.timeRange.earliest) {
      analysis.timeRange.earliest = tweetTime;
    }
    if (!analysis.timeRange.latest || tweetTime > analysis.timeRange.latest) {
      analysis.timeRange.latest = tweetTime;
    }

    // Collect engagement per tweet for the top-tweets ranking below.
    const engagement = metrics.like_count + metrics.retweet_count + metrics.reply_count;
    analysis.topTweets.push({
      id: tweet.id,
      text: tweet.text,
      engagement,
      author: tweet.author_id,
    });
  }

  // Keep only the ten most-engaged tweets.
  analysis.topTweets.sort((a, b) => b.engagement - a.engagement);
  analysis.topTweets = analysis.topTweets.slice(0, 10);

  // Guard the averages against a zero result count (division by zero
  // previously produced NaN averages).
  const count = analysis.totalTweets;
  analysis.averageLikes = count > 0 ? analysis.totalLikes / count : 0;
  analysis.averageRetweets = count > 0 ? analysis.totalRetweets / count : 0;
  analysis.uniqueContributors = analysis.contributors.size;

  return analysis;
}

Error Handling

Rate Limit Handling

/**
 * Fetch a tweet with automatic retry on rate limits and transient errors.
 *
 * @param {string} apiKey - API key passed through to getTweet.
 * @param {string} tweetId - Unique tweet identifier.
 * @param {object} [options] - Passed through to getTweet.
 * @param {number} [maxRetries=3] - Maximum number of attempts.
 * @returns {Promise<object>} The response from getTweet.
 * @throws {Error} When the tweet does not exist (404) or when every
 *   attempt fails. (The previous version silently resolved to undefined
 *   when all attempts were consumed by 429 responses.)
 */
async function getTweetWithRetry(apiKey, tweetId, options = {}, maxRetries = 3) {
  let lastError;
  for (let attempt = 1; attempt <= maxRetries; attempt++) {
    try {
      return await getTweet(apiKey, tweetId, options);
    } catch (error) {
      lastError = error;

      // A missing tweet will never appear on retry; fail immediately.
      if (error.response?.status === 404) {
        throw new Error(`Tweet ${tweetId} not found`);
      }

      if (error.response?.status === 429) {
        // Header carries a Unix epoch (seconds) for when the limit resets.
        const resetEpoch = error.response.headers["x-rate-limit-reset"] || 900;
        const waitTime = parseInt(resetEpoch, 10) * 1000 - Date.now();

        if (waitTime > 0) {
          console.log(`Rate limited, waiting ${Math.ceil(waitTime / 1000)} seconds...`);
          await new Promise((resolve) => setTimeout(resolve, waitTime));
        }
        continue;
      }

      if (attempt === maxRetries) {
        throw error;
      }

      // Exponential backoff: 2s, 4s, 8s, ...
      const delay = Math.pow(2, attempt) * 1000;
      console.log(`Attempt ${attempt} failed, retrying in ${delay}ms...`);
      await new Promise((resolve) => setTimeout(resolve, delay));
    }
  }

  // All attempts were consumed (e.g. by persistent 429s): fail loudly
  // instead of resolving to undefined.
  throw lastError ?? new Error(`Failed to get tweet ${tweetId} after ${maxRetries} attempts`);
}

Best Practices

For Efficient Queries

  • Specific Fields: Request only the fields you need
  • Pagination: Use pagination for large result sets
  • Caching: Cache frequently accessed tweets
  • Batch Processing: Process multiple tweets together

For Search Optimization

  • Specific Queries: Use specific search terms
  • Language Filtering: Filter by language when possible
  • Time Constraints: Use time-based constraints for recent data
  • Result Limits: Limit results to reduce data transfer

For Data Management

  • Data Validation: Validate tweet data before processing
  • Error Handling: Handle tweet not found and rate limit errors
  • Storage Planning: Plan appropriate storage for tweet data
  • Privacy Compliance: Respect user privacy and content rights

This completes the REST API Reference documentation. For streaming capabilities, see the Streams documentation.