Fetch YouTube Comments
🔑 ID: 52560
👨‍💻 Python
🕒 21/06/2024
Free
Description:
This code fetches the comments from any YouTube channel and saves them, along with details about each comment, to a CSV file.
To learn more about how the code works, read this.
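Before running the script, the Google API client libraries need to be installed and an OAuth client secrets JSON file downloaded from the Google Cloud Console (with the YouTube Data API v3 enabled for the project). As a quick sanity check, the imports below should succeed once the packages google-api-python-client, google-auth-oauthlib and google-auth are installed; this snippet is only an environment check, not part of the script itself:

# Environment check: these are the imports the script below relies on.
# Install them with: pip install google-api-python-client google-auth-oauthlib google-auth
from googleapiclient.discovery import build              # builds the YouTube Data API client
from google_auth_oauthlib.flow import InstalledAppFlow   # runs the OAuth "installed app" flow
from google.oauth2.credentials import Credentials        # loads saved credentials (token.json)

print("Google API client libraries are available.")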
Code:
import os
import csv
import json

from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials

SCOPES = ['https://www.googleapis.com/auth/youtube.force-ssl']


def save_state(state):
    # Persist the pagination state so an interrupted run can resume later.
    with open("fetch_state.json", "w") as f:
        json.dump(state, f)


def load_state():
    # Return the saved state from a previous run, or None on a fresh start.
    try:
        with open("fetch_state.json", "r") as f:
            return json.load(f)
    except FileNotFoundError:
        return None


def get_authenticated_service(clients_secret_file):
    # Reuse cached credentials from token.json, refreshing or re-running the
    # OAuth flow only when necessary, then build the YouTube Data API client.
    creds = None
    if os.path.exists("token.json"):
        creds = Credentials.from_authorized_user_file("token.json", SCOPES)
    if not creds or not creds.valid:
        if creds and creds.expired and creds.refresh_token:
            creds.refresh(Request())
        else:
            flow = InstalledAppFlow.from_client_secrets_file(clients_secret_file, SCOPES)
            creds = flow.run_local_server(port=0)
        with open("token.json", "w") as token:
            token.write(creds.to_json())
    return build('youtube', 'v3', credentials=creds)


def fetch_video_privacy(youtube, video_id):
    # Look up whether the video a comment belongs to is public, unlisted or private.
    try:
        request = youtube.videos().list(
            part="status",
            id=video_id
        )
        response = request.execute()
        items = response.get('items', [])
        return items[0]['status']['privacyStatus']
    except Exception as e:
        print(f"Error fetching details: {e}")
        return None


authentication = get_authenticated_service('YOUR_CLIENTS_SECRET_FILE')
channel_id = 'CHANNEL_ID'
start_date = 'YYYY-MM-DDT00:00:00Z'
end_date = 'YYYY-MM-DDT23:59:59Z'

with open('youtube_comments.csv', 'a', newline='', encoding='utf-8') as csvfile:
    writer = csv.writer(csvfile, delimiter=',', quotechar='"', quoting=csv.QUOTE_ALL)
    if csvfile.tell() == 0:
        # Only write the header row when the file is new (or empty).
        writer.writerow(['Comment', 'User', 'Profile Picture', 'Video ID', 'Video Privacy', 'Time', 'Testimonial Worthy'])

    def fetch_comments_over_time(youtube, start_date, end_date):
        # Resume from a previous run if a saved state exists.
        state = load_state()
        if state:
            next_page_token = state.get("next_page_token")
            last_fetched_date = state.get("last_fetched_date") or start_date
            if state.get("last_fetched_date") is not None:
                # Only keep comments older than the last one already saved.
                end_date = state["last_fetched_date"]
        else:
            next_page_token = None
            last_fetched_date = start_date
        all_comments = 0
        try:
            while True:
                # Comment threads are returned newest first (order="time").
                response = youtube.commentThreads().list(
                    part="snippet",
                    allThreadsRelatedToChannelId=channel_id,
                    pageToken=next_page_token,
                    maxResults=100,
                    textFormat="plainText",
                    order="time",
                ).execute()
                for item in response['items']:
                    snippet = item['snippet']['topLevelComment']['snippet']
                    comment = snippet['textDisplay']
                    user_name = snippet['authorDisplayName']
                    user_image = snippet['authorProfileImageUrl']
                    date = snippet['publishedAt']
                    video_id = item['snippet']['videoId']
                    video_privacy = fetch_video_privacy(youtube, video_id)
                    if date < start_date:
                        # Results are ordered newest first, so everything from here on is
                        # older than the requested range; stop via the outer handler.
                        raise Exception("Date out of range")
                    if start_date <= date < end_date:
                        writer.writerow([comment, user_name, user_image, video_id, video_privacy, date, ''])  # Write each comment as it is fetched
                        last_fetched_date = date
                        all_comments += 1
                        print(all_comments)
                        save_state({
                            "next_page_token": next_page_token,
                            "last_fetched_date": last_fetched_date,
                        })
                next_page_token = response.get('nextPageToken')
                if next_page_token:
                    print("Next Page Token: " + next_page_token)
                else:
                    print("No more comments.")
                    break
        except Exception as e:
            print(f"Error during comment fetch: {e}")

    # Defined and called inside the "with" block so the CSV file stays open while fetching.
    fetch_comments_over_time(authentication, start_date, end_date)
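Once the script has run, the saved comments can be inspected straight from the CSV. A minimal sketch (standard library only, assuming the default youtube_comments.csv file name and the header written above) that counts how many saved comments belong to each video:

import csv
from collections import Counter

# Tally saved comments per video using the "Video ID" column written by the script.
counts = Counter()
with open('youtube_comments.csv', newline='', encoding='utf-8') as f:
    for row in csv.DictReader(f):
        counts[row['Video ID']] += 1

for video_id, total in counts.most_common():
    print(video_id, total)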
GitHub Link: ✖️ Not Available
Download File: ✖️ Not Available
If you’re encountering any problems or need further assistance with this code, we’re here to help! Join our community on the forum or Discord for support, tips, and discussion.