"""
uses channel_ids to fetch all uploaded videos on that particular channel and their urls and writes it in a file
and then fetches captions from each of the video and write it into a .txt file
"""

import os
import json
import timeit

from googleapiclient.discovery import build
from dotenv import load_dotenv

load_dotenv()
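# the API key is read from a local .env file under the name 'yt_secret_key'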
api_key = os.getenv('yt_secret_key')
os.chdir('D:/Machine Learning/SLM-Project/')
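# build the YouTube Data API v3 client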
youtube = build('youtube', 'v3', developerKey=api_key)

start_time = timeit.default_timer()

file_path = 'channelIDs.json'
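# channelIDs.json is assumed to hold a JSON array of channel ID strings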
with open(file_path, 'r') as file:
  channelData = json.load(file)

def fetchVideoUrl(channelId):
  videoIds = []

  # fetch the channel's info once to find its 'uploads' playlist
  channelRes = youtube.channels().list(
    part='contentDetails', id=channelId
  ).execute()

  if not channelRes.get('items'):
    return videoIds

  playlistId = channelRes['items'][0]['contentDetails']['relatedPlaylists']['uploads']

  # page through the uploads playlist and collect every videoId
  next_page_token = None
  while True:
    playlistResult = youtube.playlistItems().list(
      part='contentDetails', playlistId=playlistId,
      maxResults=50, pageToken=next_page_token  # the API caps maxResults at 50 per page
    ).execute()

    videoIds.extend(item['contentDetails']['videoId'] for item in playlistResult.get('items', []))

    next_page_token = playlistResult.get('nextPageToken')
    if not next_page_token:
      break

  return videoIds

# collect the videoIds from every channel in a single pass
all_videoIds = []
for channel_id in channelData:
  all_videoIds.extend(fetchVideoUrl(channel_id))

vidFetchTime = timeit.default_timer()

print(f"total videos fetched: {len(all_videoIds)}")
print(f"videos fetched in: {vidFetchTime - start_time} secs")

# convert the collected videoIds into full watch URLs
videoUrls = [f"https://www.youtube.com/watch?v={videoId}" for videoId in all_videoIds]

def convertToJson(results):
  with open('videoUrls.json', 'w') as outfile:
    json.dump(results, outfile, indent=2)
    print('data written in JSON file successfully')

convertToJson(videoUrls)

# get the captions from each video link, if available
from youtube_transcript_api import YouTubeTranscriptApi, TranscriptsDisabled
import logging

# Set up logging
logging.basicConfig(filename='youtube_fetch.log', level=logging.ERROR)

# fetch the raw English transcript of every video that has captions available
def get_captions(videoIds):
  raw_transcripts = []
  videoNo = 0
  for videoId in videoIds:
    try:
      captions = YouTubeTranscriptApi.get_transcript(
        videoId, languages=['en'], preserve_formatting=True
      )
      if captions:
        formatted_captions = [{'text': caption['text']} for caption in captions]
        raw_transcripts.append(formatted_captions)
        videoNo += 1
    except TranscriptsDisabled:
      print(f"Transcripts are disabled for video: {videoId}")
    except Exception as e:
      logging.error(f"Error while fetching captions for video {videoId}: {str(e)}")
  print(f"no of videos that had captions: {videoNo}")
  return raw_transcripts
caption = get_captions(all_videoIds)

capFetchTime = timeit.default_timer()
print(f"captions fetched in: {capFetchTime - vidFetchTime} secs")

# save those captions in a file, all of them in one
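os.makedirs('Data', exist_ok=True)  # make sure the Data/ output directory exists before writing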
with open('Data/captions.txt', 'w', encoding='utf-8') as file:
  for video_captions in caption:
    for line in video_captions:
      file.write(line['text'] + ' ')
      
print('captions file saved successfully!')
writingTime = timeit.default_timer()
print(f"file written in: {writingTime - capFetchTime} secs")

end_time = timeit.default_timer()
totalTime = (end_time - start_time)
print(f"total time taken: {totalTime} secs")