YchKhan committed
Commit 387f7f6
1 Parent(s): 3f549d5

Create scrape_3gpp.py

Files changed (1)
  1. scrape_3gpp.py +375 -0
scrape_3gpp.py ADDED
@@ -0,0 +1,375 @@
+ import os
+ import requests
+ import textract  # text extraction from .doc/.ppt/.pdf files (external dependency)
+ from bs4 import BeautifulSoup
+ from urllib.parse import urljoin
+ import pandas as pd
+ import zipfile
+
+ def scrape(url, excel_file, folder_name):
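+     """Download the meeting's .zip documents into folder_name.
+ 
+     If excel_file is given and has an 'Actions' column, only the rows marked
+     'x' are downloaded, with URLs built from the 'TDoc' column (which the
+     file is assumed to carry); a 'File' or 'URL' column is used instead when
+     present. Without a usable Excel file, every .zip link found on the page
+     is fetched. Returns (success, message).
+     """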
+     filenames = []
+     # Check whether an Excel file was provided and actually exists.
+     if excel_file and os.path.exists(excel_file):
+         try:
+             df = pd.read_excel(excel_file)
+ 
+             # If an 'Actions' column is present, keep only the rows marked 'x'
+             # and build the download URLs from the 'TDoc' column.
+             if 'Actions' in df.columns:
+                 df = df[df['Actions'] == 'x']
+                 filenames = [f"{url}/{row['TDoc']}.zip" for _, row in df.iterrows()]
+             elif 'File' in df.columns:
+                 filenames = [f"{url}/{row['File']}.zip" for _, row in df.iterrows()]
+             elif 'URL' in df.columns:
+                 filenames = df['URL'].tolist()
+         except Exception as e:
+             print(f"Error reading Excel file: {e}")
+             # Optionally, handle the error or return a message here
+ 
+     download_directory = folder_name
+     if not os.path.exists(download_directory):
+         os.makedirs(download_directory)
+ 
+     if not filenames:
+         # No Excel file provided, or no valid URLs found in it:
+         # fall back to downloading every .zip linked on the page.
+         print("No Excel file provided, or no valid URLs found in the file.")
+         response = requests.get(url)
+ 
+         # Parse the HTML content of the page
+         soup = BeautifulSoup(response.content, "html.parser")
+ 
+         # Find all <a> tags that have an href attribute (links)
+         links = soup.find_all("a", href=True)
+ 
+         # Keep only the links ending in ".zip"
+         zip_links = [link['href'] for link in links if link['href'].endswith('.zip')]
+ 
+         # Download each zip file
+         for zip_link in zip_links:
+             # Build the absolute URL of the zip file
+             absolute_url = urljoin(url, zip_link)
+ 
+             # Extract the file name from the URL
+             filename = os.path.basename(absolute_url)
+ 
+             # Path where the file will be saved
+             save_path = os.path.join(download_directory, filename)
+ 
+             # Send a GET request to download the file
+             with requests.get(absolute_url, stream=True) as r:
+                 r.raise_for_status()
+                 with open(save_path, 'wb') as f:
+                     for chunk in r.iter_content(chunk_size=8192):
+                         f.write(chunk)
+     else:
+         # Download the files from the filenames list
+         for file_url in filenames:
+             filename = os.path.basename(file_url)
+             save_path = os.path.join(download_directory, filename)
+ 
+             try:
+                 with requests.get(file_url, stream=True) as r:
+                     r.raise_for_status()
+                     with open(save_path, 'wb') as f:
+                         for chunk in r.iter_content(chunk_size=8192):
+                             f.write(chunk)
+             except requests.exceptions.HTTPError as e:
+                 print(f"HTTP error occurred: {file_url}: {e}")
+                 return False, "The 'Actions' column is missing or not written correctly; expected format: 'Actions'"
+ 
+     return True, "Download finished!"
+ 
+ 
+ def extractZip(folder_name):
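+     """Extract every .zip archive found in folder_name into a sibling
+     directory named '<folder_name> extraction', one sub-directory per archive.
+     """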
+     # Directory where the zip files were downloaded
+     download_directory = folder_name
+     extract_directory = folder_name + " extraction"  # Directory where the zip contents will be extracted
+ 
+     # Extract the contents of every zip file in the download directory
+     for zip_file in os.listdir(download_directory):
+         zip_path = os.path.join(download_directory, zip_file)
+         # Check that the file is a zip archive
+         if zip_file.endswith(".zip"):
+             extract_dir = os.path.join(extract_directory, os.path.splitext(zip_file)[0])  # Drop the .zip extension
+ 
+             # Check that the zip file exists
+             if os.path.exists(zip_path):
+                 # Create a directory for the extracted contents if it does not already exist
+                 if not os.path.exists(extract_dir):
+                     os.makedirs(extract_dir)
+ 
+                 # Extract the contents of the zip file
+                 print(f"Extracting {zip_file}")
+                 with zipfile.ZipFile(zip_path, 'r') as zip_ref:
+                     zip_ref.extractall(extract_dir)
+ 
+                 print(f"Extraction finished for {zip_file}")
+             else:
+                 print(f"Zip file {zip_file} not found")
+ 
+     print("All extractions finished!")
+ 
+ def excel3gpp(url):
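+     """Find the first .xlsx/.xls link on the page and save it locally as
+     'guide.xlsx', the TDoc list that extractionPrincipale reads back later.
+     """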
+     response = requests.get(url)
+     response.raise_for_status()  # Raise an exception on HTTP errors
+ 
+     # Use BeautifulSoup to parse the HTML content
+     soup = BeautifulSoup(response.text, 'html.parser')
+ 
+     # Look for Excel file links; assuming they have .xlsx or .xls extensions
+     excel_links = [a['href'] for a in soup.find_all('a', href=True) if a['href'].endswith(('.xlsx', '.xls'))]
+ 
+     # Download the first Excel file found (if any)
+     if excel_links:
+         excel_url = excel_links[0]
+         if not excel_url.startswith('http'):
+             excel_url = urljoin(url, excel_url)  # Resolve relative URLs
+ 
+         # Download the Excel file
+         excel_response = requests.get(excel_url)
+         excel_response.raise_for_status()
+ 
+         # Write the content of the Excel file to a local file named 'guide.xlsx'
+         filepath = 'guide.xlsx'
+         with open(filepath, 'wb') as f:
+             f.write(excel_response.content)
+         print(f'Excel file downloaded and saved as: {filepath}')
+ 
+ 
+ # Helpers that flatten line breaks to a "/n" sentinel and back, so section
+ # markers can be located with plain string searches.
+ def replace_line_breaks(text):
+     return text.replace("\n", "/n")
+ 
+ def remod_text(text):
+     return text.replace("/n", "\n")
+ 
+ def extractionPrincipale(url, excel_file=None):
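+     """Run the full pipeline for one 3GPP meeting page: download the zips
+     (optionally filtered by excel_file), extract them, fetch the meeting's
+     TDoc list as 'guide.xlsx', categorise each document (CR, pCR, LS, WID,
+     SID, DISCUSSION, ...), extract the relevant sections, and save everything
+     to '<meeting>.xlsx'. Returns (file_name, message) on success and
+     (None, message) if the download step fails.
+     """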
+     folder_name = url.split("/")[-2]
+ 
+     result, message = scrape(url, excel_file, folder_name)
+     if result:
+         print("Success:", message)
+     else:
+         return None, message
+ 
+     extractZip(folder_name)
+     excel3gpp(url)
+ 
+     extract_directory = folder_name + " extraction"
+     categories = {
+         "Other": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "CR": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "pCR": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "LS": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "WID": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "SID": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "DISCUSSION": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "pdf": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "ppt": ["URL", "File", "Type", "Title", "Source", "Content"],
+         "pptx": ["URL", "File", "Type", "Title", "Source", "Content"]
+     }
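+ 
+     # Note: each row appended to `data` below carries seven fields,
+     # [URL, File, Type, Title, Source, Status, Content]; the six-column lists
+     # above are only used for category membership checks.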
+     data = []
+     errors_count = 0
+     for folder in os.listdir(extract_directory):
+         folder_path = os.path.join(extract_directory, folder)
+         if os.path.isdir(folder_path):
+             for file in os.listdir(folder_path):
+                 if file == "__MACOSX":
+                     continue
+                 file_path = os.path.join(folder_path, file)
+                 if file.endswith((".pptx", ".ppt", ".pdf", ".docx", ".doc", ".DOCX")):
+                     try:
+                         text = textract.process(file_path).decode('utf-8')
+                     except Exception as e:
+                         print(f"Error processing {file_path}: {e}")
+                         errors_count += 1
+                         continue
+ 
+                     cleaned_text_lines = text.split('\n')
+                     cleaned_text = '\n'.join([line.strip('|').strip() for line in cleaned_text_lines if line.strip()])
+ 
+                     title = ""
+                     debut = ""
+                     pre_title_section = None  # reset for each file
+                     sections = cleaned_text.split("Title:")
+                     if len(sections) > 1:
+                         pre_title_section = sections[0].strip().split()
+                         title = sections[1].strip().split("\n")[0].strip()
+                         debut = sections[0].strip()
+ 
+                     category = "Other"
+                     if file.endswith(".pdf"):
+                         category = "pdf"
+                     elif file.endswith((".ppt", ".pptx")):
+                         category = "ppt"  # ppt and pptx files share one category
+                     elif "CHANGE REQUEST" in debut:
+                         category = "CR"
+                     elif "Discussion" in title:
+                         category = "DISCUSSION"
+                     elif "WID" in title:
+                         category = "WID"
+                     elif "SID" in title:
+                         category = "SID"
+                     elif "LS" in title:
+                         category = "LS"
+                     elif pre_title_section and pre_title_section[-1] == 'pCR':
+                         category = "pCR"
+                     elif "Pseudo-CR" in title:
+                         category = "pCR"
+ 
+                     contenu = ""  # Holds the concatenated text for the 'Content' column
+                     if category in categories:
+                         extracted_content = []
+                         if category == "CR":
+                             reason_for_change = ""
+                             summary_of_change = ""
+                             if len(sections) > 1:
+                                 reason_for_change = sections[1].split("Reason for change", 1)[-1].split("Summary of change")[0].strip()
+                                 summary_of_change = sections[1].split("Summary of change", 1)[-1].split("Consequences if not")[0].strip()
+                             extracted_content.append(f"Reason for change: {reason_for_change}")
+                             extracted_content.append(f"Summary of change: {summary_of_change}")
+                         elif category == "pCR":
+                             # 'pCR' documents: keep everything between "Introduction" and "First Change"
+                             if len(sections) > 1:
+                                 pcr_specific_content = sections[1].split("Introduction", 1)[-1].split("First Change")[0].strip()
+                                 extracted_content.append(f"Introduction: {pcr_specific_content}")
+                         elif category == "LS":
+                             overall_review = ""
+                             if len(sections) > 1:
+                                 overall_review = sections[1].split("Overall description", 1)[-1].strip()
+                             extracted_content.append(f"Overall review: {overall_review}")
+                         elif category in ["WID", "SID"]:
+                             objective = ""
+                             start_index = cleaned_text.find("Objective")
+                             end_index = cleaned_text.find("Expected Output and Time scale")
+                             if start_index != -1 and end_index != -1:
+                                 objective = cleaned_text[start_index + len("Objective"):end_index].strip()
+                             extracted_content.append(f"Objective: {objective}")
+                         elif category == "DISCUSSION":
+                             Discussion = ""
+                             extracted_text = replace_line_breaks(cleaned_text)
+                             start_index_doc_for = extracted_text.find("Document for:")
+                             if start_index_doc_for != -1:
+                                 start_index_word_after_doc_for = start_index_doc_for + len("Document for:")
+                                 end_index_word_after_doc_for = start_index_word_after_doc_for + extracted_text[start_index_word_after_doc_for:].find("/n")
+                                 word_after_doc_for = extracted_text[start_index_word_after_doc_for:end_index_word_after_doc_for].strip()
+                                 result_intro = ''
+                                 result_conclusion = ''
+                                 result_info = ''
+                                 if word_after_doc_for.lower() == "discussion":
+                                     start_index_intro = extracted_text.find("Introduction")
+                                     end_index_intro = extracted_text.find("Discussion", start_index_intro)
+ 
+                                     intro_text = ""
+                                     if start_index_intro != -1 and end_index_intro != -1:
+                                         intro_text = extracted_text[start_index_intro + len("Introduction"):end_index_intro].strip()
+                                         result_intro = remod_text(intro_text)  # Convert the line breaks back
+                                     else:
+                                         result_intro = "Introduction section not found."
+ 
+                                     # Attempt to find "Conclusion"
+                                     start_index_conclusion = extracted_text.find("Conclusion", end_index_intro)
+                                     end_index_conclusion = extracted_text.find("Proposal", start_index_conclusion if start_index_conclusion != -1 else end_index_intro)
+ 
+                                     conclusion_text = ""
+                                     if start_index_conclusion != -1 and end_index_conclusion != -1:
+                                         conclusion_text = extracted_text[start_index_conclusion + len("Conclusion"):end_index_conclusion].strip()
+                                         result_conclusion = remod_text(conclusion_text)
+                                     elif start_index_conclusion == -1:  # "Conclusion" not found, look for "Proposal" directly
+                                         start_index_proposal = extracted_text.find("Proposal", end_index_intro)
+                                         if start_index_proposal != -1:
+                                             end_index_proposal = len(extracted_text)  # Assume the "Proposal" section runs to the end if present
+                                             proposal_text = extracted_text[start_index_proposal + len("Proposal"):end_index_proposal].strip()
+                                             result_conclusion = remod_text(proposal_text)  # Use the "Proposal" content as "Conclusion"
+                                         else:
+                                             result_conclusion = "Conclusion/Proposal section not found."
+                                     else:
+                                         # "Conclusion" exists but there is no "Proposal" to mark its end
+                                         conclusion_text = extracted_text[start_index_conclusion + len("Conclusion"):].strip()
+                                         result_conclusion = remod_text(conclusion_text)
+                                     Discussion = f"Introduction: {result_intro}\nConclusion/Proposal: {result_conclusion}"
+                                 elif word_after_doc_for.lower() == "information":
+                                     start_index_info = extracted_text.find(word_after_doc_for)
+                                     if start_index_info != -1:
+                                         info_to_end = extracted_text[start_index_info + len("Information"):].strip()
+                                         result_info = remod_text(info_to_end)
+                                     Discussion = f"Discussion:{result_info}"
+                                 else:
+                                     Discussion = "The word after 'Document for:' is not 'Discussion', 'DISCUSSION', 'Information', or 'INFORMATION'."
+                             else:
+                                 Discussion = "The phrase 'Document for:' was not found."
+                             extracted_content.append(Discussion)
+                         # Add more categories as needed
+                         contenu = "\n".join(extracted_content)
+ 
+                     # 'Source' and 'Status' are filled in below from the guide.xlsx mapping
+                     source = ""
+                     status = ""
+                     data.append([url + "/" + folder + '.zip', folder, category, title, source, status, contenu])
+ 
+     # After processing all files and directories, read guide.xlsx to map each
+     # 'TDoc' to its 'Source' and 'TDoc Status'
+     guide_df = pd.read_excel('guide.xlsx', usecols=['Source', 'TDoc', 'TDoc Status'])
+     tdoc_source_map = {row['TDoc']: row['Source'] for _, row in guide_df.iterrows()}
+     tdoc_status_map = {row['TDoc']: row['TDoc Status'] for _, row in guide_df.iterrows()}
+     # Update 'Source' and 'Status' in each collected row by matching the file name against 'TDoc'
+     for item in data:
+         nom_du_fichier = item[1]  # the file name is the second element of each row
+         if nom_du_fichier in tdoc_source_map:
+             item[4] = tdoc_source_map[nom_du_fichier]  # 'Source' is the fifth element
+             item[5] = tdoc_status_map[nom_du_fichier]  # 'Status' is the sixth element
+ 
+     # Create a DataFrame with the collected data
+     new_df_columns = ["URL", "File", "Type", "Title", "Source", "Status", "Content"]
+     new_df = pd.DataFrame(data, columns=new_df_columns)
+     try:
+         old_df = pd.read_excel(excel_file)
+ 
+         # Check whether an 'Actions' column exists in the old DataFrame
+         if 'Actions' in old_df.columns:
+             # Update 'Content' and 'URL' in old_df wherever its 'TDoc' matches 'File' in new_df
+             for index, new_row in new_df.iterrows():
+                 match_indices = old_df[old_df['TDoc'] == new_row['File']].index
+                 for i in match_indices:
+                     old_df.at[i, 'Content'] = new_row['Content']
+                     old_df.at[i, 'URL'] = new_row['URL']
+ 
+             df = old_df
+         else:
+             # Without an 'Actions' column, simply concatenate the two DataFrames
+             df = pd.concat([old_df, new_df], axis=0, ignore_index=True)
+     except Exception as e:
+         print("The provided Excel file seems invalid:", e)
+         df = new_df
+ 
+     file_name = url.split("/")[-2] + ".xlsx"
+     # Save the updated DataFrame to Excel
+     df.to_excel(file_name, index=False)
+     return file_name, "Download successful"
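+ 
+ 
+ # Minimal usage sketch. The URL below is a placeholder: any 3GPP meeting
+ # "Docs" page listing the .zip documents should work, optionally with a
+ # status Excel file carrying 'Actions' and 'TDoc' columns.
+ if __name__ == "__main__":
+     output_file, status_message = extractionPrincipale(
+         "https://www.3gpp.org/ftp/meeting/Docs/",  # placeholder URL
+         excel_file=None,
+     )
+     if output_file:
+         print(f"Results written to {output_file}")
+     else:
+         print(f"Extraction failed: {status_message}")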