import json
import re
import time

import requests
from bs4 import BeautifulSoup
from datasets import Dataset

BASE_EN = "https://developer.nvidia.com/blog"
BASE_JP = "https://developer.nvidia.com/ja-jp/blog"
JP_RECENT_POSTS = "https://developer.nvidia.com/ja-jp/blog/recent-posts/"

JP_ARCHIVES = {
    "2025": "https://developer.nvidia.com/ja-jp/blog/2025/",
    "2024": "https://developer.nvidia.com/ja-jp/blog/2024/",
    "2023": "https://developer.nvidia.com/ja-jp/blog/2023/",
    "2022": "https://developer.nvidia.com/ja-jp/blog/2022/",
    "2021": "https://developer.nvidia.com/ja-jp/blog/2021/",
}

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept-Language': 'en-US,en;q=0.9,ja;q=0.8',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'Cache-Control': 'max-age=0'
}

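# Note: a shared requests.Session would reuse the keep-alive connection and apply
# these headers to every request automatically. A minimal sketch (not used below;
# the functions in this script pass `headers` to each requests.get call instead):
#
#     session = requests.Session()
#     session.headers.update(headers)
#     response = session.get(url)
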
def get_article_content(url):
    """
    Fetches the content of an article given its URL.
    Returns a tuple: (title, content), or (None, None) on failure.
    """
    try:
        print(f"Fetching content from: {url}")
        response = requests.get(url, headers=headers)
        if response.status_code != 200:
            print(f"Failed to fetch {url} (Status code: {response.status_code})")
            return None, None

        soup = BeautifulSoup(response.text, 'html.parser')

        # The article title is the first <h1> on the page.
        title_tag = soup.find("h1")
        title = title_tag.get_text(strip=True) if title_tag else "No title found"

        # Try the most specific content containers first.
        content_selectors = [
            "div.entry-content",
            "div.post-content",
            "article.post",
            "div.blog-post-content"
        ]

        content = ""
        for selector in content_selectors:
            content_div = soup.select_one(selector)
            if content_div:
                # Strip non-article elements before extracting the text.
                for element in content_div.find_all(['script', 'style', 'nav', 'footer']):
                    element.decompose()
                content = content_div.get_text(separator="\n", strip=True)
                break

        if not content:
            # Fall back to broader page containers.
            main_content = soup.find("main") or soup.find("article") or soup.find("div", id="content")
            if main_content:
                for element in main_content.find_all(['script', 'style', 'nav', 'footer', 'header']):
                    element.decompose()
                content = main_content.get_text(separator="\n", strip=True)
            else:
                content = "No content found"

        return title, content

    except Exception as e:
        print(f"Exception occurred while fetching {url}: {e}")
        return None, None

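# Example usage (hypothetical post slug, shown for illustration only):
#
#     title, content = get_article_content("https://developer.nvidia.com/blog/<post-slug>/")
#     if title is not None:
#         print(title)
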
def extract_article_links_from_page(soup):
    """
    Extract article links from a BeautifulSoup object.
    """
    links = []
    links_found = False

    # Strategy 1: article titles are usually headings that wrap a link.
    article_headings = soup.find_all(["h2", "h3", "h4"])
    for heading in article_headings:
        link = heading.find("a", href=True)
        if link and link.get('href'):
            href = link.get('href')
            if '/blog/' in href:
                if href.startswith('/'):
                    href = f"https://developer.nvidia.com{href}"
                if href not in links:
                    links.append(href)
                    print(f"Found article from heading: {href}")
                    links_found = True

    # Strategy 2: "view post" style anchors on the Japanese listing pages
    # (the strings mean "View post", "Read the article", "Read more").
    view_post_links = soup.find_all("a", string=["投稿を見る", "記事を読む", "続きを読む"])
    for link in view_post_links:
        href = link.get('href')
        if href and '/blog/' in href:
            if href.startswith('/'):
                href = f"https://developer.nvidia.com{href}"
            if href not in links:
                links.append(href)
                print(f"Found article from view post link: {href}")
                links_found = True

    # Strategy 3: fall back to scanning every anchor that looks like a post URL.
    if not links_found:
        all_post_links = soup.find_all("a", href=True)
        for link in all_post_links:
            href = link.get('href')
            if href and '/ja-jp/blog/' in href and '/category/' not in href and '/recent-posts/' not in href:
                # Year-archive URLs only count as posts if there is a slug after the year.
                if any(f"/ja-jp/blog/{year}" in href for year in JP_ARCHIVES.keys()):
                    if href.count('/') > 5:
                        if href.startswith('/'):
                            href = f"https://developer.nvidia.com{href}"
                        if href not in links:
                            links.append(href)
                            print(f"Found article: {href}")
                            links_found = True
                else:
                    if href.startswith('/'):
                        href = f"https://developer.nvidia.com{href}"
                    if href not in links:
                        links.append(href)
                        print(f"Found article: {href}")
                        links_found = True

    return links

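# The "normalize href and append if new" pattern above repeats several times; a
# small helper such as this sketch could replace it (hypothetical function, not
# used elsewhere in this script):
#
#     def add_link(href, links):
#         if href.startswith('/'):
#             href = f"https://developer.nvidia.com{href}"
#         if href not in links:
#             links.append(href)
#             return True
#         return False
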
def get_articles_from_archive(archive_url, num_articles=100):
    """
    Gets articles from a specific archive URL (like a year archive).
    """
    all_links = []
    page = 1

    while len(all_links) < num_articles:
        try:
            if page == 1:
                url = archive_url
            else:
                url = f"{archive_url}page/{page}/"

            print(f"Fetching archive page {page}: {url}")
            response = requests.get(url, headers=headers)

            if response.status_code != 200:
                print(f"Failed to fetch page {page}: {response.status_code}")
                break

            soup = BeautifulSoup(response.text, 'html.parser')

            page_links = extract_article_links_from_page(soup)

            if not page_links:
                print(f"No articles found on page {page}")
                break

            for link in page_links:
                if link not in all_links:
                    all_links.append(link)
                    if len(all_links) >= num_articles:
                        break

            if len(all_links) >= num_articles:
                break

            # Stop when the listing has no "next page" control.
            next_page = soup.select_one("a.next, .pagination .next a, .nav-links .next")
            if not next_page:
                print("No next page found")
                break

            page += 1
            time.sleep(2)

        except Exception as e:
            print(f"Error on page {page}: {e}")
            break

    return all_links[:num_articles]

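# Pagination note: archive pages are assumed to follow the WordPress-style scheme
# built above, i.e. "<archive_url>page/<n>/", so page 2 of the 2024 archive would
# be requested as:
#
#     https://developer.nvidia.com/ja-jp/blog/2024/page/2/
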
def get_japanese_articles(num_articles=1000):
    """
    Gets Japanese articles from all available sources.
    """
    all_links = []

    # Start with the recent-posts listing.
    print("Getting articles from recent posts page...")
    recent_links = get_articles_from_archive(JP_RECENT_POSTS, num_articles)
    all_links.extend(recent_links)
    print(f"Found {len(recent_links)} articles from recent posts")

    # Then walk the yearly archives until enough links have been collected.
    if len(all_links) < num_articles:
        for year, url in JP_ARCHIVES.items():
            if len(all_links) >= num_articles:
                break

            print(f"\nGetting articles from {year} archive...")
            year_links = get_articles_from_archive(url, num_articles - len(all_links))

            for link in year_links:
                if link not in all_links:
                    all_links.append(link)
                    if len(all_links) >= num_articles:
                        break

            print(f"Found {len(year_links)} articles from {year}")
            time.sleep(2)

    return all_links[:num_articles]

def get_blog_posts(lang='ja-jp', num_articles=100):
    """
    Gets blog post links. Japanese posts are collected through the archive pages;
    for any other language the English blog listing pages are scraped directly.
    """
    if lang == 'ja-jp':
        return get_japanese_articles(num_articles)
    else:
        base_url = BASE_EN
        all_links = []
        page = 1

        while len(all_links) < num_articles:
            try:
                if page == 1:
                    url = base_url
                else:
                    url = f"{base_url}/page/{page}"

                print(f"Fetching blog listing page {page}: {url}")
                response = requests.get(url, headers=headers)

                if response.status_code != 200:
                    print(f"Failed to fetch page {page}: {response.status_code}")
                    break

                soup = BeautifulSoup(response.text, 'html.parser')

                links_found = False

                # Strategy 1: links inside <article> elements.
                articles = soup.find_all("article")
                if articles:
                    for article in articles:
                        links = article.find_all("a", href=True)
                        for link in links:
                            href = link['href']
                            if '/blog/' in href:
                                if href.startswith('/'):
                                    href = f"https://developer.nvidia.com{href}"
                                if href not in all_links:
                                    all_links.append(href)
                                    print(f"Found article: {href}")
                                    links_found = True

                # Strategy 2: common post-title / post-link selectors.
                if not links_found:
                    post_links = soup.select("a.blog-post-link, a.post-link, .post-title a, .entry-title a")
                    for link in post_links:
                        href = link.get('href')
                        if href and '/blog/' in href:
                            if href.startswith('/'):
                                href = f"https://developer.nvidia.com{href}"
                            if href not in all_links:
                                all_links.append(href)
                                print(f"Found article: {href}")
                                links_found = True

                # Strategy 3: fall back to any anchor that looks like a post URL.
                if not links_found:
                    all_post_links = soup.find_all("a", href=True)
                    for link in all_post_links:
                        href = link.get('href')
                        if href and '/blog/' in href and not href.endswith('/page/') and '/category/' not in href:
                            if href.startswith('/'):
                                href = f"https://developer.nvidia.com{href}"
                            if href not in all_links:
                                all_links.append(href)
                                print(f"Found article: {href}")
                                links_found = True

                if not links_found:
                    print(f"No articles found on page {page}")
                    break

                if len(all_links) >= num_articles:
                    break

                # Stop when the listing has no "next page" control.
                next_page = soup.select_one("a.next, .pagination .next a, .nav-links .next")
                if not next_page:
                    print("No next page found")
                    break

                page += 1
                time.sleep(2)

            except Exception as e:
                print(f"Error on page {page}: {e}")
                break

        return all_links[:num_articles]

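# Example: collect the 50 most recent English posts instead (illustrative call;
# the __main__ block below only requests Japanese posts):
#
#     en_links = get_blog_posts(lang='en-us', num_articles=50)
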
def get_corresponding_url(url, from_lang='ja-jp', to_lang='en-us'):
    """
    Convert a blog URL between the Japanese and English versions of the site.
    Note: to_lang is currently unused; the direction is determined by from_lang.
    """
    if from_lang == 'ja-jp' and '/ja-jp/' in url:
        return url.replace('/ja-jp/', '/')
    elif from_lang == 'en-us' and '/blog/' in url:
        return url.replace('/blog/', '/ja-jp/blog/')
    return url

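# Example (hypothetical slug, for illustration only):
#
#     get_corresponding_url("https://developer.nvidia.com/ja-jp/blog/<post-slug>/")
#     # -> "https://developer.nvidia.com/blog/<post-slug>/"
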
if __name__ == '__main__':
    num_articles = 1000

    print("Getting Japanese blog posts...")
    jp_links = get_blog_posts(lang='ja-jp', num_articles=num_articles)
    print(f"Found {len(jp_links)} Japanese articles")

    article_pairs = []

    for jp_link in jp_links:
        try:
            print(f"\nProcessing Japanese article: {jp_link}")

            # Derive the English URL from the Japanese one.
            en_link = get_corresponding_url(jp_link, from_lang='ja-jp', to_lang='en-us')
            print(f"Corresponding English URL: {en_link}")

            jp_title, jp_content = get_article_content(jp_link)

            if not jp_title or not jp_content:
                print("Skipping due to missing Japanese content")
                continue

            en_title, en_content = get_article_content(en_link)

            if not en_title or not en_content:
                print("Skipping due to missing English content")
                continue

            article_pairs.append({
                'en_url': en_link,
                'en_title': en_title,
                'en_content': en_content,
                'jp_url': jp_link,
                'jp_title': jp_title,
                'jp_content': jp_content,
            })
            print(f"Successfully paired: {jp_title}")

            # Periodically save partial progress.
            if len(article_pairs) % 10 == 0:
                print(f"Saving progress with {len(article_pairs)} pairs...")
                temp_dataset = Dataset.from_list(article_pairs)
                temp_dataset.to_json(f"nvidia_ja_jp_en_us_dev_blog_dataset_partial_{len(article_pairs)}.jsonl",
                                     orient="records", force_ascii=False)

        except Exception as e:
            print(f"Error processing article pair: {e}")

        # Be polite between article fetches.
        time.sleep(2)

    print(f"\nCollected {len(article_pairs)} article pairs")

    if article_pairs:
        hf_dataset = Dataset.from_list(article_pairs)

        # Save in Arrow format for datasets.load_from_disk.
        hf_dataset.save_to_disk("nvidia_ja_jp_dev_blog_dataset")

        # Also export as JSON Lines, keeping Japanese text unescaped.
        hf_dataset.to_json("nvidia_ja_jp_en_us_dev_blog_dataset.jsonl", orient="records", force_ascii=False)

        print("Dataset saved successfully")
    else:
        print("No article pairs collected")