import requests
from bs4 import BeautifulSoup
import time
from datasets import Dataset
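
# Overview: this script builds a parallel Japanese/English corpus from the
# NVIDIA developer blog. It collects Japanese post URLs from the recent-posts
# page and the yearly archives, maps each URL to its English counterpart by
# dropping the /ja-jp/ path segment, and scrapes the title and body of both
# versions of every article it can pair.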

# Base URLs for English and Japanese versions
BASE_EN = "https://developer.nvidia.com/blog"
BASE_JP = "https://developer.nvidia.com/ja-jp/blog"
JP_RECENT_POSTS = "https://developer.nvidia.com/ja-jp/blog/recent-posts/"

# Archive URLs by year
JP_ARCHIVES = {
    "2025": "https://developer.nvidia.com/ja-jp/blog/2025/",
    "2024": "https://developer.nvidia.com/ja-jp/blog/2024/",
    "2023": "https://developer.nvidia.com/ja-jp/blog/2023/",
    "2022": "https://developer.nvidia.com/ja-jp/blog/2022/",
    "2021": "https://developer.nvidia.com/ja-jp/blog/2021/",
    # Add more years if needed
}

# Custom headers (helps to mimic a browser)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
    'Accept-Language': 'en-US,en;q=0.9,ja;q=0.8',
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
    'Connection': 'keep-alive',
    'Upgrade-Insecure-Requests': '1',
    'Cache-Control': 'max-age=0'
}

def get_article_content(url):
    """
    Fetches the content of an article given its URL.
    Returns a tuple: (title, content). 
    """
    try:
        print(f"Fetching content from: {url}")
        response = requests.get(url, headers=headers, timeout=30)  # timeout so one slow request cannot hang the crawl
        if response.status_code != 200:
            print(f"Failed to fetch {url} (Status code: {response.status_code})")
            return None, None

        soup = BeautifulSoup(response.text, 'html.parser')

        # Find the title
        title_tag = soup.find("h1")
        title = title_tag.get_text(strip=True) if title_tag else "No title found"

        # Find the main content - try different possible content containers
        content_selectors = [
            "div.entry-content", 
            "div.post-content",
            "article.post",
            "div.blog-post-content"
        ]
        
        content = ""
        for selector in content_selectors:
            content_div = soup.select_one(selector)
            if content_div:
                # Remove any script, style elements, and navigation
                for element in content_div.find_all(['script', 'style', 'nav', 'footer']):
                    element.decompose()
                content = content_div.get_text(separator="\n", strip=True)
                break
        
        if not content:
            # Fallback: get the main content area
            main_content = soup.find("main") or soup.find("article") or soup.find("div", id="content")
            if main_content:
                for element in main_content.find_all(['script', 'style', 'nav', 'footer', 'header']):
                    element.decompose()
                content = main_content.get_text(separator="\n", strip=True)
            else:
                content = "No content found"
            
        return title, content

    except Exception as e:
        print(f"Exception occurred while fetching {url}: {e}")
        return None, None

def extract_article_links_from_page(soup):
    """
    Extract article links from a BeautifulSoup object
    """
    links = []
    links_found = False
    
    # Method 1: Look for article headings (h2/h3/h4 tags with links)
    article_headings = soup.find_all(["h2", "h3", "h4"])
    for heading in article_headings:
        link = heading.find("a", href=True)
        if link and link.get('href'):
            href = link.get('href')
            if '/blog/' in href:
                if href.startswith('/'):
                    href = f"https://developer.nvidia.com{href}"
                if href not in links:
                    links.append(href)
                    print(f"Found article from heading: {href}")
                    links_found = True
    
    # Method 2: Look for Japanese "view post" / "read more" link text
    view_post_links = soup.find_all("a", string=["投稿を見る", "記事を読む", "続きを読む"])
    for link in view_post_links:
        href = link.get('href')
        if href and '/blog/' in href:
            if href.startswith('/'):
                href = f"https://developer.nvidia.com{href}"
            if href not in links:
                links.append(href)
                print(f"Found article from view post link: {href}")
                links_found = True
    
    # Method 3: Look for all article links
    if not links_found:
        # Find all links that might be to articles
        all_post_links = soup.find_all("a", href=True)
        for link in all_post_links:
            href = link.get('href')
            if href and '/ja-jp/blog/' in href and '/category/' not in href and '/recent-posts/' not in href:
                # Skip archive links
                if any(f"/ja-jp/blog/{year}" in href for year in JP_ARCHIVES.keys()):
                    if href.count('/') > 5:  # This is likely an article, not just a year archive
                        if href.startswith('/'):
                            href = f"https://developer.nvidia.com{href}"
                        if href not in links:
                            links.append(href)
                            print(f"Found article: {href}")
                            links_found = True
                else:
                    if href.startswith('/'):
                        href = f"https://developer.nvidia.com{href}"
                    if href not in links:
                        links.append(href)
                        print(f"Found article: {href}")
                        links_found = True
    
    return links

def get_articles_from_archive(archive_url, num_articles=100):
    """
    Gets articles from a specific archive URL (like a year archive)
    """
    all_links = []
    page = 1
    
    while len(all_links) < num_articles:
        try:
            if page == 1:
                url = archive_url
            else:
                url = f"{archive_url}page/{page}/"
                
            print(f"Fetching archive page {page}: {url}")
            response = requests.get(url, headers=headers, timeout=30)
            
            if response.status_code != 200:
                print(f"Failed to fetch page {page}: {response.status_code}")
                break
                
            soup = BeautifulSoup(response.text, 'html.parser')
            
            # Extract article links from the page
            page_links = extract_article_links_from_page(soup)
            
            if not page_links:
                print(f"No articles found on page {page}")
                break
                
            # Add new links to our collection
            for link in page_links:
                if link not in all_links:
                    all_links.append(link)
                    if len(all_links) >= num_articles:
                        break
            
            if len(all_links) >= num_articles:
                break
                
            # Check if there's a next page
            next_page = soup.select_one("a.next, .pagination .next a, .nav-links .next")
            if not next_page:
                print("No next page found")
                break
                
            page += 1
            time.sleep(2)  # Be polite
            
        except Exception as e:
            print(f"Error on page {page}: {e}")
            break
            
    return all_links[:num_articles]

def get_japanese_articles(num_articles=1000):
    """
    Gets Japanese articles from all available sources
    """
    all_links = []
    
    # First, get articles from the recent posts page
    print("Getting articles from recent posts page...")
    recent_links = get_articles_from_archive(JP_RECENT_POSTS, num_articles)
    all_links.extend(recent_links)
    print(f"Found {len(recent_links)} articles from recent posts")
    
    # If we need more articles, go through the yearly archives
    if len(all_links) < num_articles:
        for year, url in JP_ARCHIVES.items():
            if len(all_links) >= num_articles:
                break
                
            print(f"\nGetting articles from {year} archive...")
            year_links = get_articles_from_archive(url, num_articles - len(all_links))
            
            # Add new links to our collection
            for link in year_links:
                if link not in all_links:
                    all_links.append(link)
                    if len(all_links) >= num_articles:
                        break
                        
            print(f"Found {len(year_links)} articles from {year}")
            time.sleep(2)  # Be polite between years
    
    return all_links[:num_articles]

def get_blog_posts(lang='ja-jp', num_articles=100):
    """
    Gets blog posts using a different approach - directly searching for post links
    """
    if lang == 'ja-jp':
        # For Japanese, use our specialized function
        return get_japanese_articles(num_articles)
    else:
        # For English, use the regular approach
        base_url = BASE_EN
        all_links = []
        page = 1
        
        while len(all_links) < num_articles:
            try:
                if page == 1:
                    url = base_url
                else:
                    url = f"{base_url}/page/{page}"
                    
                print(f"Fetching blog listing page {page}: {url}")
                response = requests.get(url, headers=headers, timeout=30)
                
                if response.status_code != 200:
                    print(f"Failed to fetch page {page}: {response.status_code}")
                    break
                    
                soup = BeautifulSoup(response.text, 'html.parser')
                
                # Look for blog post links - try different selectors
                links_found = False
                
                # Method 1: Look for article elements
                articles = soup.find_all("article")
                if articles:
                    for article in articles:
                        links = article.find_all("a", href=True)
                        for link in links:
                            href = link['href']
                            if '/blog/' in href:
                                if href.startswith('/'):
                                    href = f"https://developer.nvidia.com{href}"
                                if href not in all_links:
                                    all_links.append(href)
                                    print(f"Found article: {href}")
                                    links_found = True
                
                # Method 2: Look for blog post cards or listings
                if not links_found:
                    post_links = soup.select("a.blog-post-link, a.post-link, .post-title a, .entry-title a")
                    for link in post_links:
                        href = link.get('href')
                        if href and '/blog/' in href:
                            if href.startswith('/'):
                                href = f"https://developer.nvidia.com{href}"
                            if href not in all_links:
                                all_links.append(href)
                                print(f"Found article: {href}")
                                links_found = True
                
                # Method 3: Find all links that might be blog posts
                if not links_found:
                    all_post_links = soup.find_all("a", href=True)
                    for link in all_post_links:
                        href = link.get('href')
                        if href and '/blog/' in href and not href.endswith('/page/') and '/category/' not in href:
                            if href.startswith('/'):
                                href = f"https://developer.nvidia.com{href}"
                            if href not in all_links:
                                all_links.append(href)
                                print(f"Found article: {href}")
                                links_found = True
                
                if not links_found:
                    print(f"No articles found on page {page}")
                    break
                
                if len(all_links) >= num_articles:
                    break
                    
                # Check if there's a next page
                next_page = soup.select_one("a.next, .pagination .next a, .nav-links .next")
                if not next_page:
                    print("No next page found")
                    break
                    
                page += 1
                time.sleep(2)  # Be polite
                
            except Exception as e:
                print(f"Error on page {page}: {e}")
                break
                
        return all_links[:num_articles]

def get_corresponding_url(url, from_lang='ja-jp', to_lang='en-us'):
    """
    Convert URL between languages
    """
    if from_lang == 'ja-jp' and '/ja-jp/' in url:
        return url.replace('/ja-jp/', '/')
    elif from_lang == 'en-us' and '/blog/' in url:
        return url.replace('/blog/', '/ja-jp/blog/')
    return url
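
# Illustrative mapping produced by get_corresponding_url (hypothetical slug):
#   https://developer.nvidia.com/ja-jp/blog/some-post/  ->  https://developer.nvidia.com/blog/some-post/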

if __name__ == '__main__':
    num_articles = 1000  # Adjust as needed

    # Collect Japanese article links first, then pair each one with its English version
    print("Getting Japanese blog posts...")
    jp_links = get_blog_posts(lang='ja-jp', num_articles=num_articles)
    print(f"Found {len(jp_links)} Japanese articles")
    
    article_pairs = []
    
    for jp_link in jp_links:
        try:
            print(f"\nProcessing Japanese article: {jp_link}")
            
            # Get corresponding English URL
            en_link = get_corresponding_url(jp_link, from_lang='ja-jp', to_lang='en-us')
            print(f"Corresponding English URL: {en_link}")
            
            # Get contents from both versions
            jp_title, jp_content = get_article_content(jp_link)
            
            if not jp_title or not jp_content:
                print("Skipping due to missing Japanese content")
                continue
                
            en_title, en_content = get_article_content(en_link)
            
            if not en_title or not en_content:
                print("Skipping due to missing English content")
                continue
            
            # If both pages were fetched successfully, save the pair
            article_pairs.append({
                'en_url': en_link,
                'en_title': en_title,
                'en_content': en_content,
                'jp_url': jp_link,
                'jp_title': jp_title,
                'jp_content': jp_content,
            })
            print(f"Successfully paired: {jp_title}")
            
            # Save progress periodically
            if len(article_pairs) % 10 == 0:
                print(f"Saving progress with {len(article_pairs)} pairs...")
                temp_dataset = Dataset.from_list(article_pairs)
                temp_dataset.to_json(f"nvidia_ja_jp_en_us_dev_blog_dataset_partial_{len(article_pairs)}.jsonl", 
                                    orient="records", force_ascii=False)
            
        except Exception as e:
            print(f"Error processing article pair: {e}")
        
        # Be polite to the server
        time.sleep(2)

    print(f"\nCollected {len(article_pairs)} article pairs")
    
    # Create and save dataset
    if article_pairs:
        hf_dataset = Dataset.from_list(article_pairs)
        
        # Save as Hugging Face dataset
        hf_dataset.save_to_disk("nvidia_ja_jp_dev_blog_dataset")
        
        # Save as JSONL
        hf_dataset.to_json("nvidia_ja_jp_en_us_dev_blog_dataset.jsonl", orient="records", force_ascii=False)
        
        print("Dataset saved successfully")
    else:
        print("No article pairs collected")