# Copyright (c) 2024, NVIDIA CORPORATION.  All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
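
"""Download the Paul Graham essays referenced by the NVIDIA RULER URL list and
concatenate them into a single text corpus."""
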
import asyncio
import glob
import os
import shutil
from functools import cache
from typing import Dict

import html2text
import httpx
from bs4 import BeautifulSoup
from tqdm.asyncio import tqdm as async_tqdm


async def fetch_url(client: httpx.AsyncClient, url: str) -> str:
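    """GET ``url`` with the shared async client and return the response body,
    raising on HTTP errors."""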
    response = await client.get(url)
    response.raise_for_status()
    return response.text


async def process_html_essay(
    client: httpx.AsyncClient, url: str, h: html2text.HTML2Text, temp_folder: str
) -> None:
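    """Download one HTML essay, extract the first ``<font>`` tag (which holds
    the essay body on these pages), and write the html2text conversion to
    ``temp_folder`` with a ``.txt`` extension. Failures are logged, not raised."""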
    filename = url.split("/")[-1].replace(".html", ".txt")
    try:
        content = await fetch_url(client, url)
        soup = BeautifulSoup(content, "html.parser")
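        # paulgraham.com wraps the essay body in a <font> tag; keep only that part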
        specific_tag = soup.find("font")
        if specific_tag:
            parsed = h.handle(str(specific_tag))

            with open(
                os.path.join(temp_folder, filename), "w", encoding="utf-8"
            ) as file:
                file.write(parsed)
    except Exception as e:
        print(f"Failed to download {filename}: {str(e)}")


async def process_text_essay(
    client: httpx.AsyncClient, url: str, temp_folder: str
) -> None:
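    """Download one plain-text essay and write it verbatim to ``temp_folder``.
    Failures are logged, not raised."""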
    filename = url.split("/")[-1]
    try:
        content = await fetch_url(client, url)
        with open(os.path.join(temp_folder, filename), "w", encoding="utf-8") as file:
            file.write(content)
    except Exception as e:
        print(f"Failed to download {filename}: {str(e)}")


async def get_essays() -> Dict[str, str]:
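    """Download every essay from the RULER URL list into temporary folders,
    concatenate them in sorted-filename order, and return the combined corpus
    as ``{"text": ...}``. The temporary folders are removed afterwards."""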
    temp_folder_repo = "essay_repo"
    temp_folder_html = "essay_html"
    os.makedirs(temp_folder_repo, exist_ok=True)
    os.makedirs(temp_folder_html, exist_ok=True)

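    # Configure the HTML-to-Markdown converter: drop images and tables,
    # escape special characters, and emit inline rather than reference links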
    h = html2text.HTML2Text()
    h.ignore_images = True
    h.ignore_tables = True
    h.escape_all = True
    h.reference_links = False
    h.mark_code = False

    url_list = "https://raw.githubusercontent.com/NVIDIA/RULER/main/scripts/data/synthetic/json/PaulGrahamEssays_URLs.txt"

    async with httpx.AsyncClient(timeout=30.0, follow_redirects=True) as client:
        # Fetch URL list
        content = await fetch_url(client, url_list)
        urls = content.splitlines()

        # Separate HTML and text URLs
        html_urls = [url for url in urls if ".html" in url]
        text_urls = [url for url in urls if ".html" not in url]

        # Process HTML essays
        html_tasks = [
            process_html_essay(client, url, h, temp_folder_html) for url in html_urls
        ]
        await async_tqdm.gather(*html_tasks, desc="Downloading HTML essays")

        # Process text essays
        text_tasks = [
            process_text_essay(client, url, temp_folder_repo) for url in text_urls
        ]
        await async_tqdm.gather(*text_tasks, desc="Downloading text essays")

    # Collect results
    files_repo = sorted(glob.glob(os.path.join(temp_folder_repo, "*.txt")))
    files_html = sorted(glob.glob(os.path.join(temp_folder_html, "*.txt")))

    # print(
    #     f"Downloaded {len(files_repo)} essays from `https://github.com/gkamradt/LLMTest_NeedleInAHaystack/`"
    # )
    # print(f"Downloaded {len(files_html)} essays from `http://www.paulgraham.com/`")

    # Combine all texts
    text = ""
    for file in files_repo + files_html:
        with open(file, "r", encoding="utf-8") as f:
            text += f.read()

    # Cleanup
    shutil.rmtree(temp_folder_repo)
    shutil.rmtree(temp_folder_html)

    return {"text": text}


@cache
def get_all_essays() -> Dict[str, str]:
    """Synchronous wrapper for get_essays()"""
    return asyncio.run(get_essays())
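

if __name__ == "__main__":
    # Usage sketch (an illustrative addition, not part of the original module):
    # fetch the combined corpus once and report its size; thanks to @cache,
    # repeated calls return the same dict without re-downloading anything.
    essays = get_all_essays()
    print(f"Corpus size: {len(essays['text']):,} characters")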