# main.py (forked from unclecode/crawl4ai)
from fastapi import FastAPI, HTTPException, Request
from fastapi.responses import HTMLResponse, JSONResponse
from fastapi.staticfiles import StaticFiles
from pydantic import BaseModel, HttpUrl
from typing import List, Optional
from crawl4ai.web_crawler import WebCrawler
from crawl4ai.models import UrlModel
import asyncio
from concurrent.futures import ThreadPoolExecutor
import chromedriver_autoinstaller
from functools import lru_cache
from crawl4ai.database import get_total_count, clear_db
import os
import uuid

# Task management: in-memory store mapping task_id -> status/results
tasks = {}

# Configuration
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
MAX_CONCURRENT_REQUESTS = 10  # Adjust this to change the maximum concurrent requests
current_requests = 0
lock = asyncio.Lock()  # Guards current_requests across concurrent handlers

app = FastAPI()

# Mount the pages directory as a static directory
app.mount("/pages", StaticFiles(directory=os.path.join(__location__, "pages")), name="pages")

chromedriver_autoinstaller.install()  # Ensure chromedriver is installed

class UrlsInput(BaseModel):
    urls: List[HttpUrl]
    provider_model: str
    api_token: str
    include_raw_html: Optional[bool] = False
    forced: bool = False
    extract_blocks: bool = True
    word_count_threshold: Optional[int] = 5
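
# Example request body for /crawl and /crawl_async (illustrative values only;
# the provider string and token depend on the LLM backend you configure):
#
#   {
#       "urls": ["https://example.com"],
#       "provider_model": "openai/gpt-3.5-turbo",
#       "api_token": "YOUR_API_TOKEN",
#       "include_raw_html": false,
#       "forced": false,
#       "extract_blocks": true,
#       "word_count_threshold": 5
#   }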

@lru_cache()
def get_crawler():
    # Initialize and return a single shared WebCrawler instance
    return WebCrawler(db_path='crawler_data.db')

@app.get("/", response_class=HTMLResponse)
async def read_index():
    with open(f"{__location__}/pages/index.html", "r") as file:
        html_content = file.read()
    return HTMLResponse(content=html_content, status_code=200)
@app.get("/total-count")
async def get_total_url_count():
count = get_total_count(db_path='crawler_data.db')
return JSONResponse(content={"count": count})
# Add endpoit to clear db
@app.get("/clear-db")
async def clear_database():
clear_db(db_path='crawler_data.db')
return JSONResponse(content={"message": "Database cleared."})
@app.post("/crawl")
async def crawl_urls(urls_input: UrlsInput, request: Request):
global current_requests
# Raise error if api_token is not provided
if not urls_input.api_token:
raise HTTPException(status_code=401, detail="API token is required.")
async with lock:
if current_requests >= MAX_CONCURRENT_REQUESTS:
raise HTTPException(status_code=429, detail="Too many requests - please try again later.")
current_requests += 1
try:
# Prepare URL models for crawling
url_models = [UrlModel(url=url, forced=urls_input.forced) for url in urls_input.urls]
# Use ThreadPoolExecutor to run the synchronous WebCrawler in async manner
with ThreadPoolExecutor() as executor:
loop = asyncio.get_event_loop()
futures = [
loop.run_in_executor(executor, get_crawler().fetch_page, url_model, urls_input.provider_model, urls_input.api_token, urls_input.extract_blocks, urls_input.word_count_threshold)
for url_model in url_models
]
results = await asyncio.gather(*futures)
# if include_raw_html is False, remove the raw HTML content from the results
if not urls_input.include_raw_html:
for result in results:
result.html = None
return {"results": [result.dict() for result in results]}
finally:
async with lock:
current_requests -= 1
@app.post("/crawl_async")
async def crawl_urls(urls_input: UrlsInput, request: Request):
global current_requests
if not urls_input.api_token:
raise HTTPException(status_code=401, detail="API token is required.")
async with lock:
if current_requests >= MAX_CONCURRENT_REQUESTS:
raise HTTPException(status_code=429, detail="Too many requests - please try again later.")
current_requests += 1
task_id = str(uuid.uuid4())
tasks[task_id] = {"status": "pending", "results": None}
try:
url_models = [UrlModel(url=url, forced=urls_input.forced) for url in urls_input.urls]
loop = asyncio.get_running_loop()
loop.create_task(
process_crawl_task(url_models, urls_input.provider_model, urls_input.api_token, task_id, urls_input.extract_blocks)
)
return {"task_id": task_id}
finally:
async with lock:
current_requests -= 1

async def process_crawl_task(url_models, provider, api_token, task_id, extract_blocks_flag):
    try:
        with ThreadPoolExecutor() as executor:
            loop = asyncio.get_running_loop()
            futures = [
                loop.run_in_executor(executor, get_crawler().fetch_page, url_model, provider, api_token, extract_blocks_flag)
                for url_model in url_models
            ]
            results = await asyncio.gather(*futures)
        tasks[task_id] = {"status": "done", "results": results}
    except Exception as e:
        tasks[task_id] = {"status": "failed", "error": str(e)}
@app.get("/task/{task_id}")
async def get_task_status(task_id: str):
task = tasks.get(task_id)
if not task:
raise HTTPException(status_code=404, detail="Task not found")
if task['status'] == 'done':
return {
"status": task['status'],
"results": [result.dict() for result in task['results']]
}
elif task['status'] == 'failed':
return {
"status": task['status'],
"error": task['error']
}
else:
return {"status": task['status']}
if __name__ == "__main__":
import uvicorn
uvicorn.run(app, host="0.0.0.0", port=8000)
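
# Example client usage (a minimal sketch, not part of the server itself; it
# assumes the app is running locally on port 8000 and `requests` is installed):
#
#   import requests, time
#
#   payload = {
#       "urls": ["https://example.com"],
#       "provider_model": "openai/gpt-3.5-turbo",  # illustrative provider string
#       "api_token": "YOUR_API_TOKEN",
#   }
#   # Kick off an async crawl, then poll /task/{task_id} until it settles
#   task_id = requests.post("http://localhost:8000/crawl_async", json=payload).json()["task_id"]
#   while True:
#       task = requests.get(f"http://localhost:8000/task/{task_id}").json()
#       if task["status"] in ("done", "failed"):
#           break
#       time.sleep(1)
#   print(task)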