basic_crawl.py
"""
Crawl Example
This example demonstrates:
- Using the Crawl SDK to discover and scrape multiple pages
- Configuring crawl scope (domain, subdomain, or path)
- Setting limits and depth for crawling
- Using sitemap discovery
- Filtering pages with include/exclude paths
Site: Y Combinator Jobs (https://www.ycombinator.com/jobs)
"""
import asyncio
import json
import os
import sys

from dotenv import load_dotenv

from maxun import Crawl, Config, CrawlConfig

async def main():
    # The client reads the API key from the environment; the base URL can be
    # overridden for self-hosted deployments.
    crawler = Crawl(Config(
        api_key=os.environ["MAXUN_API_KEY"],
        base_url=os.environ.get("MAXUN_BASE_URL", "https://app.maxun.dev/api/sdk/"),
    ))

    # Create a crawl robot scoped to the whole domain, capped at 10 pages and
    # a link depth of 3, with sitemap-based URL discovery enabled.
    robot = await crawler.create(
        "YC Companies Crawler",
        "https://www.ycombinator.com/jobs",
        CrawlConfig(
            mode="domain",          # crawl scope: "domain", "subdomain", or "path"
            limit=10,               # maximum number of pages to crawl
            max_depth=3,            # maximum link depth from the start URL
            include_paths=[],       # empty filters: crawl everything in scope
            exclude_paths=[],
            use_sitemap=True,       # seed URLs from the site's sitemap
            follow_links=True,      # also follow links found on crawled pages
            respect_robots=True,    # honor robots.txt
        ),
    )
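
    # Illustrative sketch (not part of the original example): the docstring
    # mentions include/exclude path filtering, which the config above leaves
    # empty. A config restricting the crawl to a URL subtree might look like
    # the sketch below; the "path" mode value and the pattern style are
    # assumptions inferred from the parameters above, not confirmed SDK
    # behavior.
    #
    # CrawlConfig(
    #     mode="path",
    #     limit=10,
    #     max_depth=2,
    #     include_paths=["/jobs/"],
    #     exclude_paths=["/companies/"],
    #     use_sitemap=True,
    #     follow_links=True,
    #     respect_robots=True,
    # )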
print(f"Crawl robot created: {robot.id}")
print("Starting crawl...")
result = await robot.run()
print("\n=== Crawl Completed ===")
print(f"Status: {result.get('status')}")
print(f"Run ID: {result.get('runId')}")
crawl_data = (result.get("data") or {}).get("crawlData")
if crawl_data:
if isinstance(crawl_data, dict):
all_pages = []
for value in crawl_data.values():
if isinstance(value, list):
all_pages.extend(value)
print(f"Pages crawled: {len(all_pages)}")
print("\nCrawled URLs:")
for i, page in enumerate(all_pages, 1):
url = (page.get("metadata") or {}).get("url") or page.get("url") or f"Page {i}"
print(f" {i}. {url}")
if (page.get("metadata") or {}).get("title"):
print(f" Title: {page['metadata']['title']}")
if page.get("wordCount"):
print(f" Words: {page['wordCount']}")
elif isinstance(crawl_data, list):
print(f"Pages crawled: {len(crawl_data)}")
print("\nCrawled URLs:")
for i, page in enumerate(crawl_data, 1):
url = (page.get("metadata") or {}).get("url") or page.get("url") or f"Page {i}"
print(f" {i}. {url}")
else:
print(f"Crawl data format: {type(crawl_data)}")
print(f"Crawl data: {json.dumps(crawl_data, indent=2)}")
else:
print("No crawl data found")
print(f"Result data keys: {list((result.get('data') or {}).keys())}")

if __name__ == "__main__":
    load_dotenv()

    if not os.environ.get("MAXUN_API_KEY"):
        print("Error: MAXUN_API_KEY environment variable is required", file=sys.stderr)
        sys.exit(1)

    asyncio.run(main())