Skip to content

Commit 7360372

Browse files
committed
Add requests-compatible API for drop-in replacement
- Add src/scrappey/requests.py with Response, Session, and requests-like API
- Support get(), post(), put(), delete(), patch(), head(), options()
- Response object mimics requests.Response (text, json(), status_code, etc.)
- Session class for cookie persistence across requests
- Map requests params to Scrappey options (headers, proxies, cookies, etc.)
- Warn on unsupported parameters (files, auth, stream, verify)
- Add examples/python/requests_example.py with comprehensive examples
- Update README with migration guide and usage documentation
- Export requests module from scrappey package
1 parent 40da270 commit 7360372

3 files changed

Lines changed: 1196 additions & 1 deletion

File tree

Lines changed: 256 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,256 @@
1+
"""
Scrappey requests-compatible API example.

This example shows how to use Scrappey as a drop-in replacement for the
popular `requests` library. Simply change your import and your existing
code will work with Scrappey's Cloudflare bypass and antibot capabilities!

Requirements:
    pip install scrappey

Environment:
    Set SCRAPPEY_API_KEY environment variable with your API key
"""

import os
import sys

# =============================================================================
# MIGRATION EXAMPLE
# =============================================================================
#
# Before (using requests):
#     import requests
#     response = requests.get("https://example.com")
#
# After (using Scrappey):
#     from scrappey import requests
#     response = requests.get("https://example.com")
#
# That's it! Your code now uses Scrappey for automatic Cloudflare bypass!
# =============================================================================

from scrappey import requests

# Fail fast with a clear message when the API key is missing.
# sys.exit() is used instead of the bare exit() builtin: exit() comes from the
# site module and is not guaranteed to exist (e.g. under `python -S` or in a
# frozen/embedded interpreter), whereas sys.exit() always is.
if not os.environ.get("SCRAPPEY_API_KEY"):
    print("Please set SCRAPPEY_API_KEY environment variable")
    print("Example: export SCRAPPEY_API_KEY=your_api_key")
    sys.exit(1)
def basic_get_example():
    """Basic GET request - works exactly like requests.get()"""
    print("\n=== Basic GET Request ===\n")

    resp = requests.get("https://httpbin.org/get")

    # The Response object exposes the same attributes as requests.Response.
    print(f"Status Code: {resp.status_code}")
    print(f"OK: {resp.ok}")
    print(f"URL: {resp.url}")
    print(f"Encoding: {resp.encoding}")
    print(f"Elapsed: {resp.elapsed}")
    print(f"Headers: {dict(list(resp.headers.items())[:3])}...")  # First 3 headers

    # Access response body
    print(f"\nResponse text (first 200 chars):\n{resp.text[:200]}...")
def get_with_params_example():
    """GET request with query parameters"""
    print("\n=== GET with Query Parameters ===\n")

    # Query parameters are passed as a dict, exactly like in requests.
    query = {"name": "scrappey", "version": "1.0"}
    resp = requests.get("https://httpbin.org/get", params=query)

    print(f"URL with params: {resp.url}")
    payload = resp.json()
    print(f"Server received args: {payload.get('args', {})}")
def post_form_example():
    """POST request with form data"""
    print("\n=== POST with Form Data ===\n")

    # `data=` sends a form-encoded body, mirroring requests.post().
    form = {"username": "testuser", "password": "testpass"}
    resp = requests.post("https://httpbin.org/post", data=form)

    print(f"Status Code: {resp.status_code}")
    payload = resp.json()
    print(f"Server received form: {payload.get('form', {})}")
def post_json_example():
    """POST request with JSON data"""
    print("\n=== POST with JSON Data ===\n")

    # `json=` serializes the body and sets the Content-Type header for you.
    body = {"key": "value", "nested": {"data": [1, 2, 3]}}
    resp = requests.post("https://httpbin.org/post", json=body)

    print(f"Status Code: {resp.status_code}")
    payload = resp.json()
    print(f"Server received JSON: {payload.get('json', {})}")
def custom_headers_example():
    """Request with custom headers"""
    print("\n=== Custom Headers ===\n")

    # Custom headers are merged into the outgoing request.
    extra_headers = {
        "X-Custom-Header": "my-value",
        "Accept": "application/json",
    }
    resp = requests.get("https://httpbin.org/headers", headers=extra_headers)

    print(f"Status Code: {resp.status_code}")
    payload = resp.json()
    print(f"Server saw headers: {payload.get('headers', {})}")
def session_example():
    """Using sessions for cookie persistence"""
    print("\n=== Session with Cookie Persistence ===\n")

    # Create a session - cookies persist across requests
    sess = requests.Session()

    try:
        # Ask the server to set a cookie; the session stores it.
        print("Setting cookie via /cookies/set...")
        sess.get("https://httpbin.org/cookies/set/session_id/abc123")

        # The stored cookie rides along automatically on the next request.
        print("Verifying cookie in next request...")
        reply = sess.get("https://httpbin.org/cookies")

        payload = reply.json()
        print(f"Cookies in session: {payload.get('cookies', {})}")

        # Session-level headers persist too
        sess.headers.update({"X-Session-Header": "persistent"})
        reply = sess.get("https://httpbin.org/headers")
        payload = reply.json()
        print(f"Session header present: {'X-Session-Header' in str(payload)}")

    finally:
        # Always close the session to clean up Scrappey resources
        sess.close()
        print("Session closed")
def response_methods_example():
    """Demonstrate Response object methods"""
    print("\n=== Response Object Methods ===\n")

    resp = requests.get("https://httpbin.org/json")

    # .json() method
    print(f"JSON data: {resp.json()}")

    # .raise_for_status() - doesn't raise for 200
    try:
        resp.raise_for_status()
    except requests.HTTPError as err:
        print(f"Error: {err}")
    else:
        print("No error raised for 200 status")

    # Test with error status
    print("\nTesting 404 response...")
    not_found = requests.get("https://httpbin.org/status/404")
    print(f"Status: {not_found.status_code}")
    print(f"OK: {not_found.ok}")

    try:
        not_found.raise_for_status()
    except requests.HTTPError as err:
        print(f"HTTPError raised: {err}")
def cookies_example():
    """Sending cookies with request"""
    print("\n=== Cookies ===\n")

    # Per-request cookies are passed as a plain dict.
    jar = {"my_cookie": "cookie_value", "another": "value2"}
    resp = requests.get("https://httpbin.org/cookies", cookies=jar)

    payload = resp.json()
    print(f"Server received cookies: {payload.get('cookies', {})}")

    # Cookies from response
    print(f"Response cookies: {resp.cookies.get_dict()}")
def error_handling_example():
    """Error handling"""
    print("\n=== Error Handling ===\n")

    try:
        reply = requests.get("https://httpbin.org/status/500")
        reply.raise_for_status()
    except requests.HTTPError as err:
        # The raised error carries the Response it was raised for.
        print(f"HTTP Error: {err}")
        print(f"Response status: {err.response.status_code}")

    # Connection errors are also caught
    # try:
    #     response = requests.get("https://invalid-domain-that-does-not-exist.com")
    # except requests.ConnectionError as e:
    #     print(f"Connection Error: {e}")
def cloudflare_protected_site():
    """
    Example: Accessing a Cloudflare-protected site.

    This is where Scrappey shines! Sites protected by Cloudflare, Datadome,
    PerimeterX, etc. are automatically bypassed.
    """
    print("\n=== Cloudflare Protected Site ===\n")

    # This would fail with regular requests, but works with Scrappey!
    resp = requests.get("https://nowsecure.nl/")  # Known CF-protected site

    print(f"Status Code: {resp.status_code}")
    print(f"Successfully bypassed protection: {resp.ok}")
    print(f"Response length: {len(resp.text)} characters")
def main():
    """Run all examples."""
    banner = "=" * 60
    print(banner)
    print("Scrappey requests-compatible API Examples")
    print(banner)

    # Run the basic examples first, then the session demo, then the
    # Cloudflare bypass demo — same order as the sections above.
    examples = (
        basic_get_example,
        get_with_params_example,
        post_form_example,
        post_json_example,
        custom_headers_example,
        cookies_example,
        response_methods_example,
        error_handling_example,
        session_example,
        cloudflare_protected_site,
    )
    for example in examples:
        example()

    print("\n" + banner)
    print("All examples completed!")
    print(banner)


if __name__ == "__main__":
    main()

src/scrappey/__init__.py

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -29,13 +29,25 @@ async def main():
2929
asyncio.run(main())
3030
```
3131
32+
Drop-in replacement for requests library:
33+
```python
34+
# Instead of: import requests
35+
from scrappey import requests
36+
37+
# Use exactly like the requests library
38+
response = requests.get("https://example.com")
39+
print(response.text)
40+
print(response.status_code)
41+
```
42+
3243
Links:
3344
- Website: https://scrappey.com
3445
- Documentation: https://wiki.scrappey.com/getting-started
3546
- Request Builder: https://app.scrappey.com/#/builder
3647
- GitHub: https://github.com/pim97/scrappey-wrapper-python
3748
"""
3849

50+
from . import requests
3951
from .async_client import AsyncScrappey
4052
from .client import Scrappey
4153
from .exceptions import (
@@ -76,12 +88,14 @@ async def main():
7688
WhileAction,
7789
)
7890

79-
__version__ = "1.0.0"
91+
__version__ = "1.0.1"
8092

8193
__all__ = [
8294
# Main clients
8395
"Scrappey",
8496
"AsyncScrappey",
97+
# Requests-compatible API
98+
"requests",
8599
# Exceptions
86100
"ScrappeyError",
87101
"ScrappeyConnectionError",

0 commit comments

Comments
 (0)