From 99188e04e48a7a14a5e221fe404374169c2c0fb6 Mon Sep 17 00:00:00 2001
From: "pixeebot[bot]" <104101892+pixeebot[bot]@users.noreply.github.com>
Date: Fri, 7 Feb 2025 03:26:53 +0000
Subject: [PATCH] Sandbox URL Creation

---
 pyproject.toml                                 |  1 +
 .../02_xml/02_html/e_parse_html.py             |  4 +--
 python3/12_Logging/d_sentry/a_example.py       |  5 ++-
 .../05_mocking/example_3/api_access.py         |  4 +--
 .../05_mocking/example_4/blog_script.py        |  4 +--
 .../06_pytest/monkey_patching_test.py          |  3 +-
 .../PracticalProblems/da_web_crawler.py        |  5 ++-
 .../PracticalProblems/db_web_crawler.py        |  5 ++-
 .../01weatherWithOutAuthentication.py          |  5 ++-
 .../02_requests/NASA/nasa_client.py            |  9 +++--
 .../02_requests/access_sharepoint.py           |  5 ++-
 .../02_requests/all_request_methods.py         | 35 ++++++++++---------
 .../02_requests/b_streaming_request.py         |  5 ++-
 .../02_requests/currency_ex.py                 |  7 ++--
 .../02_requests/downloadWebPage.py             |  4 +--
 .../02_requests/duckduckgo_ex2.py              |  4 +--
 .../a_consuming_APIs/02_requests/example.py    |  5 ++-
 .../a_consuming_APIs/02_requests/example2.py   |  5 ++-
 .../a_consuming_APIs/02_requests/example_1.py  |  5 ++-
 .../02_requests/google_search.py               |  4 +--
 .../02_requests/ip2geolocation.py              |  5 ++-
 .../02_requests/openstreetmap.py               |  5 ++-
 .../02_requests/openstreetmap2.py              |  5 ++-
 .../answers/answers_01.py                      | 14 ++++----
 .../answers/answers_02.py                      |  6 ++--
 .../answers/answers_05.py                      | 11 +++---
 .../answers/answers_06.py                      | 10 +++---
 .../examples/examples_01.py                    | 14 ++++----
 .../examples/examples_02.py                    |  6 ++--
 .../examples/examples_05.py                    |  9 +++--
 .../examples/examples_06.py                    | 12 +++----
 .../exercises/exercises_01.py                  |  5 ++-
 .../02_requests/z_bitcoin_status.py            |  5 ++-
 .../c_get_my_ip_details.py                     |  4 +--
 .../a_consuming_APIs/b_ISS/a_iss_now.py        |  5 ++-
 .../b_ISS/b_astros_on_ISS_now.py               |  5 ++-
 .../b_ISS/c_iss_passage_info.py                |  5 ++-
 .../c_managing_headers/a_get_ip.py             |  5 ++-
 .../c_managing_headers/b_get_user_agent.py     |  5 ++-
 .../c_changed_user_agent.py                    |  5 ++-
 .../e_http_status_codes/a_cats.py              |  5 ++-
 .../e_http_status_codes/b_dogs.py              |  5 ++-
 .../f_request_methods/a_example.py             | 35 ++++++++++---------
 .../f_request_methods/b_json_test_api.py       |  5 ++-
 .../g_downloading_files/get_xml_data.py        |  5 ++-
 .../i_api_authentication/basic_auth.py         |  5 ++-
 .../a_get_webpage/scrap_billboards.py          |  4 +--
 .../d_web_scraping/covid_cases.py              |  5 ++-
 python3/16_Web_Services/d_web_scraping/gaana.py |  5 ++-
 .../get_top10_google_searches.py               |  4 +--
 .../d_web_scraping/linkedin_scraper.py         |  5 ++-
 .../d_web_scraping/one_million_websites.py     |  5 ++-
 .../d_web_scraping/python_planet_indexes.py    |  4 +--
 .../d_web_scraping/stackoverflow_tags.py       |  5 ++-
 .../d_web_scraping/stock_scraper.py            |  5 ++-
 .../d_web_scraping/tweets_scrapper.py          |  7 ++--
 .../d_web_scraping/webScraping4.py             |  4 +--
 .../d_web_scraping/webScraping5.py             |  5 ++-
 .../d_web_scraping/youtubeSentiment.py         |  5 ++-
 .../d_using_flask/f_Authentication/app.py      |  4 +--
 .../using_fastapi/project6_TODO_client.py      |  3 +-
 .../h_feedparsing/parse_RSS_feed.py            |  5 ++-
 .../h_feedparsing/rdflib_sparql_access.py      | 25 ++-----------
 .../k_Projects/download_files.py               |  7 ++--
 .../b_requests_layer_usage.py                  |  4 +--
 .../a1_get_random_person.py                    |  4 +--
 .../c_process_pool_usage.py                    |  4 +--
 .../g_thread_pool_usage.py                     |  4 +--
 .../zip_codes_based_scrapping.py               |  5 ++-
 python3/Projects/ISS/main.py                   |  5 ++-
 python3/Projects/flask_mongo/src/test_api.py   |  7 ++--
 .../Projects/flask_mongo/src/usage_client.py   |  4 +--
 python3/Projects/icc_cricket-notifications.py  |  4 +--
 python3/Projects/news_notifier.py              |  6 ++--
 74 files changed, 210 insertions(+), 269 deletions(-)
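
Reviewer note (this area below the diffstat is ignored by `git am`):
every hunk swaps requests.get() for safe_requests.get() from the
`security` package, newly pinned as security==1.3.1 in pyproject.toml.
Only GET calls are rewritten; POST/PUT/DELETE calls and
`requests.exceptions` stay on plain `requests`, which is why several
files keep both imports. A minimal sketch of the before/after pattern,
assuming only what the hunks themselves show — that safe_requests.get
mirrors the requests.get signature (URL plus params/headers/auth/
stream/cookies/verify/timeout) — with the URL taken from
e_parse_html.py below:

    # Before: plain requests; the URL is fetched with no checks.
    import requests
    page = requests.get("https://html.com/", timeout=60)

    # After: same call shape, routed through the hardened wrapper,
    # which (per the "Sandbox URL Creation" subject) is presumed to
    # vet the URL before connecting; the exact policy lives in the
    # security package, not in this patch.
    from security import safe_requests
    page = safe_requests.get("https://html.com/", timeout=60)

    print(page.status_code)  # the Response object is used as before
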
diff --git a/pyproject.toml b/pyproject.toml
index bc0c066c..93dfdaeb 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -13,6 +13,7 @@
 pytest = "^7.1.3"
 requests = "^2.28.1"
 jupyter = "^1.0.0"
 ipython = "^8.5.0"
+security = "==1.3.1"
 
 [tool.poetry.group.dev.dependencies]
diff --git a/python3/11_File_Operations/02_structured_files/02_xml/02_html/e_parse_html.py b/python3/11_File_Operations/02_structured_files/02_xml/02_html/e_parse_html.py
index 3a86faf2..5e4788ee 100644
--- a/python3/11_File_Operations/02_structured_files/02_xml/02_html/e_parse_html.py
+++ b/python3/11_File_Operations/02_structured_files/02_xml/02_html/e_parse_html.py
@@ -3,10 +3,10 @@
     Purpose: HtML parsing, using lxml
         - lxml has simple syntax and faster in performance
 """
-import requests
 from lxml import html
+from security import safe_requests
 
-page = requests.get("https://html.com/", timeout=60)
+page = safe_requests.get("https://html.com/", timeout=60)
 tree = html.fromstring(page.content)
 
 with open("html_webpage.html", mode="w", encoding="utf-8") as f:
diff --git a/python3/12_Logging/d_sentry/a_example.py b/python3/12_Logging/d_sentry/a_example.py
index fa5dbc4e..056dd508 100644
--- a/python3/12_Logging/d_sentry/a_example.py
+++ b/python3/12_Logging/d_sentry/a_example.py
@@ -1,9 +1,8 @@
 import logging
 import time
-
-import requests
 import sentry_sdk
 from sentry_sdk.integrations.logging import LoggingIntegration
+from security import safe_requests
 
 # Initialize Sentry
 sentry_logging = LoggingIntegration(
@@ -87,7 +86,7 @@
 # Example 9: Tracking HTTP requests:
 def get_weather_info():
-    response = requests.get(
+    response = safe_requests.get(
         "https://api.openweathermap.org/data/2.5/weather?q=London&appid=YOUR_API_KEY",
         timeout=60,
     )
diff --git a/python3/14_Code_Quality/05_mocking/example_3/api_access.py b/python3/14_Code_Quality/05_mocking/example_3/api_access.py
index 912bfc8f..c374fecd 100644
--- a/python3/14_Code_Quality/05_mocking/example_3/api_access.py
+++ b/python3/14_Code_Quality/05_mocking/example_3/api_access.py
@@ -1,6 +1,6 @@
-import requests
+from security import safe_requests
 
 
 def api():
-    response = requests.get("https://www.google.com/", timeout=60)
+    response = safe_requests.get("https://www.google.com/", timeout=60)
     return response.status_code
diff --git a/python3/14_Code_Quality/05_mocking/example_4/blog_script.py b/python3/14_Code_Quality/05_mocking/example_4/blog_script.py
index 213e15eb..76248396 100644
--- a/python3/14_Code_Quality/05_mocking/example_4/blog_script.py
+++ b/python3/14_Code_Quality/05_mocking/example_4/blog_script.py
@@ -1,4 +1,4 @@
-import requests
+from security import safe_requests
 
 
 class Blog:
@@ -6,7 +6,7 @@ def __init__(self, name):
         self.name = name
 
     def posts(self):
-        response = requests.get(
+        response = safe_requests.get(
             "https://jsonplaceholder.typicode.com/posts", timeout=60
         )
diff --git a/python3/14_Code_Quality/06_pytest/monkey_patching_test.py b/python3/14_Code_Quality/06_pytest/monkey_patching_test.py
index 5750bea6..bea0eadc 100644
--- a/python3/14_Code_Quality/06_pytest/monkey_patching_test.py
+++ b/python3/14_Code_Quality/06_pytest/monkey_patching_test.py
@@ -1,9 +1,10 @@
 import requests
+from security import safe_requests
 
 
 def get_number_fact(number):
     url = f"http://numbersapi.com/{number}?json"
-    response = requests.get(url, timeout=60)
+    response = safe_requests.get(url, timeout=60)
     json_resp = response.json()
 
     if json_resp["found"]:
diff --git a/python3/15_Regular_Expressions/PracticalProblems/da_web_crawler.py b/python3/15_Regular_Expressions/PracticalProblems/da_web_crawler.py
index 35e4caba..d20780b0 100644
--- a/python3/15_Regular_Expressions/PracticalProblems/da_web_crawler.py
+++ b/python3/15_Regular_Expressions/PracticalProblems/da_web_crawler.py
@@ -1,13 +1,12 @@
 import re
-
-import requests
+from security import safe_requests
 
 # get url
 # url = input("Enter a URL (include `http://`): ")
 url = "https://stackoverflow.com"
 
 # connect to the url
-website = requests.get(url, timeout=60)
+website = safe_requests.get(url, timeout=60)
 
 # read html
 html = website.text
diff --git a/python3/15_Regular_Expressions/PracticalProblems/db_web_crawler.py b/python3/15_Regular_Expressions/PracticalProblems/db_web_crawler.py
index 0b7b66fd..b6f565cf 100644
--- a/python3/15_Regular_Expressions/PracticalProblems/db_web_crawler.py
+++ b/python3/15_Regular_Expressions/PracticalProblems/db_web_crawler.py
@@ -1,13 +1,12 @@
 import re
-
-import requests
+from security import safe_requests
 
 # get url
 # url = input("Enter a URL (include `http://`): ")
 url = "https://stackoverflow.com"
 
 # connect to the url
-website = requests.get(url, timeout=60)
+website = safe_requests.get(url, timeout=60)
 
 # read html
 html = website.text
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/01weatherWithOutAuthentication.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/01weatherWithOutAuthentication.py
index 436fba9f..6d20bccd 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/01weatherWithOutAuthentication.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/01weatherWithOutAuthentication.py
@@ -16,14 +16,13 @@
 """
 import json
-
-import requests
+from security import safe_requests
 
 # pip install -U requests
 
 
 def get_data_n_write_to_file(URL):
-    response = requests.get(URL, timeout=60)
+    response = safe_requests.get(URL, timeout=60)
     print(response.status_code)
     if 200 <= response.status_code < 300:
         # storing in a json file
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/NASA/nasa_client.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/NASA/nasa_client.py
index 3354baff..c49b259d 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/NASA/nasa_client.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/NASA/nasa_client.py
@@ -1,7 +1,6 @@
 # -*- coding: utf-8 -*-
 from pprint import pprint
-
-import requests
+from security import safe_requests
 
 # TO get API key, sign-up at https://api.nasa.gov/index.html#apply-for-an-api-key
 NASA_API_KEY = "jdm0WrfEUq3rfZnUP8XYhvAU7QEnG7SUNY1lmiHP"
@@ -25,7 +24,7 @@
 }
 URL = API_ROOT + SEARCH_ENDPOINT
-response = requests.get(URL, params=request_params, timeout=60)
+response = safe_requests.get(URL, params=request_params, timeout=60)
 # pprint(response.json())
 with open("nasa_search_response.json", "w") as f:
     f.write(response.text)
@@ -33,7 +32,7 @@
 # media details
 URL = API_ROOT + ASSET_ENDPOINT.format(nasa_id="as11-40-5874")
-response = requests.get(URL, timeout=60)
+response = safe_requests.get(URL, timeout=60)
 pprint(response.json())
 
 response_data = response.json()
@@ -42,7 +41,7 @@
     image_urls.append(each.get("href"))
 
 for each_image_url in image_urls:
-    response_image = requests.get(each_image_url, timeout=60)
+    response_image = safe_requests.get(each_image_url, timeout=60)
     image_name = each_image_url.split("/")[-1]
     if response.headers["content-type"] == "application/json":
         with open(f"{image_name}", "wb") as g:
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/access_sharepoint.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/access_sharepoint.py
index fb69e129..eee10bb8 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/access_sharepoint.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/access_sharepoint.py
@@ -4,8 +4,7 @@
 """
 import sys
 import urllib
-
-import requests
+from security import safe_requests
 
 
 def download_sharepoint_file(sharepoint_url, username, password, file_path):
@@ -15,7 +14,7 @@ def download_sharepoint_file(sharepoint_url, username, password, file_path):
 
     file_path = file_path.replace(" ", "%20")
 
-    response = requests.get(
+    response = safe_requests.get(
         file_path,
         auth=HttpNtlmAuth(domain + "\\" + username, password),
         stream=True,
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/all_request_methods.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/all_request_methods.py
index d30d3789..bd6d159d 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/all_request_methods.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/all_request_methods.py
@@ -1,10 +1,11 @@
 import requests
+from security import safe_requests
 
 # Base URL for the HTTPBin API
 base_url = "https://httpbin.org"
 
 # Send a GET request to the /get endpoint
-response = requests.get(f"{base_url}/get", timeout=60)
+response = safe_requests.get(f"{base_url}/get", timeout=60)
 print(response.json())
 
 # Send a POST request to the /post endpoint with a JSON payload
@@ -35,42 +36,42 @@
 print(response.json())
 
 # Send a GET request to the /status/{code} endpoint to simulate an error
-response = requests.get(f"{base_url}/status/404", timeout=60)
+response = safe_requests.get(f"{base_url}/status/404", timeout=60)
 print(response.status_code)
 
 # Send a GET request to the /stream/{n} endpoint to stream n lines of text
-response = requests.get(f"{base_url}/stream/5", stream=True, timeout=60)
+response = safe_requests.get(f"{base_url}/stream/5", stream=True, timeout=60)
 for line in response.iter_lines():
     print(line.decode())
 
 # Send a GET request to the /delay/{n} endpoint to simulate a delay of n seconds
-response = requests.get(f"{base_url}/delay/3", timeout=60)
+response = safe_requests.get(f"{base_url}/delay/3", timeout=60)
 print(response.json())
 
 # Send a GET request to the /bytes/{n} endpoint to retrieve n bytes of random data
-response = requests.get(f"{base_url}/bytes/10", timeout=60)
+response = safe_requests.get(f"{base_url}/bytes/10", timeout=60)
 print(response.content)
 
 # Send a GET request to the /cookies endpoint to retrieve cookies set by the server
-response = requests.get(f"{base_url}/cookies", timeout=60)
+response = safe_requests.get(f"{base_url}/cookies", timeout=60)
 print(response.cookies)
 
 # Send a GET request to the /cookies/set endpoint to set a cookie
-response = requests.get(
+response = safe_requests.get(
     f"{base_url}/cookies/set", cookies={"name": "Alice"}, timeout=60
 )
 print(response.cookies)
 
 # Send a GET request to the /basic-auth/{user}/{passwd} endpoint to authenticate with HTTP basic authentication
-response = requests.get(
+response = safe_requests.get(
     f"{base_url}/basic-auth/user/passwd", auth=("user", "passwd"), timeout=60
 )
 print(response.json())
 
 # Send a GET request to the /bearer endpoint to authenticate with a bearer token
 headers = {"Authorization": "Bearer mytoken"}
-response = requests.get(f"{base_url}/bearer", headers=headers, timeout=60)
+response = safe_requests.get(f"{base_url}/bearer", headers=headers, timeout=60)
safe_requests.get(f"{base_url}/bearer", headers=headers, timeout=60) print(response.json()) # Send a POST request to the /anything endpoint to receive and return any data @@ -89,13 +90,13 @@ print(response.content) # Send a GET request to the /redirect/{n} endpoint to follow n redirects -response = requests.get(f"{base_url}/redirect/3", timeout=60) +response = safe_requests.get(f"{base_url}/redirect/3", timeout=60) print(response.history) print(response.url) # Send a GET request to the /redirect-to endpoint to redirect to a URL params = {"url": "https://www.google.com"} -response = requests.get(f"{base_url}/redirect-to", params=params, timeout=60) +response = safe_requests.get(f"{base_url}/redirect-to", params=params, timeout=60) print(response.url) # Send a POST request to the /post endpoint with a file upload @@ -104,31 +105,31 @@ print(response.json()) # Send a GET request to the /user-agent endpoint to retrieve the user agent string -response = requests.get(f"{base_url}/user-agent", timeout=60) +response = safe_requests.get(f"{base_url}/user-agent", timeout=60) print(response.json()) # Send a GET request to the /ip endpoint to retrieve the client's IP address -response = requests.get(f"{base_url}/ip", timeout=60) +response = safe_requests.get(f"{base_url}/ip", timeout=60) print(response.json()) # Send a GET request to the /headers endpoint with custom headers headers = {"X-My-Header": "123"} -response = requests.get(f"{base_url}/headers", headers=headers, timeout=60) +response = safe_requests.get(f"{base_url}/headers", headers=headers, timeout=60) print(response.json()) # Send a GET request to the /hidden-basic-auth/{user}/{passwd} endpoint to authenticate with HTTP basic authentication -response = requests.get( +response = safe_requests.get( f"{base_url}/hidden-basic-auth/user/passwd", auth=("user", "passwd"), timeout=60 ) print(response.json()) # Send a GET request to the /digest-auth/{qop}/{user}/{passwd}/{algorithm} endpoint to authenticate with digest authentication -response = requests.get( +response = safe_requests.get( f"{base_url}/digest-auth/auth/user/passwd/MD5", auth=("user", "passwd"), timeout=10 ) print(response.json()) # Send a GET request to the /bearer-auth/{token} endpoint to authenticate with a bearer token headers = {"Authorization": "Bearer mytoken"} -response = requests.get(f"{base_url}/bearer-auth/mytoken", headers=headers, timeout=60) +response = safe_requests.get(f"{base_url}/bearer-auth/mytoken", headers=headers, timeout=60) print(response.json()) diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/b_streaming_request.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/b_streaming_request.py index 93e6a2e3..dabf9f1b 100644 --- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/b_streaming_request.py +++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/b_streaming_request.py @@ -1,6 +1,5 @@ import json - -import requests +from security import safe_requests # r = requests.get('https://httpbin.org/stream/5', stream=True) @@ -26,7 +25,7 @@ print("\n\n\n") -r = requests.get("https://httpbin.org/stream/5", stream=True, timeout=60) +r = safe_requests.get("https://httpbin.org/stream/5", stream=True, timeout=60) if r.encoding is None: r.encoding = "utf-8" diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/currency_ex.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/currency_ex.py index 40f65765..88543980 100644 --- 
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/currency_ex.py
@@ -4,8 +4,7 @@
     https://currencylayer.com/quickstart
 """
 from datetime import datetime
-
-import requests
+from security import safe_requests
 
 ACCESS_KEY = "96b5ca6a3116caa7a9b8985fd294243e"
 API_URL = "http://www.apilayer.net/api/"
@@ -13,7 +12,7 @@
 def get_live_currency_quote():
     URL = API_URL + "live?access_key=" + ACCESS_KEY
-    response = requests.get(URL, timeout=60)
+    response = safe_requests.get(URL, timeout=60)
     # pprint(response.json())
     quotes = response.json().get("quotes")
     USDINR_quote = quotes.get("USDINR")
@@ -36,7 +35,7 @@ def get_live_currency_quote2(requesting_data="live"):
         "format": 1,
         # 'source': 'INR'
     }
-    response = requests.get(URL, params=request_params, timeout=60).json()
+    response = safe_requests.get(URL, params=request_params, timeout=60).json()
 
     if response.get("error", {}):
         return response.get("error", {}).get("info")
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/downloadWebPage.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/downloadWebPage.py
index 1e548f45..b785079d 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/downloadWebPage.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/downloadWebPage.py
@@ -1,4 +1,4 @@
-import requests
+from security import safe_requests
 
 
 def download(url):
@@ -6,7 +6,7 @@ def download(url):
 
     :arg url: URL of the file to be downloaded.
     """
-    req = requests.get(url, timeout=60)
+    req = safe_requests.get(url, timeout=60)
     # First let us check non existing files.
     if req.status_code == 404:
         print("No such file found at %s" % url)
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/duckduckgo_ex2.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/duckduckgo_ex2.py
index c8dbbd0d..4d233f80 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/duckduckgo_ex2.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/duckduckgo_ex2.py
@@ -1,9 +1,9 @@
-import requests
+from security import safe_requests
 
 DUCKDUCKGO_URL = "https://api.duckduckgo.com/?q={query_string}&format=json&pretty=1"
 
 search_string = input("Enter the content to search:\n")
-response = requests.get(DUCKDUCKGO_URL.format(query_string=search_string), timeout=60)
+response = safe_requests.get(DUCKDUCKGO_URL.format(query_string=search_string), timeout=60)
 if response.status_code == 200:
     # print(response.text)
     with open("result.json", "wb") as f:
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example.py
index ea5d7cdf..42bd1ab3 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example.py
@@ -1,10 +1,9 @@
 import time
-
-import requests
+from security import safe_requests
 
 
 def download(url):
-    return requests.get(url, timeout=60)
+    return safe_requests.get(url, timeout=60)
 
 
 if __name__ == "__main__":
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example2.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example2.py
index 9548a943..e73af529 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example2.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example2.py
@@ -1,12 +1,11 @@
 import time
-
-import requests
 from tomorrow import threads
+from security import safe_requests
 
 
 @threads(5)
 def download(url):
-    return requests.get(url, timeout=5)
+    return safe_requests.get(url, timeout=5)
 
 
 if __name__ == "__main__":
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example_1.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example_1.py
index b1d2158e..b1f8b12c 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example_1.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/example_1.py
@@ -5,10 +5,9 @@
     pip install requests
 """
+from security import safe_requests
 
-import requests
-
-response = requests.get(
+response = safe_requests.get(
     "https://www.timeanddate.com/worldclock/india/new-delhi", timeout=5
 )
 # print(response.text)
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/google_search.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/google_search.py
index 9d79b6ac..0b42dd04 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/google_search.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/google_search.py
@@ -3,9 +3,9 @@
     Purpose: Google search
         pip install -U requests --user
 """
-import requests
+from security import safe_requests
 
-response = requests.get(
+response = safe_requests.get(
     "https://www.google.com/search?q=python+programming&oq=python+programming&aqs=chrome..69i57j69i65l3j69i61j69i60.6334j0j7&sourceid=chrome&ie=UTF-8",
     timeout=60,
 )
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/ip2geolocation.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/ip2geolocation.py
index b2ad00d7..7197c936 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/ip2geolocation.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/ip2geolocation.py
@@ -1,6 +1,5 @@
 import csv
-
-import requests
+from security import safe_requests
 
 
 def get_addresses(filename):
@@ -31,7 +30,7 @@ def get_geolocation(all_the_ip_address):
     # get geolocation
     for line in all_the_ip_address:
         print("Grabbing geo info for row # {0}".format(counter))
-        r = requests.get("https://freegeoip.net/json/{0}".format(line[0]), timeout=60)
+        r = safe_requests.get("https://freegeoip.net/json/{0}".format(line[0]), timeout=60)
         line.extend([str(r.json()["country_name"]), str(r.json()["city"])])
         updated_addresses.append(line)
         counter += 1
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/openstreetmap.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/openstreetmap.py
index a40b5e92..ad9ab21d 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/openstreetmap.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/openstreetmap.py
@@ -3,15 +3,14 @@
     Purpose: open street map API usage
         https://wiki.openstreetmap.org/wiki/Nominatim#Example
 """
-
-import requests
+from security import safe_requests
 
 
 def get_location_coordinates(search_string):
     SEARCH_URL = "https://nominatim.openstreetmap.org/search?"
payload = {"q": search_string, "format": "json", "polygon": 1, "addressdetails": 1} - response = requests.get(SEARCH_URL, params=payload, timeout=60).json() + response = safe_requests.get(SEARCH_URL, params=payload, timeout=60).json() # pprint(response) for each in response: result_string = """====Search Result=====\n{DISPLAY_NAME} diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/openstreetmap2.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/openstreetmap2.py index dcd3c890..c1b19da6 100644 --- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/openstreetmap2.py +++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/openstreetmap2.py @@ -3,8 +3,7 @@ Purpose: open street map API usage https://wiki.openstreetmap.org/wiki/Nominatim#Example """ - -import requests +from security import safe_requests def get_address_for_given_coordinates(latitude, longitude): @@ -17,7 +16,7 @@ def get_address_for_given_coordinates(latitude, longitude): "addressdetails": 1, } - response = requests.get(REVERSE_SEARCH_URL, params=payload, timeout=60).json() + response = safe_requests.get(REVERSE_SEARCH_URL, params=payload, timeout=60).json() # pprint(response) if response.get("error"): print(response.get("error")) diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_01.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_01.py index 62860759..5e3c738d 100644 --- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_01.py +++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_01.py @@ -1,11 +1,11 @@ -import requests +from security import safe_requests # Exercise 1.1 # Perform a GET request to http://api.zippopotam.us/us/90210 # Check that the response status code equals 200 def test_get_locations_for_us_90210_check_status_code_equals_200(): - response = requests.get("http://api.zippopotam.us/us/90210", timeout=5) + response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=5) assert response.status_code == 200 @@ -13,7 +13,7 @@ def test_get_locations_for_us_90210_check_status_code_equals_200(): # Perform a GET request to http://api.zippopotam.us/us/90210 # Check that the value of the response header 'Content-Type' equals 'application/json' def test_get_locations_for_us_90210_check_content_type_equals_json(): - response = requests.get("http://api.zippopotam.us/us/90210", timeout=5) + response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=5) assert response.headers["Content-Type"] == "application/json" @@ -21,7 +21,7 @@ def test_get_locations_for_us_90210_check_content_type_equals_json(): # Perform a GET request to http://api.zippopotam.us/us/90210 # Check that the response body encoding is not set (equal to None) def test_get_locations_for_us_90210_check_encoding_is_not_set(): - response = requests.get("http://api.zippopotam.us/us/90210", timeout=5) + response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=5) assert response.encoding is None @@ -29,7 +29,7 @@ def test_get_locations_for_us_90210_check_encoding_is_not_set(): # Perform a GET request to http://api.zippopotam.us/us/90210 # Check that the response body element 'country' has a value equal to 'United States' def test_get_locations_for_us_90210_check_country_equals_united_states(): - response = 
requests.get("http://api.zippopotam.us/us/90210", timeout=5) + response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=5) response_body = response.json() assert response_body["country"] == "United States" @@ -39,7 +39,7 @@ def test_get_locations_for_us_90210_check_country_equals_united_states(): # Check that the first 'place name' element in the list of places # has a value equal to 'Beverly Hills' def test_get_locations_for_us_90210_check_city_equals_beverly_hills(): - response = requests.get("http://api.zippopotam.us/us/90210", timeout=5) + response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=5) response_body = response.json() assert response_body["places"][0]["place name"] == "Beverly Hills" @@ -50,6 +50,6 @@ def test_get_locations_for_us_90210_check_city_equals_beverly_hills(): # value with a length of 1 (i.e., there's one place that corresponds # to the US zip code 90210) def test_get_locations_for_us_90210_check_one_place_is_returned(): - response = requests.get("http://api.zippopotam.us/us/90210", timeout=5) + response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=5) response_body = response.json() assert len(response_body["places"]) == 1 diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_02.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_02.py index 19d69758..48d1588c 100644 --- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_02.py +++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_02.py @@ -1,7 +1,7 @@ import csv import pytest -import requests +from security import safe_requests # Exercise 2.1 # Create a test data object test_data_zip @@ -24,7 +24,7 @@ # to those that are specified in the test data object @pytest.mark.parametrize("country_code, zip_code, expected_place", test_data_zip) def test_get_location_data_check_place_name(country_code, zip_code, expected_place): - response = requests.get( + response = safe_requests.get( f"http://api.zippopotam.us/{country_code}/{zip_code}", timeout=60 ) response_body = response.json() @@ -58,7 +58,7 @@ def read_data_from_csv(): def test_get_location_data_check_place_name_with_data_from_csv( country_code, zip_code, expected_place ): - response = requests.get( + response = safe_requests.get( f"http://api.zippopotam.us/{country_code}/{zip_code}", timeout=60 ) response_body = response.json() diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_05.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_05.py index 9f553acb..3226a5e4 100644 --- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_05.py +++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_05.py @@ -1,6 +1,5 @@ import xml.etree.ElementTree as et - -import requests +from security import safe_requests # Exercise 5.1 @@ -11,7 +10,7 @@ # Check that the root element has no attributes # Check that the root element has no text def test_check_root_of_xml_response(): - response = requests.get( + response = safe_requests.get( "http://parabank.parasoft.com/parabank/services/bank/accounts/12345", timeout=60 ) response_body_as_xml = et.fromstring(response.content) @@ -29,7 +28,7 @@ def 
 # Find the customerId element in the tree
 # Check that the text of the customerId element is '12212'
 def test_check_specific_element_of_xml_response():
-    response = requests.get(
+    response = safe_requests.get(
         "http://parabank.parasoft.com/parabank/services/bank/accounts/12345", timeout=60
     )
     response_body_as_xml = et.fromstring(response.content)
@@ -45,7 +44,7 @@
 # Find all 'account' elements in the entire XML document
 # Check that there are more than 5 of these 'account' elements
 def test_check_number_of_accounts_for_12212_greater_than_five():
-    response = requests.get(
+    response = safe_requests.get(
         "http://parabank.parasoft.com/parabank/services/bank/customers/12212/accounts",
         timeout=60,
     )
@@ -62,7 +61,7 @@
 # (Use your creativity with the last one here... There is a solution, but I couldn't
 # find it on Google.)
 def test_use_xpath_for_more_sophisticated_checks():
-    response = requests.get(
+    response = safe_requests.get(
         "http://parabank.parasoft.com/parabank/services/bank/customers/12212/accounts",
         timeout=60,
     )
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_06.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_06.py
index 95186784..1313214f 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_06.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/answers/answers_06.py
@@ -2,8 +2,8 @@
 from urllib.parse import urlparse
 
 import pytest
-import requests
 import responses
+from security import safe_requests
 
 
 # Exercise 6.1
@@ -16,7 +16,7 @@
 def test_get_data_for_us_90210_mock_returns_404():
     responses.add(responses.GET, "http://api.zippopotam.us/us/90210", status=404)
 
-    response = requests.get("http://api.zippopotam.us/us/90210", timeout=60)
+    response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=60)
 
     assert response.status_code == 404
 
@@ -38,7 +38,7 @@ def test_get_user_with_id_1_mock_returns_404_and_error_message_in_body():
         status=404,
     )
 
-    response = requests.get("http://api.zippopotam.us/us/90210", timeout=60)
+    response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=60)
 
     assert response.json()["error"] == "No data exists for US zip code 90210"
 
@@ -59,7 +59,7 @@ def test_responses_can_raise_error_on_demand():
     )
 
     with pytest.raises(ValueError) as ve:
-        requests.get("http://api.zippopotam.us/us/ABCDE", timeout=60)
+        safe_requests.get("http://api.zippopotam.us/us/ABCDE", timeout=60)
 
     assert str(ve.value) == "US uses numerical zip codes only"
 
@@ -108,7 +108,7 @@ def generate_response_from(url):
         split_url = parsed_url.split("/")
         return f"{split_url[-2]} zip code {split_url[-1]} corresponds to {place}"
 
-    response = requests.get(
+    response = safe_requests.get(
         f"http://api.zippopotam.us/{country_code}/{zip_code}", timeout=60
     )
     assert (
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_01.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_01.py
index d6369581..b6e54cef 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_01.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_01.py
@@ -1,34 +1,34 @@
-import requests
+from security import safe_requests
 
 
 def test_get_user_with_id_1_check_status_code_equals_200():
-    response = requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
+    response = safe_requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
 
     assert response.status_code == 200
 
 
 def test_get_user_with_id_1_check_content_type_equals_json():
-    response = requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
+    response = safe_requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
 
     assert response.headers["Content-Type"] == "application/json; charset=utf-8"
 
 
 def test_get_user_with_id_1_check_encoding_equals_utf8():
-    response = requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
+    response = safe_requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
 
     assert response.encoding == "utf-8"
 
 
 def test_get_user_with_id_1_check_name_equals_leanne_graham():
-    response = requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
+    response = safe_requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
 
     response_body = response.json()
     assert response_body["name"] == "Leanne Graham"
 
 
 def test_get_user_with_id_1_check_company_name_equals_romaguera_crona():
-    response = requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
+    response = safe_requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=5)
 
     response_body = response.json()
     assert response_body["company"]["name"] == "Romaguera-Crona"
 
 
 def test_get_all_users_check_number_of_users_equals_10():
-    response = requests.get("https://jsonplaceholder.typicode.com/users", timeout=5)
+    response = safe_requests.get("https://jsonplaceholder.typicode.com/users", timeout=5)
 
     response_body = response.json()
     assert len(response_body) == 10
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_02.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_02.py
index cd7a094c..596e68f4 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_02.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_02.py
@@ -1,14 +1,14 @@
 import csv
 
 import pytest
-import requests
+from security import safe_requests
 
 test_data_users = [(1, "Leanne Graham"), (2, "Ervin Howell"), (3, "Clementine Bauch")]
 
 
 @pytest.mark.parametrize("userid, expected_name", test_data_users)
 def test_get_data_for_user_check_name(userid, expected_name):
-    response = requests.get(
+    response = safe_requests.get(
         f"https://jsonplaceholder.typicode.com/users/{userid}", timeout=60
     )
     response_body = response.json()
@@ -26,7 +26,7 @@ def read_data_from_csv():
 
 @pytest.mark.parametrize("userid, expected_name", read_data_from_csv())
 def test_get_location_data_check_place_name_with_data_from_csv(userid, expected_name):
-    response = requests.get(
+    response = safe_requests.get(
         f"https://jsonplaceholder.typicode.com/users/{userid}", timeout=60
     )
     response_body = response.json()
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_05.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_05.py
index ece31b02..822fc73a 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_05.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_05.py
@@ -1,10 +1,9 @@
 import xml.etree.ElementTree as et
-
-import requests
+from security import safe_requests
 
 
 def test_check_root_of_xml_response():
-    response = requests.get(
+    response = safe_requests.get(
         "http://parabank.parasoft.com/parabank/services/bank/customers/12212",
         timeout=60,
     )
@@ -16,7 +15,7 @@
 
 def test_check_specific_element_of_xml_response():
-    response = requests.get(
+    response = safe_requests.get(
         "http://parabank.parasoft.com/parabank/services/bank/customers/12212",
         timeout=60,
     )
@@ -29,7 +28,7 @@
 # https://docs.python.org/3/library/xml.etree.elementtree.html#elementtree-xpath
 def test_use_xpath_for_more_sophisticated_checks():
-    response = requests.get(
+    response = safe_requests.get(
         "http://parabank.parasoft.com/parabank/services/bank/customers/12212",
         timeout=60,
     )
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_06.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_06.py
index 091b89fc..b83bc671 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_06.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/examples/examples_06.py
@@ -2,9 +2,9 @@
 from urllib.parse import urlparse
 
 import pytest
-import requests
 import responses
 from requests.exceptions import ConnectionError
+from security import safe_requests
 
 
 @responses.activate
@@ -13,7 +13,7 @@ def test_get_user_with_id_1_mock_returns_404():
         responses.GET, "https://jsonplaceholder.typicode.com/users/1", status=404
     )
 
-    response = requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=60)
+    response = safe_requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=60)
 
     assert response.status_code == 404
 
@@ -26,14 +26,14 @@ def test_get_user_with_id_1_mock_returns_404_and_error_message_in_body():
         status=404,
     )
 
-    response = requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=60)
+    response = safe_requests.get("https://jsonplaceholder.typicode.com/users/1", timeout=60)
 
     assert response.json()["error"] == "No data exists for user with ID 1"
 
 
 @responses.activate
 def test_unmatched_endpoint_raises_connectionerror():
     with pytest.raises(ConnectionError):
-        requests.get("https://jsonplaceholder.typicode.com/users/99", timeout=60)
+        safe_requests.get("https://jsonplaceholder.typicode.com/users/99", timeout=60)
 
 
 @responses.activate
@@ -45,7 +45,7 @@ def test_responses_can_raise_error_on_demand():
     )
 
     with pytest.raises(RuntimeError) as re:
-        requests.get("https://jsonplaceholder.typicode.com/users/99", timeout=60)
+        safe_requests.get("https://jsonplaceholder.typicode.com/users/99", timeout=60)
 
     assert str(re.value) == "A runtime error occurred"
 
@@ -72,7 +72,7 @@ def generate_response_from(url):
         split_url = parsed_url.split("/")
         return f"You requested data for user {split_url[-1]}"
 
-    response = requests.get(
+    response = safe_requests.get(
         f"https://jsonplaceholder.typicode.com/users/{userid}", timeout=60
     )
     assert response.json()["value"] == f"You requested data for user {userid}"
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/exercises/exercises_01.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/exercises/exercises_01.py
index 96c0caa9..874ce198 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/exercises/exercises_01.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/requests-workshop-master/exercises/exercises_01.py
@@ -1,11 +1,10 @@
 from pprint import pp
-
-import requests
+from security import safe_requests
 
 # Exercise 1.1
 # Perform a GET request to http://api.zippopotam.us/us/90210
 # Check that the response status code equals 200
-response = requests.get("http://api.zippopotam.us/us/90210", timeout=60)
+response = safe_requests.get("http://api.zippopotam.us/us/90210", timeout=60)
 print(response)
 print(f"{response.status_code =}")
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/z_bitcoin_status.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/z_bitcoin_status.py
index cfd273f6..4a51c826 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/z_bitcoin_status.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/02_requests/z_bitcoin_status.py
@@ -1,11 +1,10 @@
 # https://realpython.com/python-bitcoin-ifttt/
 from pprint import pprint
-
-import requests
+from security import safe_requests
 
 bitcoin_api_url = "https://api.coinmarketcap.com/v1/ticker/bitcoin/"
 
-response = requests.get(bitcoin_api_url, timeout=60)
+response = safe_requests.get(bitcoin_api_url, timeout=60)
 response_json = response.json()
 print(type(response_json))  # The API returns a list
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/a_get_ip_and_details/c_get_my_ip_details.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/a_get_ip_and_details/c_get_my_ip_details.py
index f62dadc7..9286bad2 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/a_get_ip_and_details/c_get_my_ip_details.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/a_get_ip_and_details/c_get_my_ip_details.py
@@ -6,12 +6,12 @@
     python -m pip install -U requests --user
 """
-import requests
+from security import safe_requests
 
 
 def get_response(url):
     file_extension = url.split("/")[-1] or "html"
-    response = requests.get(url, timeout=60)
+    response = safe_requests.get(url, timeout=60)
     with open("result.{}".format(file_extension), "wb") as f:
         f.write(response.content)
         f.close()
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/a_iss_now.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/a_iss_now.py
index 789a0d71..d05a2326 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/a_iss_now.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/a_iss_now.py
@@ -7,10 +7,9 @@
 """
 import sys
 from pprint import pp
+from security import safe_requests
 
-import requests
-
-response = requests.get("http://api.open-notify.org/iss-now.json", timeout=5)
+response = safe_requests.get("http://api.open-notify.org/iss-now.json", timeout=5)
 # print(dir(response))
 print(f"{response.status_code =}")
 print(f"{response.url =}")
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/b_astros_on_ISS_now.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/b_astros_on_ISS_now.py
index e7896584..0892bca0 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/b_astros_on_ISS_now.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/b_astros_on_ISS_now.py
@@ -7,10 +7,9 @@
 """
 import sys
 from pprint import pp
+from security import safe_requests
 
-import requests
-
-response = requests.get("http://api.open-notify.org/astros.json", timeout=60)
+response = safe_requests.get("http://api.open-notify.org/astros.json", timeout=60)
 # print(dir(response))
 print(f"{response.status_code =}")
 print(f"{response.url =}")
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/c_iss_passage_info.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/c_iss_passage_info.py
index acc6d0fb..3e08c723 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/c_iss_passage_info.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/b_ISS/c_iss_passage_info.py
@@ -8,8 +8,7 @@
     http://api.open-notify.org/iss-pass.json?lat=23&lon=21
 """
 from pprint import pp
-
-import requests
+from security import safe_requests
 
 URL = "http://api.open-notify.org/iss-pass.json"
 
@@ -17,7 +16,7 @@
 longitude = input("Enter the longitude:")
 # 17.3850° N, 78.4867° E
 
-response = requests.get(URL, params={"lat": latitude, "lon": longitude}, timeout=5)
+response = safe_requests.get(URL, params={"lat": latitude, "lon": longitude}, timeout=5)
 
 print(f"{response.url =}")
 print(f"{response.status_code =}")
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/a_get_ip.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/a_get_ip.py
index 5cd95b25..2079d8f4 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/a_get_ip.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/a_get_ip.py
@@ -4,10 +4,9 @@
     http://httpbin.org/ip
 """
 from pprint import pprint
+from security import safe_requests
 
-import requests
-
-response = requests.get("http://httpbin.org/ip", timeout=60)
+response = safe_requests.get("http://httpbin.org/ip", timeout=60)
 
 print(f"{response.status_code =}")
 print(f"{response.url =}")
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/b_get_user_agent.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/b_get_user_agent.py
index 2cd3221b..940de778 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/b_get_user_agent.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/b_get_user_agent.py
@@ -5,8 +5,7 @@
 """
 import json
 import urllib.request
-
-import requests
+from security import safe_requests
 
 # Method 1
 url_handler = urllib.request.urlopen("http://httpbin.org/user-agent")
@@ -15,7 +14,7 @@
 print(f'You are hitting this API with {response_data["user-agent"]}')
 
 # Method 2
-response = requests.get("http://httpbin.org/user-agent", timeout=5)
+response = safe_requests.get("http://httpbin.org/user-agent", timeout=5)
 
 # print(f"{response.status_code =}")
 # print(f"{response.url =}")
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/c_changed_user_agent.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/c_changed_user_agent.py
index 918e82b7..89b9661e 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/c_changed_user_agent.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/c_managing_headers/c_changed_user_agent.py
@@ -8,8 +8,7 @@
 # Method 1
 import urllib.request
 from pprint import pprint
-
-import requests
+from security import safe_requests
 
 req = urllib.request.Request("http://httpbin.org/user-agent")
 req.add_header("User-agent", "Internet Explorer/2.0")
@@ -21,7 +20,7 @@
 print(f'You are hitting this API with {response_data["user-agent"]}')
with {response_data["user-agent"]}') -response = requests.get( +response = safe_requests.get( "http://httpbin.org/user-agent", headers={"User-agent": "Internet Explorer/2.0"}, # faking a browser diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/e_http_status_codes/a_cats.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/e_http_status_codes/a_cats.py index 2c4b0073..e9ec1e46 100644 --- a/python3/16_Web_Services/c_REST/a_consuming_APIs/e_http_status_codes/a_cats.py +++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/e_http_status_codes/a_cats.py @@ -14,8 +14,7 @@ import ctypes import os from pprint import pp - -import requests +from security import safe_requests URL = "https://http.cat/{HTTP_CODE}.jpg" # url = "https://http.cat/100.jpg" @@ -26,7 +25,7 @@ def get_status_images(_foldername): url = URL.format(HTTP_CODE=each_code) # print(url) - response = requests.get(url, timeout=60) + response = safe_requests.get(url, timeout=60) # pp(dict(response.headers)) if ( response.status_code == 200 diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/e_http_status_codes/b_dogs.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/e_http_status_codes/b_dogs.py index fc60cf23..c1fb9908 100644 --- a/python3/16_Web_Services/c_REST/a_consuming_APIs/e_http_status_codes/b_dogs.py +++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/e_http_status_codes/b_dogs.py @@ -10,9 +10,8 @@ 5xx - server side """ import os - -import requests from a_cats import create_folder +from security import safe_requests def get_status_dogs(foldername): @@ -78,7 +77,7 @@ def get_status_dogs(foldername): for each_status_code in existing_images_in_dogs_site: URL = f"https://httpstatusdogs.com/img/{each_status_code}.jpg" - response = requests.get(URL, timeout=60) + response = safe_requests.get(URL, timeout=60) if ( response.status_code == 200 and response.headers["content-type"] == "image/jpeg" diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/f_request_methods/a_example.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/f_request_methods/a_example.py index 35cb1e9f..ff30e6dd 100644 --- a/python3/16_Web_Services/c_REST/a_consuming_APIs/f_request_methods/a_example.py +++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/f_request_methods/a_example.py @@ -4,70 +4,71 @@ import urllib.request import requests +from security import safe_requests # How to: Send a GET request -print(requests.get("http://mock.kite.com/text", timeout=60).text) +print(safe_requests.get("http://mock.kite.com/text", timeout=60).text) # How to: Send a request to port 80 -r = requests.get("http://mock.kite.com:80/text", timeout=60) +r = safe_requests.get("http://mock.kite.com:80/text", timeout=60) print(r.text) # How to: Send a GET request with query parameters url = "http://mock.kite.com/queryparams" params = {"a": 1, "b": 2} -print(requests.get(url, params, timeout=60).text) +print(safe_requests.get(url, params, timeout=60).text) # How to: Send the URL parameters of a GET request in order p = (("first", "first_value"), ("second", "second_value")) -r = requests.get("http://mock.kite.com/queryparams", params=p, timeout=60) +r = safe_requests.get("http://mock.kite.com/queryparams", params=p, timeout=60) print(r.url) # How to: Send a GET request with custom headers url = "http://mock.kite.com/echo" headers = {"custom-header": "custom"} -print(requests.get(url, headers=headers, timeout=60).text) +print(safe_requests.get(url, headers=headers, timeout=60).text) # How to: Retrieve the contents of a page before redirecting -r = 
requests.get("http://mock.kite.com/redirect", timeout=60) +r = safe_requests.get("http://mock.kite.com/redirect", timeout=60) redirected_from = r.history[0] print(redirected_from.content) # How to: Send a GET request and do not allow redirects url = "http://mock.kite.com/redirect" -print(requests.get(url, allow_redirects=False, timeout=60).text) +print(safe_requests.get(url, allow_redirects=False, timeout=60).text) # How to make a request with a user agent in Python headers = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36" } -response = requests.get("http://www.kite.com", headers=headers, timeout=60) +response = safe_requests.get("http://www.kite.com", headers=headers, timeout=60) print(response) # How to: Modify the User-Agent header in a request h = {"User-Agent": "secret agent 0.07"} -r = requests.get("http://httpbin.org/user-agent", headers=h, timeout=60) +r = safe_requests.get("http://httpbin.org/user-agent", headers=h, timeout=60) print(r.content) # How to: Retrieve content of request -print(requests.get("http://mock.kite.com/text", timeout=60).content) +print(safe_requests.get("http://mock.kite.com/text", timeout=60).content) # How to: Get the time taken to complete a request -r = requests.get("http://mock.kite.com/text", timeout=60) +r = safe_requests.get("http://mock.kite.com/text", timeout=60) print(r.elapsed) # How to: Set a timeout time for a request try: url = "http://mock.kite.com/text" - r = requests.get(url, timeout=0.0001) + r = safe_requests.get(url, timeout=0.0001) except requests.exceptions.Timeout as e: print(e) # How to disable security certificate checks for requests in Python -response = requests.get("https://www.kite.com", verify=True, timeout=60) +response = safe_requests.get("https://www.kite.com", verify=True, timeout=60) print(response) # Warning Disabling security checks can potentially compromise the integrity of requests if handled incorrectly. Read more about the requests library and its usage here. 
@@ -75,12 +76,12 @@
 url = "https://www.python.org/static/img/python-logo@2x.png"
 urllib.request.urlretrieve(url, "python_logo.png")
 
-downloaded_obj = requests.get(url, timeout=60)
+downloaded_obj = safe_requests.get(url, timeout=60)
 with open("python_logo.png", "wb") as file:
     file.write(downloaded_obj.content)
 
 # How to download an image using requests in Python
-response = requests.get("https://i.imgur.com/ExdKOOz.png", timeout=60)
+response = safe_requests.get("https://i.imgur.com/ExdKOOz.png", timeout=60)
 file = open("sample_image.png", "wb")
 file.write(response.content)
 file.close()
@@ -88,7 +89,7 @@
 
 # How to download large files with requests in Python
 url = "https://raw.githubusercontent.com/selva86/datasets/master/BostonHousing.csv"
-response = requests.get(url, stream=True, timeout=60)
+response = safe_requests.get(url, stream=True, timeout=60)
 
 text_file = open("data.txt", "wb")
 for chunk in response.iter_content(chunk_size=1024):
@@ -98,7 +99,7 @@
 text_file.close()
 
 # How to get a JSON from a webpage in Python
-response = requests.get("http://httpbin.org/stream/1", timeout=60)
+response = safe_requests.get("http://httpbin.org/stream/1", timeout=60)
 data = response.json()
 print(data)
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/f_request_methods/b_json_test_api.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/f_request_methods/b_json_test_api.py
index a693d396..bb1a3087 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/f_request_methods/b_json_test_api.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/f_request_methods/b_json_test_api.py
@@ -17,14 +17,13 @@
     DELETE /posts/1
 """
 from pprint import pprint
-
-import requests
+from security import safe_requests
 
 URL = "https://jsonplaceholder.typicode.com"  # text/html
 
 
 def get_response(url):
-    response = requests.get(url, timeout=60)
+    response = safe_requests.get(url, timeout=60)
     if response.ok:
         print(
             f'\n{response.headers["Content-Type"]}'
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/g_downloading_files/get_xml_data.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/g_downloading_files/get_xml_data.py
index 9cb6e895..ad3771fd 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/g_downloading_files/get_xml_data.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/g_downloading_files/get_xml_data.py
@@ -1,8 +1,7 @@
 from numbers import Number
 from typing import Optional
 from xml.dom.minidom import parseString
-
-import requests
+from security import safe_requests
 
 SETTINGS = {
     "currency": "THB",
@@ -16,7 +15,7 @@
 def check_exchange_rate(
     currency: str = None, threshold: Number = None
 ) -> Optional[str]:
-    res = requests.get(URL, timeout=60)
+    res = safe_requests.get(URL, timeout=60)
 
     # we have to parse XML (unfortunately I did not find a .json API)
     parsed = parseString(
diff --git a/python3/16_Web_Services/c_REST/a_consuming_APIs/i_api_authentication/basic_auth.py b/python3/16_Web_Services/c_REST/a_consuming_APIs/i_api_authentication/basic_auth.py
index 52095423..35be1e86 100644
--- a/python3/16_Web_Services/c_REST/a_consuming_APIs/i_api_authentication/basic_auth.py
+++ b/python3/16_Web_Services/c_REST/a_consuming_APIs/i_api_authentication/basic_auth.py
@@ -1,12 +1,11 @@
 import sys
-
-import requests
+from security import safe_requests
 
 
 def login(_username, _password):
     url = "http://httpbin.org/basic-auth/user/passwd"
 
-    response = requests.get(url, auth=(_username, _password), timeout=60)
+    response = safe_requests.get(url, auth=(_username, _password), timeout=60)
 
     if response.status_code != 200:
response.status_code != 200: print("Error found", response.status_code, file=sys.stderr) print(response.reason) diff --git a/python3/16_Web_Services/d_web_scraping/a_get_webpage/scrap_billboards.py b/python3/16_Web_Services/d_web_scraping/a_get_webpage/scrap_billboards.py index 4dbe1c54..d9e68031 100644 --- a/python3/16_Web_Services/d_web_scraping/a_get_webpage/scrap_billboards.py +++ b/python3/16_Web_Services/d_web_scraping/a_get_webpage/scrap_billboards.py @@ -1,9 +1,9 @@ import lxml -import requests from bs4 import BeautifulSoup +from security import safe_requests URL = "https://www.billboard.com/charts/hot-100/" -r = requests.get(URL, timeout=60) +r = safe_requests.get(URL, timeout=60) doc = BeautifulSoup(r.content, "lxml") structured_doc = doc.prettify() diff --git a/python3/16_Web_Services/d_web_scraping/covid_cases.py b/python3/16_Web_Services/d_web_scraping/covid_cases.py index 379369ef..eb5dd696 100644 --- a/python3/16_Web_Services/d_web_scraping/covid_cases.py +++ b/python3/16_Web_Services/d_web_scraping/covid_cases.py @@ -1,11 +1,10 @@ import time - -import requests from bs4 import BeautifulSoup +from security import safe_requests while True: # Make a request to the website - response = requests.get("https://www.worldometers.info/coronavirus/", timeout=60) + response = safe_requests.get("https://www.worldometers.info/coronavirus/", timeout=60) # Use BeautifulSoup to parse the HTML content soup = BeautifulSoup(response.content, "html.parser") diff --git a/python3/16_Web_Services/d_web_scraping/gaana.py b/python3/16_Web_Services/d_web_scraping/gaana.py index 11dcb980..8958f206 100644 --- a/python3/16_Web_Services/d_web_scraping/gaana.py +++ b/python3/16_Web_Services/d_web_scraping/gaana.py @@ -1,9 +1,8 @@ import os from time import sleep - -import requests from bs4 import BeautifulSoup from selenium import webdriver +from security import safe_requests # creates directory in pc os.mkdir(r"C:\Users\udhayPrakash\Desktop\Gaana", 755) @@ -16,7 +15,7 @@ # saves links and song names def scaper(url): - soup = BeautifulSoup(requests.get(url, timeout=60).content, "lxml") + soup = BeautifulSoup(safe_requests.get(url, timeout=60).content, "lxml") data = soup.findAll("div", {"playlist_thumb_det"}) for line in data: link = str(line.contents[1]) diff --git a/python3/16_Web_Services/d_web_scraping/get_top10_google_searches.py b/python3/16_Web_Services/d_web_scraping/get_top10_google_searches.py index 48f51dd8..078f298d 100644 --- a/python3/16_Web_Services/d_web_scraping/get_top10_google_searches.py +++ b/python3/16_Web_Services/d_web_scraping/get_top10_google_searches.py @@ -10,7 +10,7 @@ import bs4 import pyperclip -import requests +from security import safe_requests def start(): @@ -21,7 +21,7 @@ def start(): # search for the keyword copied in the clipboard keyword = pyperclip.paste() - res = requests.get("https://google.com/search?q=" + keyword, timeout=60) + res = safe_requests.get("https://google.com/search?q=" + keyword, timeout=60) soup = bs4.BeautifulSoup(res.text, "lxml") links = soup.select(".r a") tab_counts = min(10, len(links)) diff --git a/python3/16_Web_Services/d_web_scraping/linkedin_scraper.py b/python3/16_Web_Services/d_web_scraping/linkedin_scraper.py index 7830f9fb..74d2fcc2 100644 --- a/python3/16_Web_Services/d_web_scraping/linkedin_scraper.py +++ b/python3/16_Web_Services/d_web_scraping/linkedin_scraper.py @@ -1,8 +1,7 @@ import json - -import requests import urllib3 from lxml import html +from security import safe_requests 
 urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
@@ -14,7 +13,7 @@ def linkedin_companies_parser(url):
         "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36"
     }
     print("Fetching :", url)
-    response = requests.get(url, headers=headers, verify=True, timeout=5)
+    response = safe_requests.get(url, headers=headers, verify=True, timeout=5)
     formatted_response = response.content.replace("", "")
     doc = html.fromstring(formatted_response)
     datafrom_xpath = doc.xpath(
diff --git a/python3/16_Web_Services/d_web_scraping/one_million_websites.py b/python3/16_Web_Services/d_web_scraping/one_million_websites.py
index adb03bfc..6a7d6150 100644
--- a/python3/16_Web_Services/d_web_scraping/one_million_websites.py
+++ b/python3/16_Web_Services/d_web_scraping/one_million_websites.py
@@ -1,7 +1,6 @@
 import time
-
-import requests
 from bs4 import BeautifulSoup
+from security import safe_requests

 headers = {
     "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
@@ -13,7 +12,7 @@

 for i in range(1, 201):
     url = "http://websitelists.in/website-list-" + str(i) + ".html"
-    response = requests.get(url, headers=headers, timeout=5)
+    response = safe_requests.get(url, headers=headers, timeout=5)
     if response.status_code != 200:
         print(url + str(response.status_code))
         continue
diff --git a/python3/16_Web_Services/d_web_scraping/python_planet_indexes.py b/python3/16_Web_Services/d_web_scraping/python_planet_indexes.py
index 40e8cb82..340953d5 100644
--- a/python3/16_Web_Services/d_web_scraping/python_planet_indexes.py
+++ b/python3/16_Web_Services/d_web_scraping/python_planet_indexes.py
@@ -1,9 +1,9 @@
-import requests
 from bs4 import BeautifulSoup
+from security import safe_requests


 def main():
-    resp = requests.get("http://planetpython.org", timeout=60)
+    resp = safe_requests.get("http://planetpython.org", timeout=60)

     soup = BeautifulSoup(resp.text)
diff --git a/python3/16_Web_Services/d_web_scraping/stackoverflow_tags.py b/python3/16_Web_Services/d_web_scraping/stackoverflow_tags.py
index cfc724d6..61917d13 100644
--- a/python3/16_Web_Services/d_web_scraping/stackoverflow_tags.py
+++ b/python3/16_Web_Services/d_web_scraping/stackoverflow_tags.py
@@ -5,16 +5,15 @@
 import operator
 import os
 import sys
-
-import requests
 from bs4 import BeautifulSoup
+from security import safe_requests

 # global dictionary to store the count of tags processed so far
 tag_count_dict = {}


 def get_soup_from_link(link):
-    html_text = requests.get(link, timeout=5).text
+    html_text = safe_requests.get(link, timeout=5).text
     soup = BeautifulSoup(html_text, "html.parser")
     return soup
diff --git a/python3/16_Web_Services/d_web_scraping/stock_scraper.py b/python3/16_Web_Services/d_web_scraping/stock_scraper.py
index 1b382eb0..d151364a 100644
--- a/python3/16_Web_Services/d_web_scraping/stock_scraper.py
+++ b/python3/16_Web_Services/d_web_scraping/stock_scraper.py
@@ -1,12 +1,11 @@
 from collections import defaultdict
-
-import requests
 from lxml import html
+from security import safe_requests


 def get_stocks(url):
     # Make Request
-    page = requests.get(url, timeout=60)
+    page = safe_requests.get(url, timeout=60)
     # Parse/Scrape
     tree = html.fromstring(page.text)
     xpath = '//*[@id="mw-content-text"]/table[1]'
diff --git a/python3/16_Web_Services/d_web_scraping/tweets_scrapper.py b/python3/16_Web_Services/d_web_scraping/tweets_scrapper.py
index 1b03e931..5d11348e 100644
--- a/python3/16_Web_Services/d_web_scraping/tweets_scrapper.py
+++ b/python3/16_Web_Services/d_web_scraping/tweets_scrapper.py
@@ -4,9 +4,8 @@

 import json
 import sys
-
-import requests
 from bs4 import BeautifulSoup
+from security import safe_requests


 def usage():
@@ -35,7 +34,7 @@ def start(username=None):
     print("\n\nDownloading tweets for " + username)
     response = None
     try:
-        response = requests.get(url, timeout=60)
+        response = safe_requests.get(url, timeout=60)
     except Exception as e:
         print(repr(e))
         sys.exit(1)
@@ -103,7 +102,7 @@ def get_tweets_data(username, soup):

         next_response = None
         try:
-            next_response = requests.get(next_url, timeout=60)
+            next_response = safe_requests.get(next_url, timeout=60)
         except Exception as e:
             # in case there is some issue with request. None encountered so far.
             print(e)
diff --git a/python3/16_Web_Services/d_web_scraping/webScraping4.py b/python3/16_Web_Services/d_web_scraping/webScraping4.py
index 5c44abd1..83635f4f 100644
--- a/python3/16_Web_Services/d_web_scraping/webScraping4.py
+++ b/python3/16_Web_Services/d_web_scraping/webScraping4.py
@@ -3,9 +3,9 @@
 Purpose: scraping
 """
 import lxml.html
-import requests
+from security import safe_requests

-res = requests.get("https://pydata.org/nyc2018/schedule/", timeout=5)
+res = safe_requests.get("https://pydata.org/nyc2018/schedule/", timeout=5)
 print(res.status_code)
 print(res.text)
 tree = lxml.html.fromstring(res.text)
diff --git a/python3/16_Web_Services/d_web_scraping/webScraping5.py b/python3/16_Web_Services/d_web_scraping/webScraping5.py
index 588921c9..994f277e 100644
--- a/python3/16_Web_Services/d_web_scraping/webScraping5.py
+++ b/python3/16_Web_Services/d_web_scraping/webScraping5.py
@@ -1,10 +1,9 @@
 """
 Purpose: scraping
 """
-
-import requests
 from bs4 import BeautifulSoup
+from security import safe_requests

-res = requests.get("https://www.whoishostingthis.com/tools/user-agent/", timeout=60)
+res = safe_requests.get("https://www.whoishostingthis.com/tools/user-agent/", timeout=60)
 soup = BeautifulSoup(res.text, "lxml")
 print(soup.prettify())
diff --git a/python3/16_Web_Services/d_web_scraping/youtubeSentiment.py b/python3/16_Web_Services/d_web_scraping/youtubeSentiment.py
index 71058861..355805df 100644
--- a/python3/16_Web_Services/d_web_scraping/youtubeSentiment.py
+++ b/python3/16_Web_Services/d_web_scraping/youtubeSentiment.py
@@ -1,7 +1,6 @@
 import sys
-
-import requests
 from bs4 import BeautifulSoup as bs4
+from security import safe_requests

 """
 Example usage:
@@ -19,7 +18,7 @@ def get_arguments():


 def get_comments(url):
-    html = requests.get(
+    html = safe_requests.get(
         "https://plus.googleapis.com/u/0/_/widget/render/comments?first_party_property=YOUTUBE&href="
         + url,
         timeout=60,
diff --git a/python3/16_Web_Services/f_web_application/d_using_flask/f_Authentication/app.py b/python3/16_Web_Services/f_web_application/d_using_flask/f_Authentication/app.py
index 2250fa45..52e34422 100644
--- a/python3/16_Web_Services/f_web_application/d_using_flask/f_Authentication/app.py
+++ b/python3/16_Web_Services/f_web_application/d_using_flask/f_Authentication/app.py
@@ -2,12 +2,12 @@
 from uuid import uuid4

 import jwt
-import requests
 from flask import Flask, redirect, session, url_for
 from jwt import PyJWKClient
 from jwt.exceptions import DecodeError
 from requests_oauthlib import OAuth2Session
 from werkzeug.exceptions import InternalServerError, Unauthorized
+from security import safe_requests

 app = Flask(__name__)
 app.config["SECRET_KEY"] = str(uuid4())
@@ -22,7 +22,7 @@


 def get_well_known_metadata():
-    response = requests.get(IDP_CONFIG["well_known_url"], timeout=60)
+    response = safe_requests.get(IDP_CONFIG["well_known_url"], timeout=60)
     response.raise_for_status()
     return response.json()
diff --git a/python3/16_Web_Services/f_web_application/using_fastapi/project6_TODO_client.py b/python3/16_Web_Services/f_web_application/using_fastapi/project6_TODO_client.py
index d62561db..f29a17d7 100644
--- a/python3/16_Web_Services/f_web_application/using_fastapi/project6_TODO_client.py
+++ b/python3/16_Web_Services/f_web_application/using_fastapi/project6_TODO_client.py
@@ -1,4 +1,5 @@
 import requests
+from security import safe_requests

 URL = "http://localhost:8000"

@@ -7,7 +8,7 @@
 response = requests.post(f"{URL}/items", json=item, timeout=60)

 # Get item
-response = requests.get(f"{URL}/items/1", timeout=60)
+response = safe_requests.get(f"{URL}/items/1", timeout=60)

 # Update item
 updated_item = {"id": 1, "name": "Bar", "price": 39.99}
diff --git a/python3/16_Web_Services/h_feedparsing/parse_RSS_feed.py b/python3/16_Web_Services/h_feedparsing/parse_RSS_feed.py
index 0f38b5eb..166637ac 100644
--- a/python3/16_Web_Services/h_feedparsing/parse_RSS_feed.py
+++ b/python3/16_Web_Services/h_feedparsing/parse_RSS_feed.py
@@ -2,8 +2,7 @@
 # importing the required modules
 import csv
 import xml.etree.ElementTree as ET
-
-import requests
+from security import safe_requests


 def loadRSS():
@@ -11,7 +10,7 @@ def loadRSS():
     url = "http://www.hindustantimes.com/rss/topnews/rssfeed.xml"

     # creating HTTP response object from given url
-    resp = requests.get(url, timeout=60)
+    resp = safe_requests.get(url, timeout=60)

     # saving the xml file
     with open("topnewsfeed.xml", "wb") as f:
diff --git a/python3/16_Web_Services/h_feedparsing/rdflib_sparql_access.py b/python3/16_Web_Services/h_feedparsing/rdflib_sparql_access.py
index 3c1650fa..2dd97668 100644
--- a/python3/16_Web_Services/h_feedparsing/rdflib_sparql_access.py
+++ b/python3/16_Web_Services/h_feedparsing/rdflib_sparql_access.py
@@ -1,28 +1,7 @@
 import rdflib
+from security import safe_requests

 g = rdflib.Graph()

-# RDF - Resource Description Framework (RDF)
-
-# g.parse("http://danbri.org/foaf.rdf#")
-
-# knows_query = """
-# SELECT DISTINCT ?aname ?bname
-# WHERE {
-#     ?a foaf:knows ?b .
-#     ?a foaf:name ?aname .
-#     ?b foaf:name ?bname .
-# }"""
-
-# qres = g.query(knows_query)
-# for row in qres:
-#     print(f"{row.aname} knows {row.bname}")
-
-
-# g.parse("https://query.wikidata.org/sparql")
-# print(len(g))
-
-import requests
-
-resp = requests.get("https://query.wikidata.org/sparql", timeout=60)
+resp = safe_requests.get("https://query.wikidata.org/sparql", timeout=60)
 print(vars(resp))
diff --git a/python3/16_Web_Services/k_Projects/download_files.py b/python3/16_Web_Services/k_Projects/download_files.py
index 413b107b..58dd5fdd 100644
--- a/python3/16_Web_Services/k_Projects/download_files.py
+++ b/python3/16_Web_Services/k_Projects/download_files.py
@@ -1,9 +1,8 @@
 import os
 import re
 import zipfile
-
-import requests
 import wget
+from security import safe_requests


 def download_all_zips_files():
@@ -12,9 +11,9 @@ def download_all_zips_files():
     Finally the function writes all the files to NVD folder on the local disk
     :return:
     """
-    r = requests.get("https://nvd.nist.gov/vuln/data-feeds#JSON_FEED", timeout=60)
+    r = safe_requests.get("https://nvd.nist.gov/vuln/data-feeds#JSON_FEED", timeout=60)
     for filename in re.findall("nvdcve-1.1-[0-9]*\.json\.zip", r.text):
-        r_file = requests.get(
+        r_file = safe_requests.get(
             "https://nvd.nist.gov/feeds/json/cve/1.1/" + filename,
             stream=True,
             timeout=60,
diff --git a/python3/18_aws_cloud/a_AWS_Lambdas/b_zipping_lambdas/b_requests_layer_usage/b_requests_layer_usage.py b/python3/18_aws_cloud/a_AWS_Lambdas/b_zipping_lambdas/b_requests_layer_usage/b_requests_layer_usage.py
index ff500fd9..d8201189 100644
--- a/python3/18_aws_cloud/a_AWS_Lambdas/b_zipping_lambdas/b_requests_layer_usage/b_requests_layer_usage.py
+++ b/python3/18_aws_cloud/a_AWS_Lambdas/b_zipping_lambdas/b_requests_layer_usage/b_requests_layer_usage.py
@@ -1,8 +1,8 @@
-import requests
+from security import safe_requests


 def lambda_handler(event, context):
-    response = requests.get("https://www.google.com", timeout=60)
+    response = safe_requests.get("https://www.google.com", timeout=60)

     return {"statusCode": 200, "body": response.content.decode()}
diff --git a/python3/18_aws_cloud/a_AWS_Lambdas/c_using_lambda_layers/a1_get_random_person.py b/python3/18_aws_cloud/a_AWS_Lambdas/c_using_lambda_layers/a1_get_random_person.py
index 9d8895ec..5beb9087 100644
--- a/python3/18_aws_cloud/a_AWS_Lambdas/c_using_lambda_layers/a1_get_random_person.py
+++ b/python3/18_aws_cloud/a_AWS_Lambdas/c_using_lambda_layers/a1_get_random_person.py
@@ -1,9 +1,9 @@
-import requests
+from security import safe_requests


 def lambda_handler(event, context):
     url = "https://randomuser.me/api/"
-    response = requests.get(url, timeout=60)
+    response = safe_requests.get(url, timeout=60)
     if response.ok:
         data = response.json()
         return {
diff --git a/python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/c_process_pool_usage.py b/python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/c_process_pool_usage.py
index 94785f4f..8080ba31 100644
--- a/python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/c_process_pool_usage.py
+++ b/python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/c_process_pool_usage.py
@@ -4,15 +4,15 @@
 from pathlib import Path

 import numpy as np
-import requests
 from PIL import Image
+from security import safe_requests

 IMAGE_FOLDER = Path(".").absolute() / "demo"


 def download_image(img_url: str, save_loc: Path) -> np.ndarray:
     img_url = img_url.replace("\n", "")
-    img_bytes = requests.get(img_url, timeout=60).content
+    img_bytes = safe_requests.get(img_url, timeout=60).content
     img_name = img_url.split("/")[-1].replace("\n", "")

     save_loc.mkdir(parents=True, exist_ok=True)
diff --git a/python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/g_thread_pool_usage.py b/python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/g_thread_pool_usage.py
index 52d7799e..752a6c1d 100644
--- a/python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/g_thread_pool_usage.py
+++ b/python3/19_Concurrency_and_Parallel_Programming/01_MultiThreading/i_concurrent_futures/g_thread_pool_usage.py
@@ -4,15 +4,15 @@
 from pathlib import Path

 import numpy as np
-import requests
 from PIL import Image
+from security import safe_requests

 IMAGE_FOLDER = Path(".").absolute() / "demo"


 def download_image(img_url: str, save_loc: Path) -> np.ndarray:
     img_url = img_url.replace("\n", "")
-    img_bytes = requests.get(img_url, timeout=60).content
+    img_bytes = safe_requests.get(img_url, timeout=60).content
     img_name = img_url.split("/")[-1].replace("\n", "")

     save_loc.mkdir(parents=True, exist_ok=True)
diff --git a/python3/19_Concurrency_and_Parallel_Programming/02_multiprocessing/zip_codes_based_scrapping.py b/python3/19_Concurrency_and_Parallel_Programming/02_multiprocessing/zip_codes_based_scrapping.py
index 51bb53ba..824b62b4 100644
--- a/python3/19_Concurrency_and_Parallel_Programming/02_multiprocessing/zip_codes_based_scrapping.py
+++ b/python3/19_Concurrency_and_Parallel_Programming/02_multiprocessing/zip_codes_based_scrapping.py
@@ -4,16 +4,15 @@

 import time
 from multiprocessing.dummy import Pool
-
-import requests
 from datadiff import diff
+from security import safe_requests


 def getzip(code):
     try:
         code = str(code)
         url = f"https://maps.googleapis.com/maps/api/geocode/json?address={code}"
-        res = requests.get(url, timeout=60).json()["results"]
+        res = safe_requests.get(url, timeout=60).json()["results"]
         if len(res) < 1:  # Re-try
             print("Retrying")
             return getzip(code)
diff --git a/python3/Projects/ISS/main.py b/python3/Projects/ISS/main.py
index d3302801..a80e396c 100644
--- a/python3/Projects/ISS/main.py
+++ b/python3/Projects/ISS/main.py
@@ -4,8 +4,7 @@
 """
 import argparse
 from datetime import datetime
-
-import requests
+from security import safe_requests


 class ISS(object):
@@ -13,7 +12,7 @@ def __init__(self):
         self.base_url = "http://api.open-notify.org"

     def get_response_data(self, endpoint, _params=None):
-        response = requests.get(self.base_url + endpoint, params=_params, timeout=5)
+        response = safe_requests.get(self.base_url + endpoint, params=_params, timeout=5)
         if response.headers["Content-Type"] == "application/json":
             response_content = response.json()
             return response_content
diff --git a/python3/Projects/flask_mongo/src/test_api.py b/python3/Projects/flask_mongo/src/test_api.py
index 1e94e0c9..8628972b 100644
--- a/python3/Projects/flask_mongo/src/test_api.py
+++ b/python3/Projects/flask_mongo/src/test_api.py
@@ -1,20 +1,19 @@
 #!/usr/bin/python3
 import unittest
-
-import requests
+from security import safe_requests


 class TestApi(unittest.TestCase):
     def test01_get_all_records(self):
         """unit test to verify all records endpoint"""
-        response = requests.get("http://127.0.0.1:5000/", timeout=5)
+        response = safe_requests.get("http://127.0.0.1:5000/", timeout=5)
         response_json = response.json()
         self.assertTrue(isinstance(response_json, list))
         self.assertEqual(len(response_json), 1000)

     def test02_get_specific_records(self):
         """unit test to verify /users endpoint"""
-        response = requests.get(
+        response = safe_requests.get(
             "http://127.0.0.1:5000/users/5f481bcafcedab42c8652d73", timeout=5
         )
         response_json = response.json()
diff --git a/python3/Projects/flask_mongo/src/usage_client.py b/python3/Projects/flask_mongo/src/usage_client.py
index dc066a9d..0968c0f4 100644
--- a/python3/Projects/flask_mongo/src/usage_client.py
+++ b/python3/Projects/flask_mongo/src/usage_client.py
@@ -1,10 +1,10 @@
 #!/usr/bin/python3
-import requests
+from security import safe_requests

 # response = requests.get('http://127.0.0.1:5000/')
 # print(response.json())

-response = requests.get(
+response = safe_requests.get(
     "http://127.0.0.1:5000/users/5f481bcafcedab42c8652d73", timeout=5
 )
 print(response.json())
diff --git a/python3/Projects/icc_cricket-notifications.py b/python3/Projects/icc_cricket-notifications.py
index 41bfb903..7b4ee5bc 100644
--- a/python3/Projects/icc_cricket-notifications.py
+++ b/python3/Projects/icc_cricket-notifications.py
@@ -8,8 +8,8 @@
 from time import sleep

 import gntp.notifier
-import requests
 from bs4 import BeautifulSoup
+from security import safe_requests


 # register
@@ -39,7 +39,7 @@ def show_message(score):
 url = "http://static.cricinfo.com/rss/livescores.xml"

 while True:
-    r = requests.get(url, timeout=60)
+    r = safe_requests.get(url, timeout=60)
     if r.status_code != 200:
         break
     soup = BeautifulSoup(r.text)
diff --git a/python3/Projects/news_notifier.py b/python3/Projects/news_notifier.py
index 9fa50302..38b901f3 100644
--- a/python3/Projects/news_notifier.py
+++ b/python3/Projects/news_notifier.py
@@ -6,8 +6,8 @@
 from time import sleep

 import pynotify
-import requests
 from scrapy.selector import HtmlXPathSelector
+from security import safe_requests


 def sendmessage(title, message):
@@ -19,9 +19,9 @@ def sendmessage(title, message):
 url = "http://www.thehindu.com/"

 while True:
-    r = requests.get(url, timeout=5)
+    r = safe_requests.get(url, timeout=5)
     while r.status_code != 200:
-        r = requests.get(url, timeout=5)
+        r = safe_requests.get(url, timeout=5)
     response = r.text
     xxs = HtmlXPathSelector(text=response)