5 changes: 3 additions & 2 deletions Air pollution prediction/CodeAP.py
@@ -1,11 +1,12 @@
import requests
import matplotlib.pyplot as plt
from security import safe_requests

city = input("Enter you city : ")
url = 'http://api.waqi.info/feed/' + city + '/?token='
api_key = input("Enter your API key: ")

main_url = url + api_key
r = requests.get(main_url)
r = safe_requests.get(main_url)
data = r.json()['data']
aqi = data['aqi']
iaqi = data['iaqi']
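Every hunk in this pull request applies the same substitution: the bare requests.get call is replaced with safe_requests.get from the security package, with the URL and any headers passed through unchanged. A minimal sketch of the pattern, mirroring the CodeAP.py hunk above with placeholder values, and assuming safe_requests.get accepts the same arguments as requests.get (which is all these hunks rely on):

# Minimal sketch of the substitution made throughout this PR.
# Placeholder city and token; assumes `safe_requests.get` takes the same
# arguments as `requests.get`, as every hunk in this diff shows.
from security import safe_requests

city = "london"         # placeholder value
api_key = "YOUR_TOKEN"  # placeholder value
url = "http://api.waqi.info/feed/" + city + "/?token=" + api_key

# before: r = requests.get(url)
r = safe_requests.get(url)  # drop-in replacement
data = r.json()["data"]
print("AQI:", data["aqi"])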
122 changes: 61 additions & 61 deletions All_GitHub_Repos/all_github_repos.py
@@ -1,61 +1,61 @@
#!/usr/bin/env python3
# to convert it into a script by running sudo chmod +x all_github_repos.py
import requests
import sys
from github import Github
# imports
# pip3/pip install PyGithub is installed to work with the contents of the Github repositories
username = sys.argv[1]
# reading the username as a commandline argument
url = f"https://api.github.com/users/{username}"
user_data = requests.get(url).json()
# to retrieve data contained in the url in json format
def repository_names(user):
repo_names = []
for repo in user.get_repos():
repo_names.append(repo)
return repo_names
# fetching the names of all the repositories
def repository_details(user):
all_repo_details = []
repo_names = repository_names(user)
for repo in repo_names:
repo_details = {}
repo_details["Name"] = repo.full_name.split("/")[1]
repo_details["Description"] = repo.description
repo_details["Created on"] = repo.created_at
repo_details["Programming language"] = repo.language
repo_details["Forked"] = str(repo.forks) + " time(s)"
all_repo_details.append(repo_details)
return all_repo_details
# fetching the details of all the repositories
user = Github().get_user(username)
RD = repository_details(user)
# fetching the details of all repositories
# stored as a list of dictionaries
if __name__ == "__main__":
for content in RD:
# pprint.pprint(content)
for title, description in content.items():
print(title, ":", description)
print(
"\n-------------------------------------------------------------------------------------------------------------------\n"
)
#!/usr/bin/env python3

# to convert it into a script by running sudo chmod +x all_github_repos.py

import sys
from github import Github
from security import safe_requests

# imports
# pip3/pip install PyGithub is installed to work with the contents of the Github repositories

username = sys.argv[1]
# reading the username as a commandline argument

url = f"https://api.github.com/users/{username}"

user_data = safe_requests.get(url).json()
# to retrieve data contained in the url in json format


def repository_names(user):
repo_names = []
for repo in user.get_repos():
repo_names.append(repo)
return repo_names


# fetching the names of all the repositories


def repository_details(user):
all_repo_details = []
repo_names = repository_names(user)
for repo in repo_names:
repo_details = {}
repo_details["Name"] = repo.full_name.split("/")[1]
repo_details["Description"] = repo.description
repo_details["Created on"] = repo.created_at
repo_details["Programming language"] = repo.language
repo_details["Forked"] = str(repo.forks) + " time(s)"
all_repo_details.append(repo_details)
return all_repo_details


# fetching the details of all the repositories


user = Github().get_user(username)

RD = repository_details(user)
# fetching the details of all repositories
# stored as a list of dictionaries

if __name__ == "__main__":
for content in RD:
# pprint.pprint(content)
for title, description in content.items():
print(title, ":", description)
print(
"\n-------------------------------------------------------------------------------------------------------------------\n"
)
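As the header comments in the rewritten file note, the script is made executable with sudo chmod +x all_github_repos.py and run with a GitHub username as its single command-line argument (e.g. ./all_github_repos.py <username>), which it reads via sys.argv[1]; aside from the added blank lines, the substantive change is that the profile lookup now goes through safe_requests.get instead of requests.get.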
4 changes: 2 additions & 2 deletions Amazon Best Sellers Scraper/script.py
@@ -1,11 +1,11 @@
import requests
from bs4 import BeautifulSoup
from security import safe_requests


def scrape_amazon_bestsellers(category_url):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"}
response = requests.get(category_url, headers=headers)
response = safe_requests.get(category_url, headers=headers)

if response.status_code == 200:
soup = BeautifulSoup(response.content, 'html.parser')
4 changes: 2 additions & 2 deletions Amazon Price Tracker/script.py
@@ -1,15 +1,15 @@
import requests
from bs4 import BeautifulSoup
import time
import smtplib
from email.mime.text import MIMEText
from security import safe_requests


def get_amazon_product_price(product_url):
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
response = requests.get(product_url, headers=headers)
response = safe_requests.get(product_url, headers=headers)

if response.status_code == 200:
soup = BeautifulSoup(response.content, 'html.parser')
4 changes: 2 additions & 2 deletions Amazon Wishlist Notifier/script.py
@@ -1,8 +1,8 @@
import requests
from bs4 import BeautifulSoup
import time
import smtplib
from email.mime.text import MIMEText
from security import safe_requests

# Replace with your own email and password
SENDER_EMAIL = 'your_sender_email@gmail.com'
@@ -17,7 +17,7 @@ def get_wishlist_items():
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"
}
response = requests.get(WISHLIST_URL, headers=headers)
response = safe_requests.get(WISHLIST_URL, headers=headers)

if response.status_code == 200:
soup = BeautifulSoup(response.content, 'html.parser')
4 changes: 2 additions & 2 deletions Amazon-Price-Tracker/amazonprice.py
@@ -1,9 +1,9 @@
import requests
from bs4 import BeautifulSoup
import time
import smtplib
import pywhatkit
import datetime
from security import safe_requests

# header = {
# "
@@ -20,7 +20,7 @@
headers = {"User-Agent": f'{user_agent}'}
Url = input("Drop the Url of product you wish to buy...!\n")

page = requests.get(Url, headers=headers)
page = safe_requests.get(Url, headers=headers)
soup = BeautifulSoup(page.content, "html.parser")

# print(soup)
10 changes: 5 additions & 5 deletions Amazon_product_scraper/products.py
@@ -1,5 +1,5 @@
import requests
from bs4 import BeautifulSoup
from security import safe_requests


class Product:
Expand All @@ -15,7 +15,7 @@ def get_product(self):
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36"
}
r = requests.get(url, headers=headers)
r = safe_requests.get(url, headers=headers)
soup = BeautifulSoup(r.content, "html.parser")
product = soup.find("div", {"class": "s-product-image-container"})
product_link = product.find(
@@ -55,7 +55,7 @@ def get_product_details(self):
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36"
}
r = requests.get(product_link, headers=headers)
r = safe_requests.get(product_link, headers=headers)
soup = BeautifulSoup(r.content, "html.parser")
product_name = soup.find(
"span", {"id": "productTitle"}).text.strip()
@@ -104,7 +104,7 @@ def get_product_image(self):
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36"
}
r = requests.get(product_link, headers=headers)
r = safe_requests.get(product_link, headers=headers)
soup = BeautifulSoup(r.content, "html.parser")
product_image = soup.find(
"img", {"class": "a-dynamic-image a-stretch-horizontal"}
@@ -144,7 +144,7 @@ def customer_review(self):
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \
(KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36"
}
r = requests.get(product_link, headers=headers)
r = safe_requests.get(product_link, headers=headers)
soup = BeautifulSoup(r.content, "html.parser")

review_elements = soup.find_all("div", {"data-hook": "review"})
4 changes: 2 additions & 2 deletions AskUbuntu-Scraper/questions.py
@@ -1,6 +1,6 @@
from bs4 import BeautifulSoup
import requests
import json
from security import safe_requests


class AskUbuntu:
@@ -34,7 +34,7 @@ def getQuestions(self):
"""
url = "https://askubuntu.com/questions/tagged/" + self.topic
try:
res = requests.get(url)
res = safe_requests.get(url)
soup = BeautifulSoup(res.text, "html.parser")

questions_data = {"questions": []}
3 changes: 2 additions & 1 deletion Automated_scraper.py/script.py
@@ -2,12 +2,13 @@
import requests
from bs4 import BeautifulSoup
import time
from security import safe_requests


def display_content(url, selector):
try:
# Send a GET request to the URL
response = requests.get(url)
response = safe_requests.get(url)

# Check if the request was successful
if response.status_code == 200:
4 changes: 2 additions & 2 deletions BITCOIN-price-tracker/tracker.py
@@ -1,14 +1,14 @@
import requests
from bs4 import BeautifulSoup
import time
from security import safe_requests

# create a function to get price of cryptocurrency


def get_latest_crypto_price(coin):
url = 'https://www.google.com/search?q=' + (coin) + 'price'
# make a request to the website
HTML = requests.get(url)
HTML = safe_requests.get(url)
# Parsse the HTML
soup = BeautifulSoup(HTML.text, 'html.parser')
# find the current price
6 changes: 3 additions & 3 deletions Book_Scraper/book.py
@@ -1,13 +1,13 @@
# for scraping books
from bs4 import BeautifulSoup as bs
import requests
# to identify emoji unicode characters
import emoji
import pyfiglet
import itertools
import threading
import time
import sys
from security import safe_requests


def is_emoji(text):
@@ -17,7 +17,7 @@ def is_emoji(text):

def link_to_get(link):
"""This function will get the url of the image & book download direct link using the given link for book download"""
response = requests.get(link)
response = safe_requests.get(link)
th_html = bs(response.text, "html.parser")
td_all = th_html.find_all("td", id="info")
td_all = td_all[0]
@@ -60,7 +60,7 @@ def book_get(name, mainres=100, results=5):
# getting request and response
url = f"http://libgen.is/search.php?req={name}&lg_topic=libgen&open=0&view=simple&res={mainres}&phrase=1&column=def"
# print(url)
response = requests.get(url)
response = safe_requests.get(url)
bs_html = bs(response.text, "html.parser")

if "Search string must contain minimum 3 characters.." in bs_html.body:
6 changes: 3 additions & 3 deletions CNN Scraper/cnn.py
@@ -1,5 +1,5 @@
from bs4 import BeautifulSoup
import requests
from security import safe_requests


class NewsCNN:
@@ -36,7 +36,7 @@ def news_by_location(self, country: str):
obj_keys = ["news", "link"]
location = country.lower()
URL = f"https://edition.cnn.com/world/{location}"
page = requests.get(URL)
page = safe_requests.get(URL)
parse = BeautifulSoup(page.content, "html.parser")
heads = parse.find_all("span", attrs={"data-editable": "headline"})
links1 = parse.find_all(
@@ -95,7 +95,7 @@ def news_by_category(self, type: str):
sol = []
type = type.lower()
url = f"https://edition.cnn.com/{type}"
page = requests.get(url, headers=self.headers)
page = safe_requests.get(url, headers=self.headers)
parse = BeautifulSoup(page.content, "html.parser")
articles = parse.find_all(
"a", {"class": "container__link container_lead-plus-headlines__link"}
4 changes: 2 additions & 2 deletions Covid-19_Real-time_Notification/Covid.py
@@ -1,8 +1,8 @@
from plyer import notification
import requests
from bs4 import BeautifulSoup
import time
from englisttohindi.englisttohindi import EngtoHindi
from security import safe_requests


def notify_user(title, message):
@@ -14,7 +14,7 @@ def notify_user(title, message):


def getInfo(url):
r = requests.get(url)
r = safe_requests.get(url)
return r.text


3 changes: 2 additions & 1 deletion Crypocurrency-Converter-GUI/main.py
@@ -4,6 +4,7 @@
from guiVersion import Ui_guiVersion
import json
import requests
from security import safe_requests


class guiVersion(QMainWindow, Ui_guiVersion):
@@ -56,7 +57,7 @@ def currencies2(self, item2):
def api(self, cur1, cur2):
api_link = "https://min-api.cryptocompare.com/data/pricemulti?fsyms={}&tsyms={}".format(
cur1, cur2)
resp = requests.get(api_link)
resp = safe_requests.get(api_link)
# print(r.status_code)
data = json.loads(resp.content)
# print(data)