diff --git a/Excel_Automation_Python/main.py b/Excel_Automation_Python/main.py
new file mode 100644
index 0000000..00487d0
--- /dev/null
+++ b/Excel_Automation_Python/main.py
@@ -0,0 +1,56 @@
+from openpyxl import Workbook, load_workbook
+from openpyxl.utils import get_column_letter
+from openpyxl.styles import Font
+
+data = {
+    "James": {
+        "English": 65,
+        "Physics": 78,
+        "Computer": 98,
+        "History": 89
+    },
+    "Rhea": {
+        "English": 55,
+        "Physics": 77,
+        "Computer": 87,
+        "History": 95
+    },
+    "Harsh": {
+        "English": 100,
+        "Physics": 45,
+        "Computer": 75,
+        "History": 92
+    },
+    "Suman": {
+        "English": 30,
+        "Physics": 25,
+        "Computer": 45,
+        "History": 100
+    },
+    "Ryan": {
+        "English": 90,
+        "Physics": 100,
+        "Computer": 92,
+        "History": 60
+    }
+}
+
+wb = Workbook()
+ws = wb.active
+ws.title = "Student Marks"
+
+headings = ['Name'] + list(data['James'].keys())
+ws.append(headings)
+
+for person in data:
+    marks = list(data[person].values())
+    ws.append([person] + marks)
+
+for col in range(2, len(data['James']) + 2):
+    char = get_column_letter(col)
+    ws[char + "7"] = f"=SUM({char + '2'}:{char + '6'})/{len(data)}"
+
+for col in range(1, 6):
+    ws[get_column_letter(col) + '1'].font = Font(bold=True, color="0099CCFF")
+
+wb.save("StudentMarks.xlsx")
diff --git a/Web Scrapper Bot/requirements.txt b/Web Scrapper Bot/requirements.txt
new file mode 100644
index 0000000..218ead7
Binary files /dev/null and b/Web Scrapper Bot/requirements.txt differ
diff --git a/Web Scrapper Bot/web_scrapping_bot.py b/Web Scrapper Bot/web_scrapping_bot.py
new file mode 100644
index 0000000..1413664
--- /dev/null
+++ b/Web Scrapper Bot/web_scrapping_bot.py
@@ -0,0 +1,37 @@
+# importing the libraries and modules required
+import requests
+from bs4 import BeautifulSoup
+from datetime import datetime
+import time
+
+while(True):
+    now = datetime.now()
+
+    # time of web-scrapping
+    current_time = now.strftime("%H:%M:%S")
+    print(f'At time : {current_time} IST')
+
+    response = requests.get('https://coinmarketcap.com/')
+    text = response.text
+    html_data = BeautifulSoup(text, 'html.parser')
+    headings = html_data.find_all('tr')[0]
+    headings_list = []
+    for x in headings:
+        headings_list.append(x.text)
+    headings_list = headings_list[:10]
+
+    data = []
+
+    for x in range(1, 6):
+        row = html_data.find_all('tr')[x]
+        column_value = row.find_all('td')
+        dict = {}
+
+        for i in range(10):
+            dict[headings_list[i]] = column_value[i].text
+        data.append(dict)
+
+    for values in data:
+        print(values)
+        print('')
+    time.sleep(600)
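
A minimal read-back sketch for the Excel script above (assumptions: Excel_Automation_Python/main.py has already been run, so StudentMarks.xlsx exists in the working directory, and openpyxl is installed as that script requires). Note that openpyxl does not evaluate spreadsheet formulas, so row 7 comes back as the raw "=SUM(...)/5" strings rather than computed averages:

from openpyxl import load_workbook

# Open the workbook written by Excel_Automation_Python/main.py
wb = load_workbook("StudentMarks.xlsx")
ws = wb["Student Marks"]

# Row 1: the bold headings (Name, English, Physics, Computer, History)
print([cell.value for cell in ws[1]])

# Rows 2-6: one row per student; row 7: the per-subject average formulas
for row in ws.iter_rows(min_row=2, max_row=7, values_only=True):
    print(row)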