diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/.IMDB/spiders/__init__.py b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/.IMDB/spiders/__init__.py
new file mode 100644
index 000000000..ebd689ac5
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/.IMDB/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/.IMDB/spiders/imdb_spider.py b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/.IMDB/spiders/imdb_spider.py
new file mode 100644
index 000000000..7d94d3e20
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/.IMDB/spiders/imdb_spider.py
@@ -0,0 +1,36 @@
+import scrapy
+from ..items import ImdbItem
+
+class ImdbSpider(scrapy.Spider):
+    name = "imdb"
+    page_number = 2
+    start_urls = [
+        'https://www.imdb.com/list/ls061697854'
+    ]
+
+    def parse(self, response):
+        items = ImdbItem()
+        # Each selector returns a list with one entry per movie on the page.
+        title = response.css('.lister-item-header a::text').extract()
+        yearReleased = response.css('.text-muted.unbold::text').extract()
+        rating = response.css('.ipl-rating-star.small .ipl-rating-star__rating::text').extract()
+        votes = response.css('.text-muted+ span:nth-child(2)::text').extract()
+        totalGross = response.css('.text-muted .ghost~ .text-muted+ span::text').extract()
+        imageURL = response.css('#main .loadlate::attr(loadlate)').extract()
+        genre = response.css('.genre::text').extract()
+
+        items['title'] = title
+        items['yearReleased'] = yearReleased
+        items['rating'] = rating
+        items['votes'] = votes
+        items['totalGross'] = totalGross
+        items['imageURL'] = imageURL
+        items['genre'] = genre
+
+        yield items
+
+        # Follow the list pagination until the last page (31) is reached.
+        next_page = 'https://www.imdb.com/list/ls061697854/?page=' + str(ImdbSpider.page_number)
+        if ImdbSpider.page_number < 32:
+            ImdbSpider.page_number += 1
+            yield response.follow(next_page, callback=self.parse)
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/IMDB b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/IMDB
new file mode 160000
index 000000000..017229962
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/IMDB
@@ -0,0 +1 @@
+Subproject commit 017229962c69eb5ff3de8612b4a4846da47eb130
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/README.md b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/README.md
new file mode 100644
index 000000000..19727a3d8
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/README.md
@@ -0,0 +1,48 @@
+
+# IMDB Scraper
+ - Scrapes all Hollywood Hindi-dubbed movies from an IMDB list
+ - Saves the results to MongoDB or CSV
+ - Fast
+
+### Prerequisites
+
+* [Scrapy](https://scrapy.org/) - Python framework for extracting the data.
+* [pymongo](https://pymongo.readthedocs.io/en/stable/#) - Module for working with MongoDB.
+
+### Installation
+
+```sh
+# Install Scrapy
+$ pip3 install scrapy
+```
+
+```sh
+# Install pymongo
+$ pip3 install pymongo
+```
+
+### How to run the script
+ 1. Clone the repo
+ 2. Go to the project directory
+ 3. Open a terminal/cmd
+ 4. Run `scrapy crawl imdb`
+
+ If you want the output as CSV, add `-o filename.csv` to the command, as shown below.
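+
+ For example, to crawl the full list and write every scraped item to a CSV file (`movies.csv` is just a placeholder name):
+
+```sh
+$ scrapy crawl imdb -o movies.csv
+```
+
+ Scraped items are also inserted into the `IMDB` collection of the `IMDB` database on a local MongoDB instance by `pipelines.py`. A minimal sketch for inspecting what was stored, assuming MongoDB is running on the default `localhost:27017`:
+
+```py
+import pymongo
+
+# Connect to the same database/collection the pipeline writes to.
+client = pymongo.MongoClient('localhost', 27017)
+collection = client['IMDB']['IMDB']
+
+# Show how many documents were stored and what one looks like.
+print(collection.count_documents({}))
+print(collection.find_one())
+```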
+
+
+### Screenshot/GIF showing the sample use of the script
+![scrapy cmd](imdb.gif)
+
+Author
+----
+
+Apex-Code
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/imdb.gif b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/imdb.gif
new file mode 100644
index 000000000..5a3a05a11
Binary files /dev/null and b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/imdb.gif differ
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/items.py b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/items.py
new file mode 100644
index 000000000..e54fca04d
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/items.py
@@ -0,0 +1,17 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class ImdbItem(scrapy.Item):
+    # define the fields for your item here like:
+    title = scrapy.Field()
+    yearReleased = scrapy.Field()
+    rating = scrapy.Field()
+    votes = scrapy.Field()
+    totalGross = scrapy.Field()
+    imageURL = scrapy.Field()
+    genre = scrapy.Field()
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/middlewares.py b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/middlewares.py
new file mode 100644
index 000000000..921ddd670
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/middlewares.py
@@ -0,0 +1,103 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import is_item, ItemAdapter
+
+
+class ImdbSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn’t have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
+
+
+class ImdbDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
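+        # It also connects the spider_opened() handler below to the
+        # spider_opened signal, so the middleware logs when the spider starts.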
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/pipelines.py b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/pipelines.py
new file mode 100644
index 000000000..272531771
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/pipelines.py
@@ -0,0 +1,25 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+import pymongo
+
+
+class ImdbPipeline:
+
+    def __init__(self):
+        # Connect to a local MongoDB instance; items are stored in the
+        # 'IMDB' collection of the 'IMDB' database.
+        self.conn = pymongo.MongoClient(
+            'localhost',
+            27017
+        )
+
+        db = self.conn['IMDB']
+        self.collection = db['IMDB']
+
+    def process_item(self, item, spider):
+        # insert_one() replaces the deprecated Collection.insert(),
+        # which was removed in PyMongo 4.
+        self.collection.insert_one(dict(item))
+        return item
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/scrapy.cfg b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/scrapy.cfg
new file mode 100644
index 000000000..043af37d3
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = IMDB.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = IMDB
diff --git a/Scripts/Web_Scrappers/IMDB-Mass-Scraper/settings.py b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/settings.py
new file mode 100644
index 000000000..48e13bc59
--- /dev/null
+++ b/Scripts/Web_Scrappers/IMDB-Mass-Scraper/settings.py
@@ -0,0 +1,88 @@
+# Scrapy settings for IMDB project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'IMDB'
+
+SPIDER_MODULES = ['IMDB.spiders']
+NEWSPIDER_MODULE = 'IMDB.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+USER_AGENT = 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = True
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'IMDB.middlewares.ImdbSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'IMDB.middlewares.ImdbDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+ITEM_PIPELINES = {
+    'IMDB.pipelines.ImdbPipeline': 300,
+}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'