diff --git a/LiveHindustan/LiveHindustan/__init__.py b/LiveHindustan/LiveHindustan/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/LiveHindustan/LiveHindustan/__init__.pyc b/LiveHindustan/LiveHindustan/__init__.pyc
new file mode 100644
index 0000000..469f1d4
Binary files /dev/null and b/LiveHindustan/LiveHindustan/__init__.pyc differ
diff --git a/LiveHindustan/LiveHindustan/items.py b/LiveHindustan/LiveHindustan/items.py
new file mode 100644
index 0000000..b2abb44
--- /dev/null
+++ b/LiveHindustan/LiveHindustan/items.py
@@ -0,0 +1,14 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your scraped items
+#
+# See documentation in:
+# http://doc.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class LivehindustanItem(scrapy.Item):
+    # define the fields for your item here like:
+    # name = scrapy.Field()
+    pass
diff --git a/LiveHindustan/LiveHindustan/middlewares.py b/LiveHindustan/LiveHindustan/middlewares.py
new file mode 100644
index 0000000..99ff5d6
--- /dev/null
+++ b/LiveHindustan/LiveHindustan/middlewares.py
@@ -0,0 +1,56 @@
+# -*- coding: utf-8 -*-
+
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+
+class LivehindustanSpiderMiddleware(object):
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, dict or Item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Response, dict
+        # or Item objects.
+        pass
+
+    def process_start_requests(self, start_requests, spider):
+        # Called with the start requests of the spider, and works
+        # similarly to the process_spider_output() method, except
+        # that it doesn't have a response associated.
+
+        # Must return only requests (not items).
+        for r in start_requests:
+            yield r
+
+    def spider_opened(self, spider):
+        spider.logger.info('Spider opened: %s' % spider.name)
diff --git a/LiveHindustan/LiveHindustan/pipelines.py b/LiveHindustan/LiveHindustan/pipelines.py
new file mode 100644
index 0000000..92463e9
--- /dev/null
+++ b/LiveHindustan/LiveHindustan/pipelines.py
@@ -0,0 +1,11 @@
+# -*- coding: utf-8 -*-
+
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+class LivehindustanPipeline(object):
+    def process_item(self, item, spider):
+        return item
diff --git a/LiveHindustan/LiveHindustan/settings.py b/LiveHindustan/LiveHindustan/settings.py
new file mode 100644
index 0000000..4cdc37e
--- /dev/null
+++ b/LiveHindustan/LiveHindustan/settings.py
@@ -0,0 +1,90 @@
+# -*- coding: utf-8 -*-
+
+# Scrapy settings for LiveHindustan project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     http://doc.scrapy.org/en/latest/topics/settings.html
+#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = 'LiveHindustan'
+
+SPIDER_MODULES = ['LiveHindustan.spiders']
+NEWSPIDER_MODULE = 'LiveHindustan.spiders'
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = 'LiveHindustan (+http://www.yourdomain.com)'
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = True
+
+# Configure maximum concurrent requests performed by Scrapy (default: 16)
+#CONCURRENT_REQUESTS = 32
+
+# Configure a delay for requests for the same website (default: 0)
+# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
+# See also autothrottle settings and docs
+#DOWNLOAD_DELAY = 3
+# The download delay setting will honor only one of:
+#CONCURRENT_REQUESTS_PER_DOMAIN = 16
+#CONCURRENT_REQUESTS_PER_IP = 16
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
+#   'Accept-Language': 'en',
+#}
+
+# Enable or disable spider middlewares
+# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    'LiveHindustan.middlewares.LivehindustanSpiderMiddleware': 543,
+#}
+
+# Enable or disable downloader middlewares
+# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    'LiveHindustan.middlewares.MyCustomDownloaderMiddleware': 543,
+#}
+
+# Enable or disable extensions
+# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    'scrapy.extensions.telnet.TelnetConsole': None,
+#}
+
+# Configure item pipelines
+# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+#    'LiveHindustan.pipelines.LivehindustanPipeline': 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See http://doc.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = 'httpcache'
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
diff --git a/LiveHindustan/LiveHindustan/settings.pyc b/LiveHindustan/LiveHindustan/settings.pyc
new file mode 100644
index 0000000..5dbb49d
Binary files /dev/null and b/LiveHindustan/LiveHindustan/settings.pyc differ
diff --git a/LiveHindustan/LiveHindustan/spiders/__init__.py b/LiveHindustan/LiveHindustan/spiders/__init__.py
new file mode 100644
index 0000000..ebd689a
--- /dev/null
+++ b/LiveHindustan/LiveHindustan/spiders/__init__.py
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
diff --git a/LiveHindustan/LiveHindustan/spiders/__init__.pyc b/LiveHindustan/LiveHindustan/spiders/__init__.pyc
new file mode 100644
index 0000000..9072531
Binary files /dev/null and b/LiveHindustan/LiveHindustan/spiders/__init__.pyc differ
diff --git a/LiveHindustan/LiveHindustan/spiders/news_spider.py b/LiveHindustan/LiveHindustan/spiders/news_spider.py
new file mode 100644
index 0000000..aa8d40b
--- /dev/null
+++ b/LiveHindustan/LiveHindustan/spiders/news_spider.py
@@ -0,0 +1,66 @@
+# -*- coding: utf-8 -*-
+
+import scrapy
+
+# Hindi month names as printed in livehindustan.com timestamps, mapped
+# index-for-index to English. Both lists were referenced but never defined
+# in the original code; the values below are the standard Hindi names of
+# the Gregorian months.
+hindi_month = ['जनवरी', 'फरवरी', 'मार्च', 'अप्रैल', 'मई', 'जून',
+               'जुलाई', 'अगस्त', 'सितंबर', 'अक्टूबर', 'नवंबर', 'दिसंबर']
+english_month = ['January', 'February', 'March', 'April', 'May', 'June',
+                 'July', 'August', 'September', 'October', 'November', 'December']
+
+def scrape_data(xpath_info):
+    # extract() already returns a list; copy it so each field owns its data.
+    return list(xpath_info)
+
+class NewsSpider(scrapy.Spider):
+    name = "live_hindustan"
+
+    allowed_domains = ['livehindustan.com']
+
+    start_urls = [
+        'http://www.livehindustan.com/national/news-1',
+        'http://www.livehindustan.com/international/news-1',
+        'http://www.livehindustan.com/sports/news-1',
+        'http://www.livehindustan.com/business/news-1',
+        'http://www.livehindustan.com/cricket/news-1',
+        'http://www.livehindustan.com/entertainment/news-1',
+        'http://www.livehindustan.com/gadgets/news-1',
+        'http://www.livehindustan.com/lifestyle/news-1'
+    ]
+
+    def parse(self, response):
+        news_titles = scrape_data(response.xpath('//div[@class="upper-first "]/h4/a/text()').extract())
+
+        news_urls = scrape_data(response.xpath('//div[@class="upper-first "]/h4/a/@href').extract())
+
+        image_urls = scrape_data(response.xpath('//div[@class="upper-first "]/a/img/@src').extract())
+
+        news_summary = scrape_data(response.xpath('//div[@class="upper-first "]/div/p/text()').extract())
+
+        news_date_time = scrape_data(response.xpath('//div[@class="list-time-tags tags-list"]/span/text()[not(ancestor::*[@class="list-tags"])]').extract())
+        date_time_list = []
+        for i in news_date_time:
+            if i != ' ':
+                # Swap the Hindi month name for its English equivalent.
+                for hm, em in zip(hindi_month, english_month):
+                    i = i.replace(hm.decode('UTF-8'), em)
+
+                date_time_list.append(i)
+
+        # The code below just prints each story; it could instead be processed
+        # further, e.g. inserted into a database (see the note after this diff).
+        for i, j, k, l, m in zip(news_titles, news_urls, image_urls, news_summary, date_time_list):
+            print "News title : " + i
+            print "News link : " + 'http://www.livehindustan.com' + j
+            print "News image url : " + k
+            print "News summary : " + l
+            print "News date & time : " + m
+            print "\n"
+
+        # Follow the pagination link to the next page, if one exists.
+        # (The original indexed extract()[-1] unconditionally, which raises
+        # IndexError on pages without pagination, then dead-checked a plain
+        # string against None; response.follow also resolves relative URLs.)
+        next_page = response.xpath('//ul[@class="pagination"]/li/a/@href').extract()
+        if next_page:
+            yield response.follow(next_page[-1], callback=self.parse)
diff --git a/LiveHindustan/LiveHindustan/spiders/news_spider.pyc b/LiveHindustan/LiveHindustan/spiders/news_spider.pyc
new file mode 100644
index 0000000..17a6da3
Binary files /dev/null and b/LiveHindustan/LiveHindustan/spiders/news_spider.pyc differ
diff --git a/LiveHindustan/README.md b/LiveHindustan/README.md
new file mode 100644
index 0000000..578ff28
--- /dev/null
+++ b/LiveHindustan/README.md
@@ -0,0 +1,16 @@
+# livehindustan.com crawler
+
+### Steps to run the crawler
+*From the project's root directory, run the following commands in order:*
+```
+virtualenv -p python2 venv
+```
+```
+source venv/bin/activate
+```
+```
+pip install -r requirements.txt
+```
+```
+scrapy crawl live_hindustan
+```
diff --git a/LiveHindustan/requirements.txt b/LiveHindustan/requirements.txt
new file mode 100644
index 0000000..8adbdda
--- /dev/null
+++ b/LiveHindustan/requirements.txt
@@ -0,0 +1,26 @@
+asn1crypto==0.23.0
+attrs==17.2.0
+Automat==0.6.0
+cffi==1.11.2
+constantly==15.1.0
+cryptography==2.0.3
+cssselect==1.0.1
+enum34==1.1.6
+hyperlink==17.3.1
+idna==2.6
+incremental==17.5.0
+ipaddress==1.0.18
+lxml==4.0.0
+parsel==1.2.0
+pyasn1==0.3.7
+pyasn1-modules==0.1.4
+pycparser==2.18
+PyDispatcher==2.0.5
+pyOpenSSL==17.3.0
+queuelib==1.4.2
+Scrapy==1.4.0
+service-identity==17.0.0
+six==1.11.0
+Twisted==17.9.0
+w3lib==1.18.0
+zope.interface==4.4.3
diff --git a/LiveHindustan/scrapy.cfg b/LiveHindustan/scrapy.cfg
new file mode 100644
index 0000000..b5f9cd0
--- /dev/null
+++ b/LiveHindustan/scrapy.cfg
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.org/en/latest/deploy.html
+
+[settings]
+default = LiveHindustan.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = LiveHindustan
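Note: as committed, the spider only prints each story, so the pass-through `LivehindustanPipeline` never receives any data. A minimal sketch of the item-based alternative hinted at in the spider's comment is below — the field names `title`, `link`, `image_url`, `summary`, and `date_time`, and the helper `build_items`, are assumptions for illustration, since the committed `items.py` defines no fields:

```
# -*- coding: utf-8 -*-
# Hypothetical sketch -- not part of the committed diff.
# items.py would gain explicit fields (names assumed):
import scrapy

class LivehindustanItem(scrapy.Item):
    title = scrapy.Field()
    link = scrapy.Field()
    image_url = scrapy.Field()
    summary = scrapy.Field()
    date_time = scrapy.Field()

# ...and the print loop in NewsSpider.parse could delegate to a generator
# like this (helper name assumed), yielding one item per story:
def build_items(news_titles, news_urls, image_urls, news_summary, date_time_list):
    # Zip the parallel per-field lists back into one item per story.
    for title, url, image, summary, when in zip(news_titles, news_urls,
                                                image_urls, news_summary,
                                                date_time_list):
        item = LivehindustanItem()
        item['title'] = title
        item['link'] = 'http://www.livehindustan.com' + url
        item['image_url'] = image
        item['summary'] = summary
        item['date_time'] = when
        yield item
```

`parse` would then iterate this generator and yield each item; uncommenting the `ITEM_PIPELINES` block already present in `settings.py` (which registers `LiveHindustan.pipelines.LivehindustanPipeline` at priority 300) would route every item through `process_item`, where the database insertion mentioned in the spider's comment could live.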