# -*- coding: utf-8 -*-
from scrapy.contrib.spiders import CSVFeedSpider
from $project_name.items import ${ProjectName}Item


class $classname(CSVFeedSpider):
    name = '$name'
    allowed_domains = ['$domain']
    start_urls = ['http://www.$domain/feed.csv']
    # headers = ['id', 'name', 'description', 'image_link']
    # delimiter = '\t'

    # Do any adaptations you need here
    #def adapt_response(self, response):
    #    return response

    def parse_row(self, response, row):
        i = ${ProjectName}Item()
        #i['url'] = row['url']
        #i['name'] = row['name']
        #i['description'] = row['description']
        return i
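
# CSVFeedSpider calls parse_row() once per CSV row, passing `row` as a dict
# keyed by the column headers (either the `headers` attribute above or the
# first line of the feed). A minimal items module matching the commented
# field assignments might look like the sketch below; the field names are
# illustrative, not required by the template:
#
#   import scrapy
#
#   class ${ProjectName}Item(scrapy.Item):
#       url = scrapy.Field()
#       name = scrapy.Field()
#       description = scrapy.Field()
#
# Once the placeholders are rendered, the spider can be run from the project
# root with `scrapy crawl $name`.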