Running Scrapy from a script with Django Channels

#python #django #scrapy #django-channels

Question:

I wanted to add asynchronous behaviour for scraping individual items from a URL with a Scrapy spider. Since Django Channels works great in my other projects, I thought I could use it here instead of something like a scrapyd server. Scrapy itself does everything as expected: it generates the request, scrapes its content, saves it to the DB, and so on. But it raises an error that crashes the Django Channels socket before anything is sent back to the client. Here is the error message I get in my terminal:

    web_1     | 2021-07-05 22:24:32 [scrapy.extensions.logstats] INFO: Crawled 0 pages (at 0 pages/min), scraped 0 items (at 0 items/min)
    web_1     | 2021-07-05 22:24:32 [scrapy.extensions.telnet] INFO: Telnet console listening on 127.0.0.1:6023
    web_1     | 2021-07-05 22:24:32 [aioredis] DEBUG: Cancelling waiter (<Future cancelled>, [None, None])
    web_1     | 2021-07-05 22:24:32 [aioredis] DEBUG: Waiter future is already done <Future cancelled>
    web_1     | 2021-07-05 22:24:33 [daphne.server] ERROR: Exception inside application: Task got bad yield: <Deferred at 0x7fddc8564f10>
    web_1     | Traceback (most recent call last):
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/staticfiles.py", line 44, in __call__
    web_1     |     return await self.application(scope, receive, send)
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/routing.py", line 71, in __call__
    web_1     |     return await application(scope, receive, send)
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/routing.py", line 150, in __call__
    web_1     |     return await application(
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/consumer.py", line 94, in app
    web_1     |     return await consumer(scope, receive, send)
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/consumer.py", line 58, in __call__
    web_1     |     await await_many_dispatch(
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/utils.py", line 51, in await_many_dispatch
    web_1     |     await dispatch(result)
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/consumer.py", line 73, in dispatch
    web_1     |     await handler(message)
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/generic/websocket.py", line 196, in websocket_receive
    web_1     |     await self.receive(text_data=message["text"])
    web_1     |   File "/usr/local/lib/python3.8/site-packages/channels/generic/websocket.py", line 259, in receive
    web_1     |     await self.receive_json(await self.decode_json(text_data), **kwargs)
    web_1     |   File "/usr/src/app/scraper/consumers.py", line 61, in receive_json
    web_1     |     await runner.crawl(XSpider, start_urls=[url])
    web_1     | RuntimeError: Task got bad yield: <Deferred at 0x7fddc8564f10>
    web_1     | 2021-07-05 22:24:33 [daphne.ws_protocol] DEBUG: WebSocket closed for ['172.18.0.1', 38220]
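
From what I can tell, the last line of the traceback is the key: asyncio refuses to await a Twisted Deferred directly. This minimal snippet, outside Django entirely (just my attempt to isolate the problem), reproduces the same RuntimeError:

    import asyncio
    from twisted.internet import defer

    async def main():
        # CrawlerRunner.crawl() returns a twisted.internet.defer.Deferred;
        # awaiting one from an asyncio task yields the Deferred object itself,
        # which asyncio rejects with "Task got bad yield: <Deferred ...>"
        await defer.Deferred()

    asyncio.run(main())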
 

And here are my Django and Scrapy files:

    from channels.generic.websocket import (
        WebsocketConsumer,
        JsonWebsocketConsumer,
        AsyncWebsocketConsumer,
        AsyncJsonWebsocketConsumer,
    )
    from channels.exceptions import StopConsumer
    from asgiref.sync import async_to_sync
    from scrapy import Spider
    from scrapy import Request
    from scrapy.utils.project import get_project_settings
    from scrapy.crawler import CrawlerRunner
    from scrapy.http import HtmlResponse
    from scrapy.utils.log import configure_logging
    from scrapy.settings import Settings
    from twisted.internet import reactor

    from scraper.src.app.spiders.Spiders import XSpider
    from scraper.src.app import settings as my_settings

    # load the Scrapy project settings from the scraper app
    crawler_settings = Settings()
    crawler_settings.setmodule(my_settings)

    configure_logging({'LOG_FORMAT': '%(levelname)s: %(message)s'})


    class NewProductConsumer(AsyncJsonWebsocketConsumer):
        async def connect(self):
            await self.accept()

        async def receive_json(self, content):
            print(content, 41)
            url = content.get('url')
            runner = CrawlerRunner(crawler_settings)
            # consumers.py line 61 in the traceback above:
            await runner.crawl(XSpider, start_urls=[url])
            print('scrape finished')
            d = runner.join()
            await d.addBoth(lambda _: reactor.stop())
            await reactor.run()  # the script will block here until all crawling jobs are finished
            await self.send_json({'result': {}})

        async def disconnect(self, close_code):
            await self.close()
            # raise StopConsumer()
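
Since Daphne itself runs Twisted on top of the asyncio reactor, my current understanding is that the Deferred returned by runner.crawl() would have to be bridged into asyncio explicitly instead of being awaited directly. This is only a sketch of what I mean, using Twisted's Deferred.asFuture() (the crawl_url helper is hypothetical, and whether the running loop is the right one to pass is part of what I am unsure about):

    import asyncio

    from scrapy.crawler import CrawlerRunner
    from scraper.src.app.spiders.Spiders import XSpider

    async def crawl_url(url, settings):
        # hypothetical helper, not wired into the consumer above
        runner = CrawlerRunner(settings)
        d = runner.crawl(XSpider, start_urls=[url])  # plain Deferred, not awaited
        # Deferred.asFuture() adapts a Deferred to an asyncio.Future on the
        # given event loop; no reactor.run()/reactor.stop() here, because
        # Daphne's reactor is already running
        await d.asFuture(asyncio.get_running_loop())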

I tried several of the Channels consumer classes, but I get a similar result each time. My guess would be that there is some interference between Django Channels and Scrapy at the dependency level, since they both rely on async functionality.
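
To at least test that theory, the reactor actually installed inside the Daphne process can be inspected; a purely diagnostic snippet:

    from twisted.internet import reactor

    # Under Daphne this reports AsyncioSelectorReactor: Twisted already runs
    # on the same asyncio event loop that Channels uses, so calling
    # reactor.run() or reactor.stop() from a consumer would clash with the
    # reactor Daphne itself manages.
    print(type(reactor).__name__)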