
Revisiting the Scrapy Framework

This post uses the Scrapy framework to fetch "simp quotes" (舔狗语录) from a public API, translate each one with Baidu Translate, and write the results to a txt file.

Spider script

import json

import scrapy

from post.items import PostItem


class BaidufanyiSpider(scrapy.Spider):
    """Fetch a random quote, POST it to Baidu's streaming translate
    endpoint, emit the original/translation pair, then loop."""

    name = "baidufanyi"
    allowed_domains = ["fanyi.baidu.com", "api.oick.cn"]
    start_urls = ["https://api.oick.cn/api/dog"]
    headers = {
        "Content-Type": "application/json",
    }

    def parse_post(self, response):
        word = response.meta["word"]
        # The translate endpoint streams SSE-style lines ("data: {...}");
        # parse each line on its own and keep only "Translating" events.
        for line in response.body.decode("utf-8").split("\n"):
            try:
                # removeprefix (Python 3.9+) strips the literal "data: "
                # prefix; str.lstrip would treat it as a character set.
                new_data = json.loads(line.removeprefix("data: "))
                event = new_data["data"]["event"]
                if event == "Translating":
                    pi = PostItem()
                    pi["org"] = new_data["data"]["list"][0]["src"]
                    pi["res"] = new_data["data"]["list"][0]["dst"]
                    # Only yield the result matching the queried word.
                    if word == new_data["data"]["list"][0]["src"]:
                        yield pi
            except Exception:
                # Skip blank lines and non-JSON stream chatter.
                continue

    def parse(self, response):
        # Discard the first quote and re-enter through parse_detail,
        # which owns the fetch-translate loop.
        yield scrapy.Request(url=self.start_urls[0], callback=self.parse_detail)

    def parse_detail(self, response):
        word = response.text.strip('"')  # the API returns a JSON-quoted string
        url = "https://fanyi.baidu.com/ait/text/translate"
        data = {
            "query": word,
            "from": "zh",
            "to": "en",
            "needPhonetic": True,
        }
        yield scrapy.FormRequest(
            url=url,
            method="POST",
            headers=self.headers,
            body=json.dumps(data),
            callback=self.parse_post,
            meta={"word": word},
        )
        # Request a fresh quote to keep the loop going; dont_filter=True
        # lets the same URL through the duplicate filter again.
        yield scrapy.Request(
            url=self.start_urls[0],
            callback=self.parse_detail,
            dont_filter=True,
        )
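The Baidu endpoint answers with a server-sent-events style stream, one "data: {...}" line per event, which is why parse_post splits the body on newlines, strips the "data: " prefix, and keeps only "Translating" events. A minimal sketch of that parsing step, using a made-up event line (the field names follow the spider code above, not a captured response):

import json

# Hypothetical event line in the shape parse_post expects.
line = 'data: {"data": {"event": "Translating", "list": [{"src": "原文", "dst": "source text"}]}}'

payload = json.loads(line.removeprefix("data: "))
if payload["data"]["event"] == "Translating":
    print(payload["data"]["list"][0]["src"], "->", payload["data"]["list"][0]["dst"])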

items.py

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy


class PostItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    org = scrapy.Field()
    res = scrapy.Field()
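scrapy.Item instances behave like dicts restricted to the declared fields, which is how the spider and the pipeline pass data around. A quick sketch, assuming it runs inside the project (e.g. from a scrapy shell in the project root):

from post.items import PostItem

pi = PostItem()
pi["org"] = "舔狗语录"      # OK: declared field
pi["res"] = "simp quote"    # OK: declared field
# pi["other"] = 1           # would raise KeyError: undeclared field
print(dict(pi))             # {'org': '舔狗语录', 'res': 'simp quote'}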

pipelines.py

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html


# useful for handling different item types with a single interface
from itemadapter import ItemAdapter

from datetime import datetime


class PostPipeline:
    def open_spider(self, spider):
        # Open in append mode so earlier results survive between runs.
        self.f = open('result.txt', 'a', encoding='utf-8')

    def process_item(self, item, spider):
        # One tab-separated line per item: original, translation, timestamp.
        new_item = f'{item["org"]}\t{item["res"]}\t{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}\n'
        self.f.write(new_item)
        self.f.flush()  # flush right away so the file can be watched live
        return item

    def close_spider(self, spider):
        self.f.close()
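The custom pipeline gives full control over the tab-separated layout; for simpler cases, Scrapy's built-in feed exports could write the items with no pipeline code at all. A minimal sketch of the equivalent FEEDS setting (CSV shown here, since the built-in exporters don't reproduce the exact tabbed-with-timestamp format above):

# settings.py — a sketch using Scrapy's built-in feed exports (Scrapy ≥ 2.1)
# instead of PostPipeline; note there is no timestamp column this way.
FEEDS = {
    "result.csv": {
        "format": "csv",
        "encoding": "utf-8",
        "fields": ["org", "res"],
    },
}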

settings.py

# Scrapy settings for post project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = "post"

SPIDER_MODULES = ["post.spiders"]
NEWSPIDER_MODULE = "post.spiders"

ADDONS = {}

# Crawl responsibly by identifying yourself (and your website) on the user-agent
# USER_AGENT = "post (+http://www.yourdomain.com)"

# Obey robots.txt rules
# ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 60 / 120  # 0.5 s between requests, i.e. at most ~120 requests/minute
RANDOMIZE_DOWNLOAD_DELAY = True
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
#    "Accept-Language": "en",
# }

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    "post.middlewares.PostSpiderMiddleware": 543,
# }

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    "post.middlewares.PostDownloaderMiddleware": 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    "scrapy.extensions.telnet.TelnetConsole": None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    "post.pipelines.PostPipeline": 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = "httpcache"
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"

# Set settings whose default value is deprecated to a future-proof value
FEED_EXPORT_ENCODING = "utf-8"
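With everything in place, running "scrapy crawl baidufanyi" from the project root starts the loop. Alternatively, a small runner script could launch it programmatically; a minimal sketch, assuming the standard project layout with this file next to scrapy.cfg:

# run.py — minimal sketch; assumes it sits in the "post" project root,
# next to scrapy.cfg, so get_project_settings() picks up settings.py.
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

process = CrawlerProcess(get_project_settings())
process.crawl("baidufanyi")  # spider name from BaidufanyiSpider.name
process.start()              # blocks until the crawl is stopped (Ctrl+C)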
