概述
放假了闲着没事,顺便记录一下。开头的导入是祖传的,实际上没用到那么多,按需增删即可。
import datetime
import socketserver
import time
from json import loads
from xml import etree
import requests
# Disable insecure-request warnings (requires: pip install requests)
import re
import base64
from PIL import Image
from io import BytesIO
import urllib.request
from pip._internal.network import session
from requests.packages.urllib3.exceptions import InsecureRequestWarning
import http.cookiejar
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
from bs4 import BeautifulSoup
import requests
from lxml import etree
from urllib import parse
from urllib import request
import urllib
from urllib import request
import socket
import pdb
import datetime
import os,base64
### Fetch the doujinshi cover image + title
def Getbenzi():
    """Fetch a random gallery page, download its cover image to D:/pic.jpg,
    and return cover-URL + title as a single concatenated string.

    NOTE(review): requires a local proxy listening on 127.0.0.1:1080 and
    network access; the download path is hard-coded to D:/pic.jpg.
    """
    proxies = {
        'https': 'https://127.0.0.1:1080',
        'http': 'http://127.0.0.1:1080'
    }
    # A browser User-Agent is required, otherwise the response triggers:
    # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xa0 in position 8974
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36'
    }
    google_url = 'https://nhentai.net/random/'
    # Route all urllib traffic through the local proxy.
    opener = request.build_opener(request.ProxyHandler(proxies))
    request.install_opener(opener)
    req = request.Request(google_url, headers=headers)
    # Close the HTTP response deterministically (the original leaked it).
    with request.urlopen(req) as response:
        tree = etree.HTML(response.read().decode())
    # Cover image URL and gallery title, located via the page's known layout.
    a = tree.xpath('//div[@id="cover"]/a/img/@data-src')[0]
    print(a)
    b = tree.xpath('//*[@id="info"]/h2/span[2]/text()')[0]
    print(b)
    print('over')
    # Remote image URL; second argument is the local target path.
    img_url = a
    urllib.request.urlretrieve(img_url, 'D:/pic.jpg')
    return a + b
############################################################################
######################### Everything below is optional #####################
#### getwww() below can be omitted; it is an interface provided by lcy,
### used to upload the image to a domestic host and obtain its QQ md5 code.
### Not public for now.
## Get the image URL
def getwww():
    """Upload D:/pic.jpg to the remote host and return its picture code.

    NOTE(review): the target URL is redacted ('https://icy???') in the
    original source, so this cannot run as-is.
    """
    # Renamed local: the module level already binds a (bogus) `session` import.
    http = requests.session()
    # Close the file handle deterministically (the original leaked it).
    with open('D://pic.jpg', 'rb') as fh:
        files = {"file": ('pic.jpg', fh, 'image/jpeg')}
        result = http.post('https://icy???', files=files)
    print(result.text)
    print('ok')
    return getpiccode(result.text)
##获取图片md5
def getpiccode(www):
    """POST the uploaded picture's URL and return the server's reply text
    (the picture's md5 code, per the author's notes).

    NOTE(review): the target URL is redacted ('https://icyicy???').
    """
    http = requests.session()
    urls = {
        'url': www
    }
    # Renamed from `request`, which shadowed the `urllib.request` import.
    resp = http.post('https://icyicy???', urls)
    print(resp.text)
    return resp.text
def socketrun():
    """Read a line from stdin, then push Getbenzi()'s result (newline
    terminated) to localhost:9955, repeatedly.

    NOTE(review): the loop flag is never cleared (the receive/'end' handling
    was commented out in the original), so this loops until a socket error
    or the process is killed.
    """
    socket.setdefaulttimeout(2)
    s = socket.socket()
    try:
        s.connect(("localhost", 9955))
        print("C:input data (with 'end' for exit the program)")
        goon = True
        while goon:
            print("C:-------------------------------------")
            print("C:Please input data:")
            # "\n" restored — the backslash was lost when the post was scraped.
            indata = input() + "\n"
            bbb = Getbenzi() + "\n"
            s.send(bbb.encode())  # must be terminated with "\n"
    finally:
        # Close the socket even on error; in the original this line was
        # unreachable because the loop never exits.
        s.close()
    return
### Send a group message
def sendmsggroup():
    """Post Getbenzi()'s result as a message to a fixed QQ group."""
    sess = requests.session()
    payload = {
        'groupid': '???',  # group number (redacted)
        'msg': Getbenzi(),
    }
    print(sess.post('https://icy???', payload))
    return
### Send a private message
def sendmsgprivate():
    """Post Getbenzi()'s result as a scheduled private message to a fixed QQ id."""
    sess = requests.session()
    payload = {
        'msg': Getbenzi(),
        'time': '2021-01-15T15:25:44',
        'timemethod': 'exact',
        'everydaycb': 'false',
        'qqid': '???',  # target QQ number (redacted)
        'sendto': 'priv',
    }
    reply = sess.post('https://icy???', payload)
    print(reply)
    return
最后
以上就是务实曲奇为你收集整理的【水汐のpython】 用python抓取外网的本子站并获取本子封面和信息的全部内容,希望文章能够帮你解决【水汐のpython】 用python抓取外网的本子站并获取本子封面和信息所遇到的程序开发问题。
如果觉得靠谱客网站的内容还不错,欢迎将靠谱客网站推荐给程序员好友。
本图文内容来源于网友提供,作为学习参考使用,或来自网络收集整理,版权属于原作者所有。
发表评论 取消回复