EDIT:在这两个版本中,我都添加了保存为 CSV 文件的功能。
如果你同时有 Selenium 和 requests,那么有三种可能:
- 使用 Selenium 登录并获取页面。
- 使用 requests.Session 登录并获取页面。
- 使用 Selenium 登录,然后从 Selenium 中取出会话信息(cookies),并在 requests 中使用它们。
使用 Selenium 登录和获取页面要简单得多,但比 requests 慢。只需要:
- 用 browser.get(url) 代替 r = session.get(post_url)
- 用 BeautifulSoup(browser.page_source, ...) 代替 BeautifulSoup(r.text, ...)
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import time
import csv
# --| Setup
# Log in to wordpress.org with Selenium, walk the support-forum pages of the
# "advanced-gutenberg" plugin, collect every topic's title/URL/content, and
# save everything to a CSV file.
options = Options()
#options.add_argument("--headless")
#options.add_argument("--window-size=1980,1020")
#options.add_argument('--disable-gpu')
# NOTE(review): `executable_path` and `find_element_by_css_selector` are the
# Selenium 3 API; Selenium 4 replaced them with Service(...) and
# find_element(By.CSS_SELECTOR, ...) — confirm the installed version.
browser = webdriver.Chrome(executable_path=r'C:\chrome\chromedriver.exe', options=options)
#browser = webdriver.Firefox()

# --- login ---
browser.get("https://login.wordpress.org/?locale=en_US")
time.sleep(2)  # let the login form render before querying its elements

user_name = browser.find_element_by_css_selector('#user_login')
user_name.send_keys("my_login")

password = browser.find_element_by_css_selector('#user_pass')
password.send_keys("my_password")

submit = browser.find_elements_by_css_selector('#wp-submit')[0]
submit.click()
time.sleep(2)  # wait for the post-login redirect before reading page_source

# Example: send page source to BeautifulSoup (or use selenium itself) to parse
soup = BeautifulSoup(browser.page_source, 'lxml')
use_bs4 = soup.find('title')
print(use_bs4.text)
#print('*' * 25)
#use_sel = browser.find_elements_by_css_selector('div > div._1vC4OE')
#print(use_sel[0].text)

# --- pages ---
data = []
url = 'https://wordpress.org/support/plugin/advanced-gutenberg/page/{}/'

for page in range(1, 3):
    print('\n--- PAGE:', page, '---\n')

    # read the page with the list of posts
    browser.get(url.format(page))
    soup = BeautifulSoup(browser.page_source, 'html.parser')  # or 'lxml'

    all_uls = soup.find('li', class_="bbp-body").find_all('ul')
    for number, ul in enumerate(all_uls, 1):
        print('\n--- post:', number, '---\n')

        a = ul.find('a')
        if a:
            post_url = a['href']
            post_title = a.text
            print('href:', post_url)
            print('text:', post_title)
            print('---------')

            # read the page with the post's content
            browser.get(post_url)
            sub_soup = BeautifulSoup(browser.page_source, 'html.parser')
            post_content = sub_soup.find(class_='bbp-topic-content').get_text(strip=True, separator='\n')
            print(post_content)

            # keep on the list as a dictionary
            data.append({
                'href': post_url,
                'text': post_title,
                'content': post_content,
            })

# --- save ---
# newline='' is required by the csv module (otherwise blank rows on Windows);
# utf-8 so non-ASCII forum content doesn't crash the write.
with open("wp-forum-conversations.csv", "w", newline='', encoding='utf-8') as f:
    writer = csv.DictWriter(f, ["text", "href", "content"])
    writer.writeheader()
    writer.writerows(data)  # all rows at once
EDIT: requests 工作速度更快,但需要更多准备工作:要在 Firefox/Chrome 中用 DevTools 查看登录表单里的所有字段,以及它发送给服务器的其他值;当登录成功时,还需要查看它被重定向到了哪里。顺便说一句:使用 DevTools 之前不要忘记关闭 JavaScript——因为 requests 不运行 JavaScript,此时页面在表单中可能会发送不同的值。(它确实发送了不同的字段)
requests 需要完整的 User-Agent 才能正常工作。
首先,我加载登录页面并复制其中所有 <input> 字段的值,再连同 login 和 password 一起发送出去。
登录后,我检查它是否被重定向到了不同的页面——以确认已成功登录。你还可以检查页面上是否显示了你的名字。
import requests
from bs4 import BeautifulSoup
import csv
# Log in to wordpress.org with requests.Session, walk the support-forum pages
# of the "advanced-gutenberg" plugin, collect every topic's title/URL/content,
# and save everything to a CSV file.
s = requests.Session()
s.headers.update({
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:80.0) Gecko/20100101 Firefox/80.0' # it needs full user-agent
})

# --- get page with login form ---
r = s.get("https://login.wordpress.org/?locale=en_US")
soup = BeautifulSoup(r.text, 'html.parser')

# copy all fields from the form (including hidden ones) - the server expects
# them back together with the credentials
payload = {}
for field in soup.find_all('input'):
    name = field.get('name')
    if not name:
        continue  # an <input> without a name attribute has nothing to send
    value = field.get('value', '')  # some inputs (e.g. checkboxes) have no value
    payload[name] = value
    print(name, '=', value)

# --- login ---
payload['log'] = 'my_login'
payload['pwd'] = 'my_password'

r = s.post('https://login.wordpress.php'.replace('.php', '.org') + '/wp-login.php', data=payload) if False else s.post('https://login.wordpress.org/wp-login.php', data=payload)
print('redirected to:', r.url)

# --- check if logged in ---

# check if logged in - a successful login redirects away from wp-login.php
if r.url.startswith('https://login.wordpress.org/wp-login.php'):
    print('Problem to login')
    exit()

# check if logged in - a logged-in forum page shows the user's display name
url = 'https://wordpress.org/support/plugin/advanced-gutenberg/page/1/'
r = s.get(url)
soup = BeautifulSoup(r.text, 'html.parser')
name = soup.find('span', {'class': 'display-name'})
if not name:
    print('Problem to login')
    exit()
else:
    print('name:', name.text)

# --- pages ---
data = []
url = 'https://wordpress.org/support/plugin/advanced-gutenberg/page/{}/'

for page in range(1, 3):
    print('\n--- PAGE:', page, '---\n')

    # read the page with the list of posts
    r = s.get(url.format(page))
    soup = BeautifulSoup(r.text, 'html.parser')  # or 'lxml'

    all_uls = soup.find('li', class_="bbp-body").find_all('ul')
    for number, ul in enumerate(all_uls, 1):
        print('\n--- post:', number, '---\n')

        a = ul.find('a')
        if a:
            post_url = a['href']
            post_title = a.text
            print('href:', post_url)
            print('text:', post_title)
            print('---------')

            # read the page with the post's content
            r = s.get(post_url)
            sub_soup = BeautifulSoup(r.text, 'html.parser')
            post_content = sub_soup.find(class_='bbp-topic-content').get_text(strip=True, separator='\n')
            print(post_content)

            # keep on the list as a dictionary
            data.append({
                'href': post_url,
                'text': post_title,
                'content': post_content,
            })

# --- save ---
# newline='' is required by the csv module (otherwise blank rows on Windows);
# utf-8 so non-ASCII forum content doesn't crash the write.
with open("wp-forum-conversations.csv", "w", newline='', encoding='utf-8') as f:
    writer = csv.DictWriter(f, ["text", "href", "content"])
    writer.writeheader()
    writer.writerows(data)  # all rows at once