Apache HttpClient doesn't get content of pages using pagination links. I get 200 status but html has no content
I'm trying to web-crawl content pages with Apache HttpClient. When I request the next page using a link from the pagination, I get status 200, but the HTML shows a 500 error in the body and has no content. Postman works fine and gets the content even when using the pagination links.
Main class
public static void main(String[] args) {
    String url = "https://www.cochranelibrary.com/cdsr/reviews/topics";
    MyContentFetcher myContentFetcher = new MyContentFetcher();
    MyParser myParser = new MyParser();
    try {
        // Load the topic list page
        String topicsPage = myContentFetcher.loadHTML(url);
        // Get all the topics
        Map<Integer, MyNode> topics = myParser.getTopicList(topicsPage);
        // Print all the topics and ask the user to choose one
        for (int id : topics.keySet())
            System.out.println("-> " + id + " <- " + topics.get(id).getTopic());
        System.out.println("********************");
        System.out.print("Enter ID number from the list above to get reviews or enter anything else to exit:\n");
        BufferedReader reader = new BufferedReader(new InputStreamReader(System.in));
        String id = reader.readLine();
        // Validate user input, get the link and topic, and print the choice
        if (isNumber(id)) {
            int idNum = Integer.parseInt(id);
            if (idNum <= topics.size() && idNum > 0) {
                String topic = topics.get(idNum).getTopic();
                String link = topics.get(idNum).getLink();
                System.out.println("You picked: " + topic + link + "\n***************************");
                // Load the first page of reviews
                myParser.loadReviews(myContentFetcher.loadHTML(link), topic);
                // Get links to the other pages
                Queue<String> paginationLinks = myParser.getLinks();
                // --------------> WORKS FINE UNTIL HERE <--------------
                // Problem starts here....
                // Load the list of reviews for the chosen topic
                while (!paginationLinks.isEmpty()) {
                    String page = myContentFetcher.loadHTML(paginationLinks.remove());
                    myParser.loadReviews(page, topic);
                }
            }
        }
        System.out.println("Exiting...");
    } catch (IOException e) {
        System.out.println("There was a problem...");
    }
}
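(The isNumber helper isn't shown in the post; here is a minimal sketch of what it presumably does. The body is my guess, not the original code.)

// Hypothetical helper -- not part of the original post.
// Returns true if the string parses as an integer.
private static boolean isNumber(String s) {
    if (s == null || s.isEmpty())
        return false;
    try {
        Integer.parseInt(s);
        return true;
    } catch (NumberFormatException e) {
        return false;
    }
}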
!!!! Here is the class that fetches the HTML. I'm probably doing something wrong here...
import org.apache.http.client.config.CookieSpecs;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;

import java.io.IOException;
import java.util.Scanner;

public class MyContentFetcher {

    public MyContentFetcher() {
    }

    String loadHTML(String url) throws IOException {
        // Create the request configuration: allow circular redirects and use the standard cookie spec
        RequestConfig config = RequestConfig.custom()
                .setCircularRedirectsAllowed(true)
                .setCookieSpec(CookieSpecs.STANDARD)
                .build();
        // Create an HttpClient object
        CloseableHttpClient httpClient = HttpClients.custom()
                .setDefaultRequestConfig(config)
                .build();
        // Create an HttpGet object
        HttpGet httpget = new HttpGet(url);
        httpget.setHeader("User-Agent", "Mozilla/5.0 (Linux; Android 8.1.0; Pixel Build/OPM4.171019.021.D1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.109 Mobile Safari/537.36 EdgA/42.0.0.2057");
        CloseableHttpResponse httpResponse = httpClient.execute(httpget);
        // Read the response body line by line
        Scanner sc = new Scanner(httpResponse.getEntity().getContent());
        StringBuilder page = new StringBuilder();
        while (sc.hasNextLine())
            page.append(sc.nextLine()).append(" ");
        httpResponse.close();
        httpClient.close();
        return page.toString();
    }
}
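(An aside from me, not part of the original question: reading the body with a Scanner works, but httpclient 4.x ships EntityUtils, which consumes and decodes the entity in one call. A minimal sketch; the method name is invented for illustration.)

// Sketch only: the same fetch using EntityUtils (org.apache.http.util.EntityUtils).
// toString() reads the whole entity and decodes it with the charset from the
// Content-Type header, falling back to ISO-8859-1 when none is declared.
String loadHTMLWithEntityUtils(String url) throws IOException {
    try (CloseableHttpClient httpClient = HttpClients.createDefault();
         CloseableHttpResponse response = httpClient.execute(new HttpGet(url))) {
        return EntityUtils.toString(response.getEntity());
    }
}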
Here is the parser. There is nothing wrong with the parser (it parses fine and does what I need):
import org.jsoup.Jsoup;
import org.jsoup.nodes.Document;
import org.jsoup.nodes.Element;

import java.io.IOException;
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.*;

public class MyParser {

    private Document htmlPage;
    private Element reviewBlock;

    public MyParser() {
    }

    // Loads all topics from the Cochrane Library into a map -> (Topic Name, Link)
    public Map<Integer, MyNode> getTopicList(String page) {
        Map<Integer, MyNode> topics = new HashMap<Integer, MyNode>();
        htmlPage = Jsoup.parse(page);
        // Get each 'a' element that is inside an 'li' with a class name of browse-by-list-item
        int i = 1;
        MyNode info;
        for (Element element : htmlPage.body().select("li.browse-by-list-item > a")) {
            info = new MyNode(element.select("button").text(),
                    element.select("a").attr("href").trim());
            topics.put(i, info);
            i++;
        }
        return topics;
    }

    // Loads reviews
    public void loadReviews(String page, String topic) throws IOException {
        htmlPage = Jsoup.parse(page);
        System.out.println("**************************\n" + page + "\n**************************\n");
        // Get all review blocks
        for (Element element : htmlPage.body().select(".search-results-item-body")) {
            reviewBlock = element;
            String review = getLink() + " | " + topic + " | " + getTitle() + " | " + getAuthor() + " | " + getDate();
            System.out.println(review);
        }
    }

    Queue<String> getLinks() {
        System.out.println("GETTING LINKS");
        Queue<String> links = new LinkedList<>();
        for (Element element : htmlPage.body().select("li.pagination-page-list-item > a")) {
            links.add(element.attr("href"));
        }
        return links;
    }

    private String getLink() {
        return "https://www.cochranelibrary.com" + reviewBlock.select("a").attr("href");
    }

    public String getTitle() {
        return reviewBlock.selectFirst("a").text();
    }

    public String getAuthor() {
        return reviewBlock.selectFirst("div.search-result-authors").text();
    }

    public String getDate() {
        String result = reviewBlock.select("div.search-result-date > div").text();
        try {
            // Parse e.g. "12 March 2020" and reformat it as "2020-03-12"
            SimpleDateFormat fmt = new SimpleDateFormat("dd MMMM yyyy", Locale.US);
            Date d = fmt.parse(result);
            fmt.applyPattern("yyyy-MM-dd");
            result = fmt.format(d);
        } catch (ParseException e) {
            System.out.println("Failed parsing the date...");
        }
        return result;
    }
}
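(A side note on getDate, my suggestion rather than anything from the original post: SimpleDateFormat is lenient and not thread-safe; the same conversion with java.time could look like this.)

// Hypothetical java.time variant of the conversion in getDate -- not in the original post.
// Parses e.g. "12 March 2020" and reformats it as "2020-03-12".
// Needs java.time.LocalDate, java.time.format.DateTimeFormatter, java.time.format.DateTimeParseException.
private static String toIsoDate(String raw) {
    try {
        DateTimeFormatter in = DateTimeFormatter.ofPattern("d MMMM yyyy", Locale.US);
        return LocalDate.parse(raw, in).format(DateTimeFormatter.ISO_LOCAL_DATE);
    } catch (DateTimeParseException e) {
        System.out.println("Failed parsing the date...");
        return raw;
    }
}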
This would just be a comment if I had the reputation. I ran your loadHTML function with the URL you provided, and the result I got was more or less equal to the HTML of the page. Could you give more details about the httpclient library? I'm using Java 12 (I'm fairly sure it works with Java 8 as well) with this dependency:
<dependency>
    <groupId>org.apache.httpcomponents</groupId>
    <artifactId>httpclient</artifactId>
    <version>4.5.11</version>
</dependency>
To solve the problem, I created a session with a cookie store, and I close the response via CloseableHttpResponse after fetching each page. Here is the code snippet that made it work:
// One-time setup (httpClient and httpContext are kept as fields, so the same
// cookie session is reused for every page fetch)
RequestConfig config = RequestConfig.custom()
        .setCircularRedirectsAllowed(true)
        .build();
httpClient = HttpClients.custom()
        .setDefaultRequestConfig(config)
        .setMaxConnPerRoute(100)
        .build();
CookieStore cookieStore = new BasicCookieStore();
httpContext = new BasicHttpContext();
httpContext.setAttribute(HttpClientContext.COOKIE_STORE, cookieStore);

// Per-request code (inside loadHTML): execute with the shared context and
// close the response after each fetch
HttpGet httpget = new HttpGet(url);
httpget.setHeader("User-Agent", "Whatever");
StringBuilder page = new StringBuilder();
try {
    CloseableHttpResponse response = httpClient.execute(httpget, httpContext);
    System.out.println(response.getStatusLine());
    Scanner sc = new Scanner(response.getEntity().getContent());
    while (sc.hasNextLine())
        page.append(sc.nextLine()).append(" ");
    sc.close();
    response.close();
} catch (IOException e) {
    e.printStackTrace();
}
return page.toString();
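The likely explanation (my reading, not stated explicitly above): the first page fetch sets session cookies, and the pagination links only return real content when those cookies are sent back, so a client built fresh for every request, with an empty cookie store, gets a 200 with an error page in the body. The same session can also be set up with HttpClientContext instead of a raw BasicHttpContext; a small equivalent sketch:

// Equivalent session setup with HttpClientContext (same httpclient 4.5.x API)
HttpClientContext context = HttpClientContext.create();
context.setCookieStore(new BasicCookieStore());
// ...then pass it to every call: httpClient.execute(httpget, context)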