python, vpn, gem5

How can I download foreign web pages (such as the official gem5 documentation) for offline access?


The official gem5 documentation (http://www.gem5.org/) is frequently inaccessible from China. Even with a VPN I can only reach it occasionally, each attempt takes a long time, and I often get page-not-reachable errors. So I want to download the documentation and read it offline. I found many tools online, but they can only download domestic (Chinese) websites; on foreign sites they always hang with no response. Since those tools don't work, I want to use Python to collect all the links on the site and then download the pages from those links. I used a piece of code I found online; it prints the links of a domestic website just fine, but when I point it at the gem5 documentation the same "no response" error appears again, which is driving me crazy. I am not sure where to ask these questions. Can someone tell me what I should do?

The gem5 documentation is not the only case: there are many other foreign web-based documents I would like to download so that I can read them at any time. Is there any way to do this? I uploaded the code I used to GitHub (https://github.com/Yujie-Cui/bin/blob/master/getLink.py). Can someone help me see how I should modify it?
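
One likely cause of the "no response" symptom is that the script below calls urlopen without a timeout and routes every request through a hard-coded proxy that may be dead, so a stalled connection simply hangs forever. As a minimal sketch (not the original script, and with an example URL and file name), fetching a single page with an explicit timeout and saving it to disk looks roughly like this:

from urllib.request import Request, urlopen

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}


def fetch_page(url, timeout=15):
    # Pass a timeout so a dead connection fails fast instead of hanging
    req = Request(url, headers=headers)
    try:
        with urlopen(req, timeout=timeout) as resp:
            return resp.read()
    except OSError as e:  # covers URLError, HTTPError and socket timeouts
        print("failed to fetch " + url + ": " + str(e))
        return None


html = fetch_page("http://www.gem5.org/documentation/learning_gem5/introduction/")
if html is not None:
    with open("introduction.html", "wb") as f:
        f.write(html)

For reference, the full script I used is below: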

# __author__ = 'Administrat
# coding=utf-8
from urllib.request import urlopen
from urllib.parse import urlparse
from bs4 import BeautifulSoup
import re
import datetime
import random
import io
import sys
from urllib import request
import urllib.request

pages = set()
random.seed(datetime.datetime.now())

# Re-wrap stdout with the gb18030 encoding so Chinese text prints correctly on the console
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='gb18030')
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}


# Get a list of all internal links found on a page
def getInternalLinks(bsObj, includeUrl):
    includeUrl = urlparse(includeUrl).scheme + "://" + urlparse(includeUrl).netloc
    internalLinks = []
    # Find all links that begin with "/"
    for link in bsObj.findAll("a", href=re.compile("^(/|.*" + includeUrl + ")")):
       if link.attrs['href'] is not None:
          if link.attrs['href'] not in internalLinks:
             if (link.attrs['href'].startswith("/")):
                internalLinks.append(includeUrl + link.attrs['href'])
             else:
                internalLinks.append(link.attrs['href'])
    return internalLinks


# Get a list of all external links found on a page
def getExternalLinks(bsObj, excludeUrl):
    externalLinks = []
    # Find all links that start with "http" or "www" and do not contain the current URL
    for link in bsObj.findAll("a", href=re.compile("^(http|www)((?!" + excludeUrl + ").)*$")):
       if link.attrs['href'] is not None:
          if link.attrs['href'] not in externalLinks:
             externalLinks.append(link.attrs['href'])
    return externalLinks


def getRandomExternalLink(startingPage):
    req = request.Request(startingPage, headers=headers)
    html = urlopen(req, timeout=30)  # use a timeout so a dead connection fails instead of hanging
    bsObj = BeautifulSoup(html.read(), "html.parser")
    externalLinks = getExternalLinks(bsObj, urlparse(startingPage).netloc)
    if len(externalLinks) == 0:
       print("没有外部链接,准备遍历整个网站")
       domain = urlparse(startingPage).scheme + "://" + urlparse(startingPage).netloc
       internalLinks = getInternalLinks(bsObj, domain)
       return getRandomExternalLink(internalLinks[random.randint(0, len(internalLinks) - 1)])
    else:
       return externalLinks[random.randint(0, len(externalLinks) - 1)]


def followExternalOnly(startingSite):
    externalLink = getRandomExternalLink(startingSite)
    print("随机外链是: " + externalLink)
    followExternalOnly(externalLink)


# Collect a list of all external links found on the site
allExtLinks = set()
allIntLinks = set()


def getAllExternalLinks(siteUrl):
    # Access through a proxy IP (note: this hard-coded proxy may be unreachable,
    # which would make every request hang with no response)
    # proxy_handler = urllib.request.ProxyHandler({'http': '183.77.250.45:3128'})
    proxy_handler = urllib.request.ProxyHandler({'http': '183.77.250.45:3128'})
    proxy_auth_handler = urllib.request.ProxyBasicAuthHandler()
    # proxy_auth_handler.add_password('realm', '123.123.2123.123', 'user', 'password')
    opener = urllib.request.build_opener(urllib.request.HTTPHandler, proxy_handler)
    urllib.request.install_opener(opener)

    req = request.Request(siteUrl, headers=headers)
    html = urlopen(req, timeout=30)  # use a timeout so a dead connection fails instead of hanging
    bsObj = BeautifulSoup(html.read(), "html.parser")
    domain = urlparse(siteUrl).scheme + "://" + urlparse(siteUrl).netloc
    internalLinks = getInternalLinks(bsObj, domain)
    externalLinks = getExternalLinks(bsObj, domain)

    # Collect external links
    for link in externalLinks:
       if link not in allExtLinks:
          allExtLinks.add(link)
          # print(link)
          print("extern url: " + link)
    # Collect internal links
    for link in internalLinks:
       if link not in allIntLinks:
          print("intern url: " + link)
          allIntLinks.add(link)
          getAllExternalLinks(link)


# followExternalOnly("http://bbs.3s001.com/forum-36-1.html")
# allIntLinks.add("http://bbs.3s001.com/forum-36-1.html")
getAllExternalLinks("http://www.gem5.org/documentation/learning_gem5/introduction/")

Solution

  • Q: I am not sure where to ask these questions?

    A: Since this problem mostly affects users in China, you'd better ask in a Chinese technical community such as V2EX. People outside China can hardly relate to these problems.

    Q: Can someone tell me what I should do?

    A: As a member of the open-source community, I'll offer another angle on this question. Nowadays most documentation is written by the community, so you can often get the source files of the documents directly. I don't use gem5 myself, but I found this article: 如何下载 gem5-website 到本地 (how to download the gem5-website locally).
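
    For gem5 specifically, the documentation pages are generated from plain source files kept in a public Git repository, so instead of crawling the rendered site you can clone the sources once and read or build them offline. Below is a minimal sketch in Python; the repository URL is an assumption about where the gem5 website sources are hosted, so verify it against the article above.

    import subprocess

    # Assumed location of the gem5 website/documentation sources -- verify
    # this URL before relying on it.
    REPO_URL = "https://github.com/gem5/website"

    # A shallow clone is enough for offline reading and is much faster.
    subprocess.run(
        ["git", "clone", "--depth", "1", REPO_URL, "gem5-website"],
        check=True,
    )

    The source files are usually plain Markdown, so once cloned they can be read directly in any text editor without a network connection.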