利用者:Bcxfubot/ジオシティーズ/prog
# [orig] replace.py
# URL replacement: rewrite dead Geocities links to {{Wayback}} templates
# [http://www.geocities.jp/fukadasoft/bridges/koumon/index.html 大場川]
# ↓
# {{Wayback|url=http://www.geocities.jp/fukadasoft/bridges/koumon/index.html |title=大場川}}
import re
import time
import pywikibot
import urllib.parse
import requests
from requests.exceptions import Timeout
target = "http://www.geocities.jp/"
#max = 10
max = 60
sleepsec = 60
######################################################
# 処理モード
#procmode = 0
procmode = 1
######################################################
# Query the Wayback Machine sparkline API for the timestamp of the most
# recent snapshot of origurl; return "" on failure.
def get_date(origurl):
    encoded_url = urllib.parse.quote(origurl, safe="")
    print("encoded_url = " + encoded_url)
    spark_url = "https://web.archive.org/__wb/sparkline?url=" + encoded_url + "&collection=web&output=json"
    print("spark_url = " + spark_url)
    try:
        response = requests.get(spark_url, timeout=10.0)
    except Timeout:
        print("ERROR: timeout")
        return ""  # caller treats "" as "skip this line"
    except Exception as e:
        print("ERROR: Exception")
        print(e)
        return ""
    data = response.json()
    print(data)
    lastdate = data["last_ts"]
    print(lastdate)
    return lastdate
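# Note: the sparkline endpoint used above is an undocumented Wayback
# Machine API; the response shape is an assumption based on observed
# replies, roughly:
#   {"years": {...}, "first_ts": "20040101000000", "last_ts": "20190331235959"}
# Only "last_ts" (latest snapshot, YYYYMMDDhhmmss) is consumed here.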
# Return True if the snapshot predates the Geocities shutdown.
# date is a Wayback timestamp, e.g. date=20131104214536
def is_mae(date):
    pre = int(date[0:8])
    geoend = 20190331  # geocities.jp closed on 2019-03-31
    if pre <= geoend:
        return True
    return False
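# Quick check of the cutoff logic above (illustrative values):
#   is_mae("20131104214536") -> True   (snapshot predates the shutdown)
#   is_mae("20200210120000") -> False  (post-shutdown snapshot)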
def make_newline(origline):
    newline = ""
    result = re.search(r"^(.*)\[(http://www\.geocities\.jp/[^ ]+) ([^\]]*)\](.*)$", origline)
    if result:
        pre = result.group(1)
        origurl = result.group(2)
        origtext = result.group(3)
        post = result.group(4)
        print("origurl = " + origurl)
        print("origtext = " + origtext)
        date = get_date(origurl)
        if date == "":
            return ""
        # If the page was only cached after Geocities closed, the snapshot is
        # just the shutdown notice, so link to * without a date. 2020.2.10
        # Geocities closed on 20190331, so check whether the snapshot
        # predates the shutdown.
        print("date = " + date)
        if is_mae(date):
            newline = pre + "{{Wayback|url=" + origurl + " |title=" + origtext + " |date=" + date + "}}" + post
        else:
            newline = pre + "{{Wayback|url=" + origurl + " |title=" + origtext + "}}" + post
        print("newline = " + newline)
    return newline
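# Worked example (hypothetical, using the sample link from the header and
# assuming get_date() returns a pre-shutdown timestamp such as 20190301000000):
#   make_newline("* [http://www.geocities.jp/fukadasoft/bridges/koumon/index.html 大場川]")
# would return:
#   "* {{Wayback|url=http://www.geocities.jp/fukadasoft/bridges/koumon/index.html |title=大場川 |date=20190301000000}}"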
# Rewrite Geocities links in the external-links (外部リンク) section of the
# given page and save the result.
def replace_page(pagetitle):
    site = pywikibot.Site()
    page = pywikibot.Page(site, pagetitle)
    #text = page.text
    #print(text)
    linelist = page.text.split('\n')
    #print(linelist)
    comment = ""
    gaibu = 0
    modflag = 0
    outtext = ""
    for line in linelist:
        #print(gaibu,line)
        if re.search("==[ ]*外部リンク", line):
            gaibu = 1
        if gaibu == 1:
            if target in line:
                #newline = re.sub( "\[(http:\/\/www\.geocities\.jp\/[^ ]+) (.*)\]", "{{Wayback|url=\\1 |title=\\2}}", origline)
                newline = make_newline(line)
                if newline != "":
                    if line != newline:
                        line = newline
                        comment = newline
                        print(gaibu, line)
                        modflag = 1
        outtext += line + "\n"
    if modflag == 1:
        page.text = outtext
        if procmode == 1:
            page.save("外部リンクの修正 " + comment + " ([[Wikipedia:Bot|Bot]]による編集)")
# Return the title of one page that still needs processing.
# Return "" if there is nothing left to process.
def get_pagetitle():
    path = "list"
    with open(path) as f:
        for s_line in f:
            s_line = s_line.rstrip("\n")
            #print(s_line)
            #if not re.search(",sumi", s_line):
            if not s_line.endswith(",sumi"):
                return s_line
    return ""
# Mark the processed page's line in the list file by appending ",sumi" ("done")
def done_pagetitle(pagetitle):
    path = "list"
    alltext = ""
    with open(path) as f:
        for s_line in f:
            s_line = s_line.rstrip("\n")
            #print(s_line + "\n")
            #if re.search(pagetitle, s_line):
            if pagetitle == s_line:
                s_line = s_line + ",sumi"
            alltext += s_line + "\n"
    with open(path, mode='w') as f:
        f.write(alltext)
    return ""
def sub():
    num = 0
    for i in range(max):
        num = num + 1
        pagetitle = get_pagetitle()
        print("[" + str(num) + "/" + str(max) + "]" + ":" + "pagetitle=" + pagetitle)
        if pagetitle == "":
            break
        replace_page(pagetitle)
        done_pagetitle(pagetitle)
        if i < (max - 1):
            print("sleep(" + str(sleepsec) + ")")
            time.sleep(sleepsec)
def main():
    sub()
    print("done.")

if __name__ == '__main__':
    main()
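# Usage sketch (an assumption; requires a configured pywikibot
# user-config.py and the "list" work file in the working directory):
#   $ python replace.py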