-
Notifications
You must be signed in to change notification settings - Fork 0
/
app.py
86 lines (72 loc) · 2.3 KB
/
app.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from sys import argv
import os
import os.path
import bottle
from bottle import route, static_file
import requests
from lxml import etree
from newspaper import Article
import urllib2
import shutil
import hashlib
import StringIO
import re
from pdfminer.pdfpage import PDFPage
from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter
from pdfminer.converter import TextConverter
from pdfminer.layout import LAParams
@route('/')
def root():
    """Serve the single-page app shell at the site root."""
    return static_file('app.html', root='./static/')
@route('/import')
def import_extract():
    """Serve the app shell; the client-side router handles /import."""
    return static_file('app.html', root='./static/')
@route('/page/<name>')
def page(name):
    """Serve the app shell for any /page/<name> URL (client routes on <name>)."""
    return static_file('app.html', root='./static/')
@route('/assets/<filepath:path>')
def asset(filepath):
    """Serve static assets (JS/CSS/images) from the ./static/ directory."""
    return static_file(filepath, root='./static/')
@route('/scrape-pdf/<url:path>')
def scrape_pdf(url):
    """Download a PDF (cached on disk, keyed by the URL's MD5) and return
    its extracted text as ``{'url': ..., 'content': ...}``.

    Fixes over the original:
    - creates ./tmp if missing (first request used to crash with IOError)
    - closes the urllib2 response after downloading
    - closes the pdfminer TextConverter device (was leaked)
    """
    digest = hashlib.md5()
    digest.update(url)
    cache_path = "./tmp/{fname}.pdf".format(fname=digest.hexdigest())
    # Ensure the cache directory exists before the first download.
    if not os.path.isdir("./tmp"):
        os.makedirs("./tmp")
    if not os.path.isfile(cache_path):
        req = urllib2.urlopen(url)
        try:
            with open(cache_path, 'wb') as fp:
                shutil.copyfileobj(req, fp)
        finally:
            req.close()  # response was never closed in the original
    with open(cache_path, 'rb') as fp:
        outfp = StringIO.StringIO()
        rsrcmgr = PDFResourceManager()
        device = TextConverter(rsrcmgr, outfp, codec="utf-8", laparams=LAParams())
        interpreter = PDFPageInterpreter(rsrcmgr, device)
        try:
            for page in PDFPage.get_pages(fp):
                interpreter.process_page(page)
            out = outfp.getvalue()
        finally:
            device.close()  # TextConverter holds resources; was leaked before
            outfp.close()
    return { 'url': url,
             'content': out }
@route('/scrape-rss/<url:path>')
def scrape_rss(url):
    """Fetch an RSS feed and return its <item> elements as a dict.

    Keys are the item's position in the feed; values map each child
    element's tag to its text. Assumes the channel is the feed root's
    first child, as the original did.
    """
    response = requests.get(url)
    root_el = etree.fromstring(response.content)
    channel = root_el[0]
    result = {}
    for idx, item in enumerate(child for child in channel if child.tag == 'item'):
        result[idx] = { field.tag: field.text for field in item }
    return result
@route('/scrape/<url:path>')
def scrape(url):
    """Dispatch scraping by content type.

    PDFs (by URL suffix) go to scrape_pdf, XML responses (by HEAD
    Content-Type) go to scrape_rss, everything else is treated as an
    article and extracted with newspaper.

    Fixes over the original:
    - ``is not None`` instead of ``!= None`` (PEP 8)
    - ``headers.get(...)`` so a missing Content-Type header no longer
      raises KeyError
    """
    if re.search(r'\.pdf$', url) is not None:
        return scrape_pdf(url)
    r = requests.head(url)
    # Servers may omit Content-Type; default to '' rather than KeyError.
    if re.search(r'\bxml\b', r.headers.get('content-type', '')) is not None:
        return scrape_rss(url)
    a = Article(url, keep_article_html=True)
    a.download()
    a.parse()
    return { 'url': url,
             'title': a.title,
             'image': a.top_image,
             'content': a.text,
             'content_html': a.article_html }
bottle.run(host='0.0.0.0', port=argv[1])