-
Notifications
You must be signed in to change notification settings - Fork 2
Expand file tree
/
Copy pathutil.py
More file actions
204 lines (157 loc) · 5.49 KB
/
util.py
File metadata and controls
204 lines (157 loc) · 5.49 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
import logging
import re
import time
import unicodedata
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry
import sqlalchemy
from sqlalchemy import exc, sql
class NoDoiException(Exception):
    """Signals that no DOI could be extracted from the given value."""
def clean_html(raw_html):
    """Return raw_html with all <...> tag spans removed (non-greedy match)."""
    return re.sub("<.*?>", "", raw_html)
def safe_commit(db):
    """Commit the current db session; roll back on failure.

    Returns True on a successful commit, False after any handled failure.
    KeyboardInterrupt/SystemExit are re-raised untouched so an operator
    abort never commits or logs as an ordinary error.

    Fix: errors were reported via print() even though this module already
    uses logging; both failure paths now go through logging.exception so
    the traceback is captured consistently.
    """
    try:
        db.session.commit()
        return True
    except (KeyboardInterrupt, SystemExit):
        # let these ones through, don't save anything to db
        raise
    except sqlalchemy.exc.DataError:
        db.session.rollback()
        logging.exception("sqlalchemy.exc.DataError on commit. rolling back.")
    except Exception:
        db.session.rollback()
        logging.exception("generic exception in commit. rolling back.")
    return False
def is_doi_url(url):
    """Return True iff url is an http(s) doi.org (or dx.doi.org) link.

    Fix: the original pattern left the dots unescaped ("dx." / "doi.org"),
    so e.g. "https://doiXorg/..." was accepted; they are now escaped.
    Test urls at https://regex101.com/r/yX5cK0/2
    """
    return bool(re.search(r"https?://(?:dx\.)?doi\.org/", url.lower()))
def clean_doi(dirty_doi):
    """Normalize dirty_doi to a bare, lowercase DOI string.

    Strips nonprinting characters and surrounding whitespace, lowercases,
    extracts the first "10..." segment, and drops any "#fragment" suffix.

    Raises:
        NoDoiException: if dirty_doi is falsy or contains no "10..." segment.

    Fix: removed the dead Python-2 decode branch — str(match, "utf-8") on a
    str always raised TypeError on py3, so the except clause was the only
    path ever taken; behavior is unchanged.
    """
    if not dirty_doi:
        raise NoDoiException("There's no DOI at all.")
    dirty_doi = remove_nonprinting_characters(dirty_doi)
    dirty_doi = dirty_doi.strip().lower()
    # test cases for this regex are at https://regex101.com/r/zS4hA0/1
    match = re.search(r"10.+", dirty_doi)
    if match is None:
        raise NoDoiException("There's no valid DOI.")
    resp = match.group(0)
    # remove any url fragments
    if "#" in resp:
        resp = resp.split("#")[0]
    return resp
def elapsed(since, round_places=2):
    """Seconds elapsed since the epoch timestamp `since`, rounded."""
    delta = time.time() - since
    return round(delta, round_places)
# from http://farmdev.com/talks/unicode/
def to_unicode_or_bust(obj, encoding="utf-8"):
    """Return obj as str, decoding bytes input with `encoding`.

    Fix: the 2-to-3 port had collapsed the original py2 type check into
    `if isinstance(obj, str): if not isinstance(obj, str):` — dead code
    that made the function a no-op (bytes passed through undecoded).
    Restored the evident intent: decode bytes, pass str through.
    """
    if isinstance(obj, bytes):
        obj = str(obj, encoding)
    return obj
def remove_nonprinting_characters(input, encoding="utf-8"):
    """Remove control (C), mark (M), and separator (Z) characters.

    Note category Z includes ordinary spaces, so "a b" becomes "ab".
    Accepts str or bytes; a bytes input is decoded with `encoding` and the
    cleaned result is re-encoded, so the return type matches the input type.

    Fix: the 2-to-3 port left the bytes detection as dead code
    (`if isinstance(input, str): if not isinstance(input, str): ...`), so
    bytes input reached the category loop undecoded and crashed with
    TypeError. Detection and decoding are now explicit.
    """
    input_was_unicode = not isinstance(input, bytes)
    unicode_input = input if input_was_unicode else str(input, encoding)
    # see http://www.fileformat.info/info/unicode/category/index.htm
    char_classes_to_remove = ("C", "M", "Z")
    response = "".join(
        c
        for c in unicode_input
        if unicodedata.category(c)[0] not in char_classes_to_remove
    )
    if not input_was_unicode:
        response = response.encode(encoding)
    return response
# getting a "decoding Unicode is not supported" error in this function?
# might need to reinstall libaries as per
# http://stackoverflow.com/questions/17092849/flask-login-typeerror-decoding-unicode-is-not-supported
class HTTPMethodOverrideMiddleware(object):
    """WSGI middleware honoring the X-HTTP-Method-Override request header.

    When the header names an allowed method, REQUEST_METHOD is replaced
    with it; for bodyless methods CONTENT_LENGTH is forced to "0" so the
    downstream app does not try to read a request body.

    Fix: method was previously run through .encode("ascii", "replace"),
    which (a) stored bytes in environ["REQUEST_METHOD"] — PEP 3333 requires
    native str values — and (b) made the bodyless_methods membership test
    (bytes vs str frozenset) always False, so CONTENT_LENGTH was never set.
    """

    allowed_methods = frozenset(
        ["GET", "HEAD", "POST", "DELETE", "PUT", "PATCH", "OPTIONS"]
    )
    bodyless_methods = frozenset(["GET", "HEAD", "OPTIONS", "DELETE"])

    def __init__(self, app):
        self.app = app

    def __call__(self, environ, start_response):
        method = environ.get("HTTP_X_HTTP_METHOD_OVERRIDE", "").upper()
        if method in self.allowed_methods:
            environ["REQUEST_METHOD"] = method
        if method in self.bodyless_methods:
            environ["CONTENT_LENGTH"] = "0"
        return self.app(environ, start_response)
def run_sql(db, q):
    """Execute statement q in its own transaction, logging timing to stdout.

    Empty/whitespace-only q is a no-op. sqlalchemy ProgrammingError is
    reported and swallowed (best-effort semantics, unchanged).

    Fixes: if db.engine.connect() itself raised, `con` was unbound in the
    finally block and the close raised NameError, masking the real error;
    also corrected the "continuting" typo in the error message.
    """
    q = q.strip()
    if not q:
        return
    print("running {}".format(q))
    start = time.time()
    con = None
    try:
        con = db.engine.connect()
        trans = con.begin()
        con.execute(q)
        trans.commit()
    except exc.ProgrammingError as e:
        print("error {} in run_sql, continuing".format(e))
    finally:
        # only close if connect() actually succeeded
        if con is not None:
            con.close()
    print("{} done in {} seconds".format(q, elapsed(start, 1)))
def get_sql_answer(db, q):
    """Run q and return the first column of the first result row."""
    first_row = db.engine.execute(sql.text(q)).first()
    return first_row[0]
def get_sql_answers(db, q):
    """Run q and return the first column of every result row ([] if none)."""
    rows = db.engine.execute(sql.text(q)).fetchall()
    return [row[0] for row in rows] if rows else []
def get_multiple_authors(authors):
    """Join the unique author names from a list of {"name": ...} dicts.

    Fix: the original joined set(parsed_authors), which made the output
    order nondeterministic across runs. dict.fromkeys deduplicates while
    preserving first-seen order (dicts keep insertion order on py3.7+).
    """
    parsed_authors = [author["name"] for author in authors]
    return ", ".join(dict.fromkeys(parsed_authors))
def validate_author_url(author_url):
    """Convert a twitter:// author url into its public profile url.

    Any other value (including None) is returned unchanged.

    Fix: the original did re.findall(...)[0], raising IndexError for a
    twitter:// url with no trailing screen_name parameter; such urls now
    fall through and are returned unchanged.
    """
    if author_url and author_url.startswith("twitter://"):
        match = re.search(r"screen_name=([A-Za-z0-9_]{1,15}$)", author_url)
        if match:
            return "http://www.twitter.com/{}".format(match.group(1))
    return author_url
def validate_subject_url(author_url, subject_url):
    """Convert a twitter:// subject url into a public tweet url.

    Builds http://twitter.com/<screen_name>/statuses/<status_id> from the
    author_url (screen name) and subject_url (status id). Non-twitter
    subject urls are returned unchanged.

    Fixes: both re.findall(...)[0] calls raised IndexError when a pattern
    did not match — now falls back to returning subject_url; the dot in
    "twitter.com" is escaped so it no longer matches arbitrary characters.
    """
    if subject_url.startswith("twitter://"):
        name_match = re.search(r"twitter\.com/([A-Za-z0-9_]{1,15}$)", author_url)
        id_match = re.search(r"status\?id=(\d+$)", subject_url)
        if name_match and id_match:
            return "http://twitter.com/{}/statuses/{}".format(
                name_match.group(1), id_match.group(1)
            )
    return subject_url
def requests_retry_session(
    retries=3,
    backoff_factor=0.1,
    status_forcelist=(500, 502, 504),
    session=None,
):
    """Return a requests Session that retries transient HTTP failures.

    Mounts one HTTPAdapter configured with a urllib3 Retry policy on both
    the http:// and https:// prefixes. If `session` is given it is
    configured in place and returned; otherwise a fresh Session is created.
    """
    if not session:
        session = requests.Session()
    retry_policy = Retry(
        total=retries,
        read=retries,
        connect=retries,
        backoff_factor=backoff_factor,
        status_forcelist=status_forcelist,
    )
    adapter = HTTPAdapter(max_retries=retry_policy)
    for prefix in ("http://", "https://"):
        session.mount(prefix, adapter)
    return session