File tree Expand file tree Collapse file tree
Original file line number | Diff line number | Diff line change
18 18  # noinspection PyUnresolvedReferences
19 19  from urllib.error import URLError, HTTPError
20 20
   21+ # security
   22+ parser = etree.XMLParser(resolve_entities=False, no_network=True)
   23+
21 24  ua_cloudbot = 'Cloudbot/DEV http://github.com/CloudDev/CloudBot'
22 25
23 26  ua_firefox = 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:17.0) Gecko/17.0' \
@@ -52,7 +55,7 @@ def get_soup(*args, **kwargs):
52 55
53 56  def get_xml(*args, **kwargs):
54 57      kwargs["decode"] = False  # we don't want to decode, for etree
55    -     return etree.fromstring(get(*args, **kwargs))
   58+     return etree.fromstring(get(*args, **kwargs), parser=parser)
56 59
57 60
58 61  def get_json(*args, **kwargs):
Original file line number | Diff line number | Diff line change
4 4   from cloudbot import hook
5 5   from cloudbot.util import formatting
6 6
  7+   # security
  8+   parser = etree.XMLParser(resolve_entities=False, no_network=True)
7 9
8 10  API_URL = "http://steamcommunity.com/id/{}/"
9 11  ID_BASE = 76561197960265728
@@ -64,7 +66,7 @@ def get_data(user):
64 66      except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
65 67          raise SteamError("Could not get user info: {}".format(e))
66 68
67    -     profile = etree.fromstring(request.content)
   69+     profile = etree.fromstring(request.content, parser=parser)
68 70
69 71      try:
70 72          data["name"] = profile.find('steamID').text
Original file line number | Diff line number | Diff line change
5 5
6 6   from cloudbot import hook
7 7
  8+   # security
  9+   parser = etree.XMLParser(resolve_entities=False, no_network=True)
  10+
8 11  base_url = "http://thetvdb.com/api/"
9 12
10 13
@@ -20,7 +23,7 @@ def get_episodes_for_series(series_name, api_key):
20 23      res["error"] = "error contacting thetvdb.com"
21 24      return res
22 25
23    -     query = etree.fromstring(request.content)
   26+     query = etree.fromstring(request.content, parser=parser)
24 27      series_id = query.xpath('//seriesid/text()')
25 28
26 29      if not series_id:
@@ -36,7 +39,7 @@ def get_episodes_for_series(series_name, api_key):
36 39      res["error"] = "error contacting thetvdb.com"
37 40      return res
38 41
39    -     series = etree.fromstring(_request.content)
   42+     series = etree.fromstring(_request.content, parser=parser)
40 43      series_name = series.xpath('//SeriesName/text()')[0]
41 44
42 45      if series.xpath('//Status/text()')[0] == 'Ended':
Original file line number | Diff line number | Diff line change
8 8   from cloudbot import hook
9 9   from cloudbot.util import formatting
10 10
   11+ # security
   12+ parser = etree.XMLParser(resolve_entities=False, no_network=True)
   13+
11 14  api_prefix = "http://en.wikipedia.org/w/api.php"
12 15  search_url = api_prefix + "?action=opensearch&format=xml"
13 16  random_url = api_prefix + "?action=query&format=xml&list=random&rnlimit=1&rnnamespace=0"
@@ -24,7 +27,7 @@ def wiki(text):
24 27          request.raise_for_status()
25 28      except (requests.exceptions.HTTPError, requests.exceptions.ConnectionError) as e:
26 29          return "Could not get Wikipedia page: {}".format(e)
27    -     x = etree.fromstring(request.text)
   30+     x = etree.fromstring(request.text, parser=parser)
28 31
29 32      ns = '{http://opensearch.org/searchsuggest2}'
30 33      items = x.findall(ns + 'Section/' + ns + 'Item')
Original file line number | Diff line number | Diff line change
7 7   from cloudbot import hook
8 8   from cloudbot.util import web, formatting
9 9
  10+  # security
  11+  parser = etree.XMLParser(resolve_entities=False, no_network=True)
10 12
11 13  api_url = 'http://api.wolframalpha.com/v2/query'
12 14  query_url = 'http://www.wolframalpha.com/input/?i={}'
@@ -28,7 +30,7 @@ def wolframalpha(text, bot):
28 30      if request.status_code != requests.codes.ok:
29 31          return "Error getting query: {}".format(request.status_code)
30 32
31    -     result = etree.fromstring(request.content)
   33+     result = etree.fromstring(request.content, parser=parser)
32 34
33 35      # get the URL for a user to view this query in a browser
34 36      short_url = web.try_shorten(query_url.format(urllib.parse.quote_plus(text)))
You can’t perform that action at this time.
0 commit comments