123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260 |
- # import htmlentitydefs
- import locale
- import dateutil.parser
- import cStringIO
- import csv
- import os
- import re
-
- from codecs import getincrementalencoder
- from HTMLParser import HTMLParser
- from random import choice
-
- from searx.version import VERSION_STRING
- from searx import settings
- from searx import logger
-
-
# module-scoped child logger so messages are tagged 'utils'
logger = logger.getChild('utils')

# Firefox version strings drawn from at random by gen_useragent()
ua_versions = ('33.0',
               '34.0',
               '35.0',
               '36.0',
               '37.0')

# operating-system strings drawn from at random by gen_useragent()
ua_os = ('Windows NT 6.3; WOW64',
         'X11; Linux x86_64',
         'X11; Linux x86')

# user-agent template filled in by gen_useragent()
ua = "Mozilla/5.0 ({os}; rv:{version}) Gecko/20100101 Firefox/{version}"

# tags whose text content HTMLTextExtractor drops
blocked_tags = ('script',
                'style')
-
-
def gen_useragent():
    """Return a randomly generated Firefox user-agent string."""
    # TODO
    os_string = choice(ua_os)
    version = choice(ua_versions)
    return ua.format(os=os_string, version=version)
-
-
def searx_useragent():
    """Return the user agent searx identifies itself with on outgoing requests."""
    suffix = settings['outgoing'].get('useragent_suffix', '')
    return 'searx/{searx_version} {suffix}'.format(
        searx_version=VERSION_STRING, suffix=suffix)
-
-
def highlight_content(content, query):
    """Return *content* with occurrences of *query* wrapped in
    ``<span class="highlight">`` markup.

    Returns None for empty content. Content that already looks like HTML
    (contains ``<``) is returned unchanged to avoid corrupting markup.
    """
    if not content:
        return None
    # ignoring html contents
    # TODO better html content detection
    if content.find('<') != -1:
        return content

    # accept both encoded byte strings and already-decoded text;
    # the original unconditional decode failed on non-byte queries
    if isinstance(query, bytes):
        query = query.decode('utf-8')
    if content.lower().find(query.lower()) > -1:
        # the whole query occurs verbatim: highlight it as one unit
        query_regex = u'({0})'.format(re.escape(query))
        content = re.sub(query_regex, '<span class="highlight">\\1</span>',
                         content, flags=re.I | re.U)
    else:
        # highlight each query word separately; single-character words are
        # only matched when delimited by non-word characters to cut noise
        regex_parts = []
        for chunk in query.split():
            if len(chunk) == 1:
                regex_parts.append(u'\\W+{0}\\W+'.format(re.escape(chunk)))
            else:
                regex_parts.append(u'{0}'.format(re.escape(chunk)))
        query_regex = u'({0})'.format('|'.join(regex_parts))
        content = re.sub(query_regex, '<span class="highlight">\\1</span>',
                         content, flags=re.I | re.U)

    return content
-
-
class HTMLTextExtractor(HTMLParser):
    """HTMLParser subclass that collects the plain-text content of an HTML
    fragment, skipping text inside tags listed in ``blocked_tags``
    (script/style)."""

    def __init__(self):
        HTMLParser.__init__(self)
        self.result = []  # collected text fragments, joined by get_text()
        self.tags = []    # stack of currently open tag names

    def handle_starttag(self, tag, attrs):
        # push every opened tag; attributes are ignored
        self.tags.append(tag)

    def handle_endtag(self, tag):
        # stray close tag with nothing open: ignore silently
        if not self.tags:
            return

        # a close tag must match the innermost open tag
        if tag != self.tags[-1]:
            raise Exception("invalid html")

        self.tags.pop()

    def is_valid_tag(self):
        # text is kept unless the innermost open tag is a blocked one
        return not self.tags or self.tags[-1] not in blocked_tags

    def handle_data(self, d):
        if not self.is_valid_tag():
            return
        self.result.append(d)

    def handle_charref(self, number):
        # numeric character reference, e.g. &#65; (decimal) or &#x41; (hex)
        if not self.is_valid_tag():
            return
        if number[0] in (u'x', u'X'):
            codepoint = int(number[1:], 16)
        else:
            codepoint = int(number)
        self.result.append(unichr(codepoint))

    def handle_entityref(self, name):
        # named entity, e.g. &amp; -- appended as its bare name, NOT
        # resolved to a character (resolution deliberately disabled below)
        if not self.is_valid_tag():
            return
        # codepoint = htmlentitydefs.name2codepoint[name]
        # self.result.append(unichr(codepoint))
        self.result.append(name)

    def get_text(self):
        """Return all collected fragments joined and stripped (unicode)."""
        return u''.join(self.result).strip()
-
-
def html_to_text(html):
    """Strip markup from *html* and return its collapsed plain text."""
    # normalize all whitespace (including newlines) to single spaces
    normalized = ' '.join(html.replace('\n', ' ').split())
    extractor = HTMLTextExtractor()
    extractor.feed(normalized)
    return extractor.get_text()
-
-
class UnicodeWriter:
    """
    A CSV writer which will write rows to CSV file "f",
    which is encoded in the given encoding.

    Works around the python 2 csv module's lack of unicode support by
    writing UTF-8 bytes into an in-memory queue, then re-encoding that
    output into the target encoding before writing it to the real stream.
    """

    def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
        # Redirect output to a queue; csv.writer itself only emits bytes
        self.queue = cStringIO.StringIO()
        self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
        self.stream = f
        # incremental encoder keeps state across rows for stateful encodings
        self.encoder = getincrementalencoder(encoding)()

    def writerow(self, row):
        """Write one *row*; text columns are UTF-8 encoded and stripped."""
        unicode_row = []
        for col in row:
            # isinstance instead of exact type() comparison: also accepts
            # str/unicode subclasses, otherwise behaviour is unchanged
            if isinstance(col, (str, unicode)):
                # NOTE(review): encoding a py2 byte string implicitly
                # decodes it as ascii first; non-ascii str input would
                # raise here -- confirm callers pass unicode
                unicode_row.append(col.encode('utf-8').strip())
            else:
                unicode_row.append(col)
        self.writer.writerow(unicode_row)
        # Fetch UTF-8 output from the queue ...
        data = self.queue.getvalue()
        data = data.decode("utf-8")
        # ... and reencode it into the target encoding
        data = self.encoder.encode(data)
        # write to the target stream
        self.stream.write(data)
        # empty queue
        self.queue.truncate(0)

    def writerows(self, rows):
        """Write every row in *rows* through writerow()."""
        for row in rows:
            self.writerow(row)
-
-
def get_themes(root):
    """Returns available themes list."""
    static_path = os.path.join(root, 'static')
    templates_path = os.path.join(root, 'templates')
    theme_names = os.listdir(os.path.join(static_path, 'themes'))
    return static_path, templates_path, theme_names
-
-
def get_static_files(base_path):
    """Return the set of file paths below <base_path>/static,
    relative to that static directory."""
    static_dir = os.path.join(base_path, 'static')
    prefix_len = len(static_dir) + 1  # +1 strips the path separator too
    found = set()
    for current_dir, _, filenames in os.walk(static_dir):
        rel_dir = current_dir[prefix_len:]
        found.update(os.path.join(rel_dir, name) for name in filenames)
    return found
-
-
def get_result_templates(base_path):
    """Return the set of files located in any 'result_templates' directory
    below <base_path>/templates, relative to that templates directory."""
    templates_dir = os.path.join(base_path, 'templates')
    prefix_len = len(templates_dir) + 1  # +1 strips the path separator too
    found = set()
    for current_dir, _, filenames in os.walk(templates_dir):
        if not current_dir.endswith('result_templates'):
            continue
        for name in filenames:
            found.add(os.path.join(current_dir[prefix_len:], name))
    return found
-
-
def format_date_by_locale(date_string, locale_string):
    """Parse *date_string* and format the date according to *locale_string*.

    Dates with year <= 1900 are returned in ISO form (YYYY-MM-DD) because
    strftime works only on dates after 1900. If the requested locale cannot
    be set, the date is formatted with the current locale and a warning is
    logged; the original locale is restored afterwards.
    """
    # strftime works only on dates after 1900
    parsed_date = dateutil.parser.parse(date_string)
    if parsed_date.year <= 1900:
        return parsed_date.isoformat().split('T')[0]

    orig_locale = locale.getlocale()[0]
    try:
        locale.setlocale(locale.LC_ALL, locale_string)
    except locale.Error:
        # narrowed from a bare except: setlocale signals failure this way
        logger.warning('cannot set locale: {0}'.format(locale_string))
    formatted_date = parsed_date.strftime(locale.nl_langinfo(locale.D_FMT))
    try:
        locale.setlocale(locale.LC_ALL, orig_locale)
    except locale.Error:
        logger.warning('cannot set original locale: {0}'.format(orig_locale))
    return formatted_date
-
-
def dict_subset(d, properties):
    """Return a new dict with only those keys from *properties* present in *d*."""
    return {k: d[k] for k in properties if k in d}
-
-
def prettify_url(url, max_length=74):
    """Shorten *url* for display.

    URLs longer than *max_length* are abbreviated by keeping the head and
    the tail and inserting '[...]' between them; shorter URLs are returned
    unchanged. The default of 74 reproduces the historic behaviour exactly
    (35 head chars + '[...]' + 35 tail chars).
    """
    if len(url) > max_length:
        # (74 - 4) // 2 == 35, matching the previously hard-coded chunks
        chunk_len = (max_length - 4) // 2
        return u'{0}[...]{1}'.format(url[:chunk_len], url[-chunk_len:])
    else:
        return url
-
-
# get element in list or default value
def list_get(a_list, index, default=None):
    """Return a_list[index], or *default* when the index is out of range."""
    return a_list[index] if index < len(a_list) else default
-
-
def get_blocked_engines(engines, cookies):
    """Return the list of (engine_name, category) pairs the user blocked.

    Without a 'blocked_engines' cookie the defaults apply: every category
    of every engine that is disabled in the configuration. With the cookie,
    its comma-separated tokens are parsed: 'engine__category' blocks one
    pair, a bare engine name blocks the engine in all of its categories.
    """
    if 'blocked_engines' not in cookies:
        return [(name, category)
                for name in engines
                if engines[name].disabled
                for category in engines[name].categories]

    tokens = cookies.get('blocked_engines', '').split(',')
    blocked = []

    if not tokens:
        return blocked

    for token in tokens:
        if '__' in token:
            name, category = token.split('__', 1)
            if name in engines and category in engines[name].categories:
                blocked.append((name, category))
        elif token in engines:
            blocked.extend((token, category)
                           for category in engines[token].categories)

    return blocked
|