Commit 24bd9724 authored by Ad Schellevis

(ids) python style fixes (non functional)

parent b870cd79
@@ -37,9 +37,9 @@ from lib import rule_source_directory
 if __name__ == '__main__':
     RuleCache = lib.rulecache.RuleCache()
-    rule_config_fn = ('%s../rules.config'%rule_source_directory)
-    rule_target_dir = ('%s../opnsense.rules'%rule_source_directory)
-    rule_yaml_list = ('%s../installed_rules.yaml'%rule_source_directory)
+    rule_config_fn = ('%s../rules.config' % rule_source_directory)
+    rule_target_dir = ('%s../opnsense.rules' % rule_source_directory)
+    rule_yaml_list = ('%s../installed_rules.yaml' % rule_source_directory)
     # parse OPNsense rule config
     rule_updates = {}
@@ -59,9 +59,9 @@ if __name__ == '__main__':
     # install ruleset
     all_installed_files = []
-    for filename in RuleCache.listLocal():
+    for filename in RuleCache.list_local():
         output_data = []
-        for rule_info_record in RuleCache.listRules(filename=filename):
+        for rule_info_record in RuleCache.list_rules(filename=filename):
             # default behavior, do not touch rule, only copy to output
             rule = rule_info_record['rule']
             # change rule if in rule updates
@@ -75,7 +75,7 @@ if __name__ == '__main__':
                 # generate altered rule
                 if 'enabled' in rule_updates[rule_info_record['metadata']['sid']]:
                     if (rule_updates[rule_info_record['metadata']['sid']]['enabled']) == '0':
-                        rule = ('#%s'%rule[i:])
+                        rule = ('#%s' % rule[i:])
                     else:
                         rule = rule[i:]
@@ -83,12 +83,12 @@ if __name__ == '__main__':
         # write data to file
         all_installed_files.append(filename.split('/')[-1])
-        open('%s/%s'%(rule_target_dir, filename.split('/')[-1]), 'wb').write('\n'.join(output_data))
+        open('%s/%s' % (rule_target_dir, filename.split('/')[-1]), 'wb').write('\n'.join(output_data))
     # flush all written rule filenames into yaml file
-    with open(rule_yaml_list,'wb') as f_out:
+    with open(rule_yaml_list, 'wb') as f_out:
         f_out.write('%YAML 1.1\n')
         f_out.write('---\n')
         f_out.write('rule-files:\n')
         for installed_file in all_installed_files:
-            f_out.write(' - %s\n'%installed_file)
+            f_out.write(' - %s\n' % installed_file)
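For reference, the installed_rules.yaml index written here comes out roughly like this; the layout follows the writes above, the rule filenames are illustrative:

    %YAML 1.1
    ---
    rule-files:
     - emerging-scan.rules
     - emerging-dos.rules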
@@ -25,5 +25,5 @@
 """
 # define paths used by suricata
-rule_source_directory='/usr/local/etc/suricata/rules/'
-suricata_alert_log='/var/log/suricata/eve.json'
+rule_source_directory = '/usr/local/etc/suricata/rules/'
+suricata_alert_log = '/var/log/suricata/eve.json'
@@ -31,24 +31,25 @@
 import syslog
 import requests

+
 class Downloader(object):
     def __init__(self, target_dir):
         self._target_dir = target_dir

     def download(self, proto, url):
-        if proto in ('http','https'):
-            frm_url = url.replace('//','/').replace(':/','://')
+        if proto in ('http', 'https'):
+            frm_url = url.replace('//', '/').replace(':/', '://')
             req = requests.get(url=frm_url)
             if req.status_code == 200:
-                target_filename = ('%s/%s'%(self._target_dir, frm_url.split('/')[-1])).replace('//','/')
+                target_filename = ('%s/%s' % (self._target_dir, frm_url.split('/')[-1])).replace('//', '/')
                 try:
-                    open(target_filename,'wb').write(req.text)
+                    open(target_filename, 'wb').write(req.text)
                 except IOError:
-                    syslog.syslog(syslog.LOG_ERR, 'cannot write to %s'%(target_filename))
+                    syslog.syslog(syslog.LOG_ERR, 'cannot write to %s' % target_filename)
                     return None
-                syslog.syslog(syslog.LOG_INFO, 'download completed for %s'%(frm_url))
+                syslog.syslog(syslog.LOG_INFO, 'download completed for %s' % frm_url)
             else:
-                syslog.syslog(syslog.LOG_ERR, 'download failed for %s'%(frm_url))
+                syslog.syslog(syslog.LOG_ERR, 'download failed for %s' % frm_url)

     @staticmethod
     def is_supported(proto):
@@ -56,7 +57,7 @@ class Downloader(object):
         :param proto:
         :return:
         """
-        if proto in ['http','https']:
+        if proto in ['http', 'https']:
            return True
         else:
             return False
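A minimal usage sketch of the class as it stands after this commit; the URL is a placeholder, not a real feed:

    from lib import downloader

    dl = downloader.Downloader(target_dir='/usr/local/etc/suricata/rules/')
    if dl.is_supported('https'):
        # writes the fetched file into target_dir and reports the result via syslog
        dl.download(proto='https', url='https://rules.example.org/open/emerging-scan.rules')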
@@ -26,14 +26,15 @@
 import os

-def reverse_log_reader(filename, block_size = 8192, start_pos=None):
+
+def reverse_log_reader(filename, block_size=8192, start_pos=None):
     """ read log file in reverse order
     :param filename: filename to parse
     :param block_size: max block size to examine per loop
     :param start_pos: start at position in file (None is end of file)
     :return: generator
     """
-    with open(filename,'rU') as f_in:
+    with open(filename, 'rU') as f_in:
         if start_pos is None:
             f_in.seek(0, os.SEEK_END)
         file_byte_start = f_in.tell()
@@ -42,7 +43,7 @@ def reverse_log_reader(filename, block_size = 8192, start_pos=None):
         data = ''
         while True:
-            if file_byte_start-block_size < 0:
+            if file_byte_start - block_size < 0:
                 block_size = file_byte_start
                 file_byte_start = 0
             else:
@@ -59,10 +60,10 @@ def reverse_log_reader(filename, block_size = 8192, start_pos=None):
                     data = data[:eol]
                     eol = data.rfind('\n')
                     # yield line and position in file
-                    yield {'line':line.strip(),'pos':line_end}
+                    yield {'line': line.strip(), 'pos': line_end}
             if file_byte_start == 0 and eol == -1:
                 # flush last line
-                yield {'line':data.strip(),'pos':len(data)}
+                yield {'line': data.strip(), 'pos': len(data)}
             if file_byte_start == 0:
                 break
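A short sketch of how this generator is consumed, using the alert log path defined in lib/__init__ above:

    from lib.log import reverse_log_reader

    # iterate eve.json from the last line backwards; every item carries
    # the stripped line plus its position, usable later as start_pos
    for record in reverse_log_reader('/var/log/suricata/eve.json'):
        print(record['pos'], record['line'])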
@@ -33,30 +33,31 @@ import syslog
 import glob
 import xml.etree.ElementTree

+
 class Metadata(object):
     def __init__(self):
-        self._rules_dir = '%s/../metadata/rules/'%(os.path.dirname(os.path.abspath(__file__)))
+        self._rules_dir = '%s/../metadata/rules/' % (os.path.dirname(os.path.abspath(__file__)))

     def list_rules(self):
         """ list all available rules
         :return: generator method returning all known rulefiles
         """
-        for filename in sorted(glob.glob('%s*.xml'%self._rules_dir)):
+        for filename in sorted(glob.glob('%s*.xml' % self._rules_dir)):
             try:
-                ruleXML=xml.etree.ElementTree.fromstring(open(filename).read())
+                rule_xml = xml.etree.ElementTree.fromstring(open(filename).read())
             except xml.etree.ElementTree.ParseError:
                 # unparseable metadata
-                syslog.syslog(syslog.LOG_ERR,'suricata metadata unparsable @ %s'%filename)
+                syslog.syslog(syslog.LOG_ERR, 'suricata metadata unparsable @ %s' % filename)
                 continue
-            src_location = ruleXML.find('location')
+            src_location = rule_xml.find('location')
             if src_location is None or 'url' not in src_location.attrib:
-                syslog.syslog(syslog.LOG_ERR,'suricata metadata missing location @ %s'%filename)
+                syslog.syslog(syslog.LOG_ERR, 'suricata metadata missing location @ %s' % filename)
             else:
-                if ruleXML.find('files') is None:
-                    syslog.syslog(syslog.LOG_ERR,'suricata metadata missing files @ %s'%filename)
+                if rule_xml.find('files') is None:
+                    syslog.syslog(syslog.LOG_ERR, 'suricata metadata missing files @ %s' % filename)
                 else:
-                    for rule_filename in ruleXML.find('files'):
+                    for rule_filename in rule_xml.find('files'):
                         metadata_record = dict()
                         metadata_record['source'] = src_location.attrib
                         metadata_record['filename'] = rule_filename.text.strip()
...
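Judging from this parser, a metadata file is expected to look roughly like the sketch below; the root tag, child tag, feed URL and filename are made up here, only the location url attribute and the children of files are actually read:

    <rule>
      <location url="https://rules.example.org/open"/>
      <files>
        <file>emerging-scan.rules</file>
      </files>
    </rule>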
@@ -26,7 +26,8 @@
 import sys

-def updateParams(parameters):
+
+def update_params(parameters):
     """ update predefined parameters with given list from shell (as switches)
     for example /a valA /b valB
     converts to
@@ -35,11 +36,11 @@ def updateParams(parameters):
     :param parameters: parameter dictionary
     :return:
     """
-    cmd=None
+    cmd = None
     for arg in sys.argv[1:]:
         if cmd is None:
-            cmd=arg[1:]
+            cmd = arg[1:]
         else:
             if cmd in parameters and arg.strip() != '':
                 parameters[cmd] = arg.strip()
-            cmd=None
+            cmd = None
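Concretely, with defaults like the ones used by the query scripts further down:

    import sys
    from lib.params import update_params

    parameters = {'limit': '0', 'offset': '0', 'filter': ''}
    # as if the script were called with: script.py /limit 25 /filter alert
    sys.argv = ['script.py', '/limit', '25', '/filter', 'alert']
    update_params(parameters)
    # parameters == {'limit': '25', 'offset': '0', 'filter': 'alert'}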
@@ -39,50 +39,52 @@ from lib import rule_source_directory
 class RuleCache(object):
     """
     """
     def __init__(self):
         # suricata rule settings, source directory and cache json file to use
-        self.cachefile = '%srules.sqlite'%rule_source_directory
-        self._rule_fields = ['sid','msg','classtype','rev','gid','source','enabled','reference']
-        self._rule_defaults = {'classtype':'##none##'}
+        self.cachefile = '%srules.sqlite' % rule_source_directory
+        self._rule_fields = ['sid', 'msg', 'classtype', 'rev', 'gid', 'source', 'enabled', 'reference']
+        self._rule_defaults = {'classtype': '##none##'}

-    def listLocal(self):
-        all_rule_files=[]
-        for filename in glob.glob('%s*.rules'%(rule_source_directory)):
+    @staticmethod
+    def list_local():
+        all_rule_files = []
+        for filename in glob.glob('%s*.rules' % rule_source_directory):
             all_rule_files.append(filename)
         return all_rule_files

-    def listRules(self, filename):
+    def list_rules(self, filename):
         """ generator function to list rule file content including metadata
         :param filename:
         :return:
         """
         data = open(filename)
         for rule in data.read().split('\n'):
-            rule_info_record = {'rule':rule, 'metadata':None}
+            rule_info_record = {'rule': rule, 'metadata': None}
             if rule.find('msg:') != -1:
                 # define basic record
-                record = {'enabled':True, 'source':filename.split('/')[-1]}
-                if rule.strip()[0] =='#':
+                record = {'enabled': True, 'source': filename.split('/')[-1]}
+                if rule.strip()[0] == '#':
                     record['enabled'] = False
                 rule_metadata = rule[rule.find('msg:'):-1]
                 for field in rule_metadata.split(';'):
-                    fieldName = field[0:field.find(':')].strip()
-                    fieldContent = field[field.find(':')+1:].strip()
-                    if fieldName in self._rule_fields:
-                        if fieldContent[0] == '"':
-                            content = fieldContent[1:-1]
+                    fieldname = field[0:field.find(':')].strip()
+                    fieldcontent = field[field.find(':') + 1:].strip()
+                    if fieldname in self._rule_fields:
+                        if fieldcontent[0] == '"':
+                            content = fieldcontent[1:-1]
                         else:
-                            content = fieldContent
-                        if fieldName in record:
+                            content = fieldcontent
+                        if fieldname in record:
                             # if same field repeats, put items in list
-                            if type(record[fieldName]) != list:
-                                record[fieldName] = [record[fieldName]]
-                            record[fieldName].append(content)
+                            if type(record[fieldname]) != list:
+                                record[fieldname] = [record[fieldname]]
+                            record[fieldname].append(content)
                         else:
-                            record[fieldName] = content
+                            record[fieldname] = content
                 for rule_field in self._rule_fields:
                     if rule_field not in record:
@@ -100,13 +102,13 @@ class RuleCache(object):
                 yield rule_info_record

-    def isChanged(self):
+    def is_changed(self):
         """ check if rules on disk are probably different from rules in cache
         :return: boolean
         """
         if os.path.exists(self.cachefile):
             last_mtime = 0
-            all_rule_files = self.listLocal()
+            all_rule_files = self.list_local()
             for filename in all_rule_files:
                 file_mtime = os.stat(filename).st_mtime
                 if file_mtime > last_mtime:
@@ -115,7 +117,7 @@ class RuleCache(object):
             try:
                 db = sqlite3.connect(self.cachefile)
                 cur = db.cursor()
-                cur.execute('select max(timestamp), max(files) from stats')
+                cur.execute('SELECT max(timestamp), max(files) FROM stats')
                 results = cur.fetchall()
                 if last_mtime == results[0][0] and len(all_rule_files) == results[0][1]:
                     return False
@@ -133,37 +135,37 @@ class RuleCache(object):
         db = sqlite3.connect(self.cachefile)
         cur = db.cursor()
-        cur.execute('create table stats (timestamp number, files number)')
-        cur.execute("""create table rules (sid number, msg text, classtype text,
-                       rev integer, gid integer,reference text,
-                       enabled boolean,source text)""")
-        last_mtime=0
-        all_rule_files = self.listLocal()
+        cur.execute('CREATE TABLE stats (timestamp number, files number)')
+        cur.execute("""CREATE TABLE rules (sid number, msg TEXT, classtype TEXT,
+                       rev INTEGER, gid INTEGER,reference TEXT,
+                       enabled BOOLEAN,source TEXT)""")
+        last_mtime = 0
+        all_rule_files = self.list_local()
         for filename in all_rule_files:
             file_mtime = os.stat(filename).st_mtime
             if file_mtime > last_mtime:
                 last_mtime = file_mtime
             rules = []
-            for rule_info_record in self.listRules(filename=filename):
+            for rule_info_record in self.list_rules(filename=filename):
                 if rule_info_record['metadata'] is not None:
                     rules.append(rule_info_record['metadata'])
             cur.executemany('insert into rules(%(fieldnames)s) '
-                            'values (%(fieldvalues)s)'%{'fieldnames':(','.join(self._rule_fields)),
-                                                        'fieldvalues':':'+(',:'.join(self._rule_fields))}, rules)
+                            'values (%(fieldvalues)s)' % {'fieldnames': (','.join(self._rule_fields)),
+                                                          'fieldvalues': ':' + (',:'.join(self._rule_fields))}, rules)
-        cur.execute('insert into stats (timestamp,files) values (?,?) ',(last_mtime,len(all_rule_files)))
+        cur.execute('INSERT INTO stats (timestamp,files) VALUES (?,?) ', (last_mtime, len(all_rule_files)))
         db.commit()

-    def search(self, limit, offset, filter, sort_by):
+    def search(self, limit, offset, filter_txt, sort_by):
         """ search installed rules
         :param limit: limit number of rows
         :param offset: limit offset
-        :param filter: text to search, used format fieldname1,fieldname2/searchphrase include % to match on a part
-        :param sort: order by, list of fields and possible asc/desc parameter
+        :param filter_txt: text to search, used format fieldname1,fieldname2/searchphrase include % to match on a part
+        :param sort_by: order by, list of fields and possible asc/desc parameter
         :return: dict
         """
-        result = {'rows':[]}
+        result = {'rows': []}
         if os.path.exists(self.cachefile):
             db = sqlite3.connect(self.cachefile)
             cur = db.cursor()
@@ -172,7 +174,7 @@ class RuleCache(object):
             sql = 'select * from rules '
             sql_filters = {}
-            for filtertag in shlex.split(filter):
+            for filtertag in shlex.split(filter_txt):
                 fieldnames = filtertag.split('/')[0]
                 searchcontent = '/'.join(filtertag.split('/')[1:])
                 if len(sql_filters) > 0:
@@ -184,9 +186,9 @@ class RuleCache(object):
                     if fieldname != fieldnames.split(',')[0].strip():
                         sql += ' or '
                     if searchcontent.find('*') == -1:
-                        sql += 'cast('+fieldname + " as text) like :"+fieldname+" "
+                        sql += 'cast(' + fieldname + " as text) like :" + fieldname + " "
                     else:
-                        sql += 'cast('+fieldname + " as text) like '%'|| :"+fieldname+" || '%' "
+                        sql += 'cast(' + fieldname + " as text) like '%'|| :" + fieldname + " || '%' "
                     sql_filters[fieldname] = searchcontent.replace('*', '')
                 else:
                     # not a valid fieldname, add a tag to make sure our sql statement is valid
@@ -194,28 +196,28 @@ class RuleCache(object):
             sql += ' ) '
             # apply sort order (if any)
-            sql_sort =[]
+            sql_sort = []
             for sortField in sort_by.split(','):
                 if sortField.split(' ')[0] in self._rule_fields:
                     if sortField.split(' ')[-1].lower() == 'desc':
-                        sql_sort.append('%s desc'%sortField.split()[0])
+                        sql_sort.append('%s desc' % sortField.split()[0])
                     else:
-                        sql_sort.append('%s asc'%sortField.split()[0])
+                        sql_sort.append('%s asc' % sortField.split()[0])
             # count total number of rows
-            cur.execute('select count(*) from (%s) a'%sql, sql_filters)
+            cur.execute('select count(*) from (%s) a' % sql, sql_filters)
             result['total_rows'] = cur.fetchall()[0][0]
             if len(sql_sort) > 0:
-                sql += ' order by %s'%(','.join(sql_sort))
+                sql += ' order by %s' % (','.join(sql_sort))
             if str(limit) != '0' and str(limit).isdigit():
-                sql += ' limit %s'%(limit)
+                sql += ' limit %s' % limit
             if str(offset) != '0' and str(offset).isdigit():
-                sql += ' offset %s'%(offset)
+                sql += ' offset %s' % offset
             # fetch results
-            cur.execute(sql,sql_filters)
+            cur.execute(sql, sql_filters)
             while True:
                 row = cur.fetchone()
                 if row is None:
@@ -228,7 +230,7 @@ class RuleCache(object):
         return result

-    def listClassTypes(self):
+    def list_class_types(self):
         """
         :return: list of installed classtypes
         """
@@ -236,7 +238,7 @@ class RuleCache(object):
         if os.path.exists(self.cachefile):
             db = sqlite3.connect(self.cachefile)
             cur = db.cursor()
-            cur.execute('select distinct classtype from rules')
+            cur.execute('SELECT DISTINCT classtype FROM rules')
             for record in cur.fetchall():
                 result.append(record[0])
...
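Putting the renamed methods together, typical use looks like this sketch; the filter value is illustrative, its syntax follows the search() docstring:

    from lib.rulecache import RuleCache

    rc = RuleCache()
    if rc.is_changed():
        rc.create()  # rebuild the sqlite cache from the local .rules files
    result = rc.search(limit='25', offset='0', filter_txt='msg/scan*', sort_by='sid desc')
    print(result['total_rows'], len(result['rows']))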
@@ -40,23 +40,23 @@ from lib.log import reverse_log_reader
 if __name__ == '__main__':
     result = []
-    for filename in sorted(glob.glob('%s*'%suricata_alert_log)):
+    for filename in sorted(glob.glob('%s*' % suricata_alert_log)):
         row = dict()
         row['size'] = os.stat(filename).st_size
         # always list first file and non empty next.
         if row['size'] > 0 or filename.split('/')[-1].count('.') == 1:
             row['modified'] = os.stat(filename).st_mtime
             row['filename'] = filename.split('/')[-1]
             # try to find actual timestamp from file
             for line in reverse_log_reader(filename=filename):
                 if line['line'] != '':
                     record = ujson.loads(line['line'])
-                    if record.has_key('timestamp'):
-                        row['modified'] = int(time.mktime(datetime.datetime.strptime(record['timestamp'].split('.')[0], "%Y-%m-%dT%H:%M:%S").timetuple()))
+                    if 'timestamp' in record:
+                        row['modified'] = int(time.mktime(datetime.datetime.strptime(record['timestamp'].split('.')[0],
+                                                                                     "%Y-%m-%dT%H:%M:%S").timetuple()))
                         break
-            ext=filename.split('.')[-1]
+            ext = filename.split('.')[-1]
             if ext.isdigit():
                 row['sequence'] = int(ext)
             else:
...
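The rows assembled above end up serialized as a JSON list, roughly shaped like this (values illustrative):

    [{"size": 1440, "modified": 1425140700, "filename": "eve.json"},
     {"size": 52301, "modified": 1425054300, "filename": "eve.json.1", "sequence": 1}]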
@@ -35,10 +35,10 @@ from lib.rulecache import RuleCache
 if __name__ == '__main__':
     rc = RuleCache()
-    if rc.isChanged():
+    if rc.is_changed():
         rc.create()
-    items=rc.listClassTypes()
-    result = {'items': items, 'count':len(items)}
+    items = rc.list_class_types()
+    result = {'items': items, 'count': len(items)}
     print (ujson.dumps(result))
@@ -43,10 +43,10 @@ if __name__ == '__main__':
     items = dict()
     for rule in md.list_rules():
         items[rule['filename']] = rule
-        rule_filename = ('%s/%s'%(rule_source_directory, rule['filename'])).replace('//', '/')
+        rule_filename = ('%s/%s' % (rule_source_directory, rule['filename'])).replace('//', '/')
         if os.path.exists(rule_filename):
             items[rule['filename']]['modified_local'] = os.stat(rule_filename).st_mtime
         else:
             items[rule['filename']]['modified_local'] = None
-    result = {'items': items, 'count':len(items)}
+    result = {'items': items, 'count': len(items)}
     print (ujson.dumps(result))
@@ -36,17 +36,17 @@ import sre_constants
 import shlex
 import ujson
 from lib.log import reverse_log_reader
-from lib.params import updateParams
+from lib.params import update_params
 from lib import suricata_alert_log

 if __name__ == '__main__':
     # handle parameters
-    parameters = {'limit':'0','offset':'0', 'filter':'','fileid':''}
-    updateParams(parameters)
+    parameters = {'limit': '0', 'offset': '0', 'filter': '', 'fileid': ''}
+    update_params(parameters)

     # choose logfile by number
     if parameters['fileid'].isdigit():
-        suricata_log = '%s.%d'%(suricata_alert_log,int(parameters['fileid']))
+        suricata_log = '%s.%d' % (suricata_alert_log, int(parameters['fileid']))
     else:
         suricata_log = suricata_alert_log
@@ -60,13 +60,12 @@ if __name__ == '__main__':
     else:
         offset = 0

-
     data_filters = {}
     data_filters_comp = {}
-    for filter in shlex.split(parameters['filter']):
-        filterField = filter.split('/')[0]
-        if filter.find('/') > -1:
-            data_filters[filterField] = '/'.join(filter.split('/')[1:])
+    for filter_txt in shlex.split(parameters['filter']):
+        filterField = filter_txt.split('/')[0]
+        if filter_txt.find('/') > -1:
+            data_filters[filterField] = '/'.join(filter_txt.split('/')[1:])
             filter_regexp = data_filters[filterField]
             filter_regexp = filter_regexp.replace('*', '.*')
             filter_regexp = filter_regexp.lower()
@@ -74,7 +73,7 @@ if __name__ == '__main__':
             try:
                 data_filters_comp[filterField] = re.compile(filter_regexp)
             except sre_constants.error:
                 # remove illegal expression
-                #del data_filters[filterField]
+                # del data_filters[filterField]
                 data_filters_comp[filterField] = re.compile('.*')

     # filter one specific log line
@@ -84,7 +83,7 @@ if __name__ == '__main__':
     log_start_pos = None

     # query suricata eve log
-    result = {'filters':data_filters,'rows':[],'total_rows':0,'origin':suricata_log.split('/')[-1]}
+    result = {'filters': data_filters, 'rows': [], 'total_rows': 0, 'origin': suricata_log.split('/')[-1]}
     if os.path.exists(suricata_log):
         for line in reverse_log_reader(filename=suricata_log, start_pos=log_start_pos):
             try:
@@ -107,7 +106,8 @@ if __name__ == '__main__':
                 for filterKeys in data_filters:
                     filter_hit = False
                     for filterKey in filterKeys.split(','):
-                        if record.has_key(filterKey) and data_filters_comp[filterKeys].match(('%s'%record[filterKey]).lower()):
+                        if filterKey in record and data_filters_comp[filterKeys].match(
+                                ('%s' % record[filterKey]).lower()):
                             filter_hit = True

                     if not filter_hit:
@@ -121,7 +121,7 @@ if __name__ == '__main__':
                 break

             # only try to fetch one line when filepos is given
-            if log_start_pos != None:
+            if log_start_pos is not None:
                 break

     # output results
...
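The result dict assembled above is presumably dumped as JSON like the other scripts, giving output of roughly this shape (row contents depend on the eve log; values illustrative):

    {"filters": {"alert.severity": "1"}, "rows": [...], "total_rows": 2, "origin": "eve.json"}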
@@ -35,21 +35,24 @@
 import ujson
 from lib.rulecache import RuleCache
-from lib.params import updateParams
+from lib.params import update_params

 # Because rule parsing isn't very useful when the rule definitions didn't change, we create a single json file
 # to hold the last results (combined with creation date and number of files).
 if __name__ == '__main__':
     rc = RuleCache()
-    if rc.isChanged():
+    if rc.is_changed():
         rc.create()

     # load parameters, ignore validation here; the search method only processes valid input
-    parameters = {'limit':'0','offset':'0','sort_by':'', 'filter':''}
-    updateParams(parameters)
+    parameters = {'limit': '0', 'offset': '0', 'sort_by': '', 'filter': ''}
+    update_params(parameters)
+    # rename filter tag to filter_txt
+    parameters['filter_txt'] = parameters['filter']
+    del parameters['filter']

     # dump output
-    result=rc.search(**parameters)
+    result = rc.search(**parameters)
     result['parameters'] = parameters
     print (ujson.dumps(result))
@@ -40,7 +40,7 @@ from lib import rule_source_directory
 # check for a running update process, this may take a while so it's better to check...
 try:
-    lck = open('/tmp/suricata-rule-updater.py','w+')
+    lck = open('/tmp/suricata-rule-updater.py', 'w+')
     fcntl.flock(lck, fcntl.LOCK_EX | fcntl.LOCK_NB)
 except IOError:
     # already running, exit status 99
@@ -48,13 +48,13 @@ except IOError:
 if __name__ == '__main__':
     # load list of configured rules from generated config
-    enabled_rulefiles=[]
-    updater_conf='/usr/local/etc/suricata/rule-updater.config'
+    enabled_rulefiles = []
+    updater_conf = '/usr/local/etc/suricata/rule-updater.config'
     if os.path.exists(updater_conf):
         cnf = ConfigParser()
         cnf.read(updater_conf)
         for section in cnf.sections():
-            if cnf.has_option(section,'enabled') and cnf.getint(section,'enabled') == 1:
+            if cnf.has_option(section, 'enabled') and cnf.getint(section, 'enabled') == 1:
                 enabled_rulefiles.append(section.strip())

     # download / remove rules
# download / remove rules # download / remove rules
...@@ -62,14 +62,14 @@ if __name__ == '__main__': ...@@ -62,14 +62,14 @@ if __name__ == '__main__':
dl = downloader.Downloader(target_dir=rule_source_directory) dl = downloader.Downloader(target_dir=rule_source_directory)
for rule in md.list_rules(): for rule in md.list_rules():
if 'url' in rule['source']: if 'url' in rule['source']:
download_proto=str(rule['source']['url']).split(':')[0].lower() download_proto = str(rule['source']['url']).split(':')[0].lower()
if dl.is_supported(download_proto): if dl.is_supported(download_proto):
if rule['filename'] not in enabled_rulefiles: if rule['filename'] not in enabled_rulefiles:
try: try:
# remove configurable but unselected file # remove configurable but unselected file
os.remove(('%s/%s'%(rule_source_directory, rule['filename'])).replace('//', '/')) os.remove(('%s/%s' % (rule_source_directory, rule['filename'])).replace('//', '/'))
except: except OSError:
pass pass
else: else:
url = ('%s/%s'%(rule['source']['url'],rule['filename'])) url = ('%s/%s' % (rule['source']['url'], rule['filename']))
dl.download(proto=download_proto, url=url) dl.download(proto=download_proto, url=url)
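For context, the rule-updater.config read at the top of this script is plain INI; given the ConfigParser calls, it looks roughly like the sketch below, with one section per rule file (section names here are illustrative):

    [emerging-scan.rules]
    enabled = 1

    [emerging-dos.rules]
    enabled = 0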