Commit c7371f04 authored Jun 09, 2015 by Ad Schellevis
(ids) refactor installed rulecache to use sqlite3 and add query options as parameter calls
parent 522aafcd
Showing 3 changed files with 206 additions and 65 deletions (+206 -65)
__init__.py      src/opnsense/scripts/suricata/__init__.py      +0 -0
getRuleJSON.py   src/opnsense/scripts/suricata/getRuleJSON.py   +22 -65
rulecache.py     src/opnsense/scripts/suricata/rulecache.py     +184 -0
src/opnsense/scripts/suricata/__init__.py   0 → 100644
src/opnsense/scripts/suricata/getRuleJSON.py
@@ -29,79 +29,36 @@
--------------------------------------------------------------------------------------
script to fetch all suricata rule information into a single json object with the following contents:
timestamp : (last timestamp of all files)
files : (number of files)
rules : all relevant metadata from the rules including the default enabled or disabled state
total_rows: total rowcount for this selection
parameters: list of parameters used
"""
import os
import os.path
import glob
import json
import sys
from rulecache import RuleCache
# suricata rule settings, source directory and cache json file to use
rule_source_dir = '/usr/local/etc/suricata/rules/'
rule_cache_json = '%srules.json' % rule_source_dir

# all relevant metadata tags to fetch
all_metadata_tags = ['sid', 'msg', 'classtype', 'rev', 'gid']

# Because rule parsing isn't very useful when the rule definitions didn't change we create a single json file
# to hold the last results (combined with creation date and number of files).
if __name__ == '__main__':
    # collect file metadata
    result_structure = {'timestamp': 0, 'files': 0, 'rules': []}
    all_rule_files = []
    last_mtime = 0
    for filename in glob.glob('%s*.rules' % (rule_source_dir)):
        file_mtime = os.stat(filename).st_mtime
        if file_mtime > last_mtime:
            last_mtime = file_mtime
        all_rule_files.append(filename)
    result_structure['files'] = len(all_rule_files)
    result_structure['timestamp'] = last_mtime
    # return last known info if nothing has changed
    if os.path.isfile(rule_cache_json):
        try:
            prev_rules_data = open(rule_cache_json, 'rb').read()
            prev_rules = json.loads(prev_rules_data)
            if 'timestamp' in prev_rules and 'files' in prev_rules:
                if prev_rules['timestamp'] == result_structure['timestamp'] \
                        and prev_rules['files'] == result_structure['files']:
                    print(prev_rules_data)
                    sys.exit(0)
        except ValueError:
            pass
    # parse all rule files and create json cache file for all data
    for filename in all_rule_files:
        rules = []
        data = open(filename)
        for rule in data.read().split('\n'):
            if rule.find('msg:') != -1:
                record = {'enabled': True, 'source': filename.split('/')[-1]}
                if rule.strip()[0] == '#':
                    record['enabled'] = False
                rule_metadata = rule[rule.find('msg:'):-1]
                for field in rule_metadata.split(';'):
                    fieldName = field[0:field.find(':')].strip()
                    fieldContent = field[field.find(':') + 1:].strip()
                    if fieldName in all_metadata_tags:
                        if fieldContent[0] == '"':
                            record[fieldName] = fieldContent[1:-1]
                        else:
                            record[fieldName] = fieldContent
                result_structure['rules'].append(record)
    open(rule_cache_json, 'wb').write(json.dumps(result_structure))
    # print json data
    print(result_structure)
    rc = RuleCache()
    if rc.isChanged():
        rc.create()

    # load parameters, ignore validation here: the search method only processes valid input
    parameters = {'limit': '0', 'offset': '0', 'sort_by': '', 'filter': '', 'filter_fields': ''}
    cmd = None
    for arg in sys.argv[1:]:
        if cmd is None:
            cmd = arg[1:]
        else:
            if cmd in parameters:
                parameters[cmd] = arg.strip()
            cmd = None

    # dump output
    result = rc.search(**parameters)
    result['parameters'] = parameters
    print(json.dumps(result))
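
The refactored script now takes its query options as flag/value pairs: the leading character of each flag is stripped (arg[1:]) and the remainder must match a key in the parameters dict, with the following argument taken as its value. A minimal standalone sketch of that pairing, using a hypothetical argument list (the flag prefix character is arbitrary, only its removal matters):

import json

# hypothetical command line tail, e.g.: getRuleJSON.py /limit 25 /filter scan /filter_fields msg,classtype
argv_tail = ['/limit', '25', '/filter', 'scan', '/filter_fields', 'msg,classtype']
parameters = {'limit': '0', 'offset': '0', 'sort_by': '', 'filter': '', 'filter_fields': ''}
cmd = None
for arg in argv_tail:
    if cmd is None:
        cmd = arg[1:]  # strip the flag prefix character
    else:
        if cmd in parameters:
            parameters[cmd] = arg.strip()
        cmd = None
print(json.dumps(parameters))  # limit and filter fields now reflect the flags; unknown flags are ignored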
src/opnsense/scripts/suricata/rulecache.py   0 → 100644
"""
Copyright (c) 2015 Ad Schellevis
part of OPNsense (https://www.opnsense.org/)
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
--------------------------------------------------------------------------------------
shared module for suricata scripts, handles the installed rules cache for easy access
"""
import os
import os.path
import glob
import sqlite3
class RuleCache(object):
    """
    """

    def __init__(self):
        # suricata rule settings, source directory and cache file to use
        self.rule_source_dir = '/usr/local/etc/suricata/rules/'
        self.cachefile = '%srules.sqlite' % self.rule_source_dir
        self._rule_fields = ['sid', 'msg', 'classtype', 'rev', 'gid', 'source', 'enabled']
    def listLocal(self):
        all_rule_files = []
        for filename in glob.glob('%s*.rules' % (self.rule_source_dir)):
            all_rule_files.append(filename)
        return all_rule_files
    def isChanged(self):
        """ check if rules on disk are probably different from rules in cache
        :return: boolean
        """
        if os.path.exists(self.cachefile):
            last_mtime = 0
            all_rule_files = self.listLocal()
            for filename in all_rule_files:
                file_mtime = os.stat(filename).st_mtime
                if file_mtime > last_mtime:
                    last_mtime = file_mtime
            try:
                db = sqlite3.connect(self.cachefile)
                cur = db.cursor()
                cur.execute('select max(timestamp), max(files) from stats')
                results = cur.fetchall()
                if last_mtime == results[0][0] and len(all_rule_files) == results[0][1]:
                    return False
            except sqlite3.DatabaseError:
                # if for some reason the cache is unreadable, continue and report changed
                pass
        return True
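
isChanged() reports the cache as stale when either the newest .rules mtime or the number of rule files no longer matches the single row kept in the stats table. A standalone sketch of the on-disk half of that comparison (same rule_source_dir as __init__ above):

import glob
import os

rule_source_dir = '/usr/local/etc/suricata/rules/'
all_rule_files = glob.glob('%s*.rules' % rule_source_dir)
# newest modification time across the rule files, 0 when none exist
last_mtime = max([os.stat(filename).st_mtime for filename in all_rule_files] or [0])
print(last_mtime, len(all_rule_files))  # compared against max(timestamp), max(files) from stats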
    def create(self):
        """ create new cache
        :return: None
        """
        if os.path.exists(self.cachefile):
            os.remove(self.cachefile)
        db = sqlite3.connect(self.cachefile)
        cur = db.cursor()
        cur.execute('create table stats (timestamp number, files number)')
        cur.execute("""create table rules (sid number, msg text, classtype text,
                    rev integer, gid integer, enabled boolean, source text)""")
        last_mtime = 0
        all_rule_files = self.listLocal()
        for filename in all_rule_files:
            file_mtime = os.stat(filename).st_mtime
            if file_mtime > last_mtime:
                last_mtime = file_mtime
            rules = []
            data = open(filename)
            for rule in data.read().split('\n'):
                if rule.find('msg:') != -1:
                    record = {'enabled': True, 'source': filename.split('/')[-1]}
                    if rule.strip()[0] == '#':
                        record['enabled'] = False
                    rule_metadata = rule[rule.find('msg:'):-1]
                    for field in rule_metadata.split(';'):
                        fieldName = field[0:field.find(':')].strip()
                        fieldContent = field[field.find(':') + 1:].strip()
                        if fieldName in self._rule_fields:
                            if fieldContent[0] == '"':
                                record[fieldName] = fieldContent[1:-1]
                            else:
                                record[fieldName] = fieldContent
                    for rule_field in self._rule_fields:
                        if rule_field not in record:
                            record[rule_field] = None
                    rules.append(record)
            cur.executemany('insert into rules(%(fieldnames)s) '
                            'values (%(fieldvalues)s)' % {'fieldnames': (','.join(self._rule_fields)),
                                                          'fieldvalues': ':' + (',:'.join(self._rule_fields))}, rules)
        cur.execute('insert into stats (timestamp,files) values (?,?) ', (last_mtime, len(all_rule_files)))
        db.commit()
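
create() renders its insert statement once from _rule_fields and binds every parsed record by name, which is why records are padded with None for any missing field. A minimal sketch of that named-placeholder executemany pattern against an in-memory database (table layout and sample rows are illustrative only):

import sqlite3

fields = ['sid', 'msg']
db = sqlite3.connect(':memory:')
cur = db.cursor()
cur.execute('create table rules (sid number, msg text)')
# renders to: insert into rules(sid,msg) values (:sid,:msg)
sql = 'insert into rules(%(fieldnames)s) values (%(fieldvalues)s)' % {
    'fieldnames': ','.join(fields),
    'fieldvalues': ':' + ',:'.join(fields)}
# each dict must supply a value for every placeholder, hence the None padding in create()
cur.executemany(sql, [{'sid': 1000001, 'msg': 'example rule one'},
                      {'sid': 1000002, 'msg': None}])
db.commit()
print(cur.execute('select count(*) from rules').fetchall()[0][0])  # 2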
    def search(self, limit, offset, filter, filter_fields, sort_by):
        """ search installed rules
        :param limit: limit number of rows
        :param offset: limit offset
        :param filter: text to search
        :param filter_fields: list of fields to apply filter
        :param sort_by: order by, list of fields and possible asc/desc parameter
        :return: dict
        """
        result = {'rows': []}
        db = sqlite3.connect(self.cachefile)
        cur = db.cursor()
        # construct query including filters
        sql = 'select * from rules '
        sql_filters = {}
        for field in map(lambda x: x.lower().strip(), filter_fields.split(',')):
            if field in self._rule_fields:
                if len(sql_filters) > 0:
                    sql += ' or '
                else:
                    sql += ' where '
                sql += 'cast(' + field + " as text) like '%'|| :" + field + " || '%' "
                sql_filters[field] = filter

        # apply sort order (if any)
        sql_sort = []
        for sortField in sort_by.split(','):
            if sortField.split(' ')[0] in self._rule_fields:
                if sortField.split(' ')[-1].lower() == 'desc':
                    sql_sort.append('%s desc' % sortField.split()[0])
                else:
                    sql_sort.append('%s asc' % sortField.split()[0])

        # count total number of rows
        cur.execute('select count(*) from (%s) a' % sql, sql_filters)
        result['total_rows'] = cur.fetchall()[0][0]

        if len(sql_sort) > 0:
            sql += ' order by %s' % (','.join(sql_sort))
        if str(limit) != '0' and str(limit).isdigit():
            sql += ' limit %s' % (limit)
        if str(offset) != '0' and str(offset).isdigit():
            sql += ' offset %s' % (offset)

        # fetch results
        cur.execute(sql, sql_filters)
        while True:
            row = cur.fetchone()
            if row is None:
                break
            record = {}
            for fieldNum in range(len(cur.description)):
                record[cur.description[fieldNum][0]] = row[fieldNum]
            result['rows'].append(record)
        return result
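
Putting the pieces together, getRuleJSON.py above is this class's only caller in the commit. A minimal usage sketch, assuming the rules directory exists and all parameters are passed as strings, matching the defaults in getRuleJSON.py:

from rulecache import RuleCache

rc = RuleCache()
if rc.isChanged():
    rc.create()
# up to 25 rows where msg or classtype contains 'scan', highest sid first
result = rc.search(limit='25', offset='0', filter='scan',
                   filter_fields='msg,classtype', sort_by='sid desc')
print(result['total_rows'], len(result['rows']))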