Compare commits
82 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
740781af56 | ||
|
|
36c9c229b8 | ||
|
|
183fdcbdef | ||
|
|
a2a697900a | ||
|
|
6fef4db8a0 | ||
|
|
e879ff1e9e | ||
|
|
9bfe0627ae | ||
|
|
0179f4299a | ||
|
|
56017e57a0 | ||
|
|
cda91e0906 | ||
|
|
5d47adb5c9 | ||
|
|
54e73c2f54 | ||
|
|
2d075079f1 | ||
|
|
2a8ee4b22b | ||
|
|
1ec31d7be9 | ||
|
|
02286b0c59 | ||
|
|
1d0c5dea9f | ||
|
|
1c4a12c4b7 | ||
|
|
3f2ac45d71 | ||
|
|
518f4dc039 | ||
|
|
2cdeef4ffe | ||
|
|
03579126fd | ||
|
|
e3c27e1817 | ||
|
|
aeaf308679 | ||
|
|
f5e47bea40 | ||
|
|
50cf13a7f2 | ||
|
|
abd8041772 | ||
|
|
847c6438e7 | ||
|
|
ef8309df27 | ||
|
|
0dff6cf983 | ||
|
|
4c04acbd9e | ||
|
|
1c4f231572 | ||
|
|
51b8e169d2 | ||
|
|
b4611ae9b7 | ||
|
|
cd6722017b | ||
|
|
290edffccf | ||
|
|
64a6222bf9 | ||
|
|
adb686b7c9 | ||
|
|
d4af341b0f | ||
|
|
fea7e93c8d | ||
|
|
8b6b8d0f2e | ||
|
|
4dcbd865cc | ||
|
|
39b19444fe | ||
|
|
644d5a5462 | ||
|
|
8e18451e3f | ||
|
|
3dbdd01f97 | ||
|
|
a89079c005 | ||
|
|
a8c0926b4f | ||
|
|
dd2959a31b | ||
|
|
51099f42c3 | ||
|
|
63f170cc7a | ||
|
|
3c1489e588 | ||
|
|
e4f1e03f62 | ||
|
|
83d48ec990 | ||
|
|
b20d2b2684 | ||
|
|
2b918c70ae | ||
|
|
1100c133ba | ||
|
|
88899f0e89 | ||
|
|
59dc0059bc | ||
|
|
986fb304c0 | ||
|
|
d6435d2885 | ||
|
|
affb456499 | ||
|
|
705ed0a0ac | ||
|
|
dfffe5b508 | ||
|
|
fca102edba | ||
|
|
554b6345a2 | ||
|
|
aa954dc84c | ||
|
|
b5506a1368 | ||
|
|
0b55f94828 | ||
|
|
a67052f48c | ||
|
|
6eff6a9329 | ||
|
|
69d32d4511 | ||
|
|
d7a613b710 | ||
|
|
669c019287 | ||
|
|
fcc4901a10 | ||
|
|
4359503484 | ||
|
|
b13f93a2d3 | ||
|
|
8405e0fad6 | ||
|
|
aceb3f1826 | ||
|
|
a206675f3e | ||
|
|
f4253d74ae | ||
|
|
aaea15e516 |
67
.github/workflows/codeql-analysis.yml
vendored
Normal file
67
.github/workflows/codeql-analysis.yml
vendored
Normal file
@@ -0,0 +1,67 @@
|
||||
# For most projects, this workflow file will not need changing; you simply need
|
||||
# to commit it to your repository.
|
||||
#
|
||||
# You may wish to alter this file to override the set of languages analyzed,
|
||||
# or to provide custom queries or build logic.
|
||||
#
|
||||
# ******** NOTE ********
|
||||
# We have attempted to detect the languages in your repository. Please check
|
||||
# the `language` matrix defined below to confirm you have the correct set of
|
||||
# supported CodeQL languages.
|
||||
#
|
||||
name: "CodeQL"
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [ master ]
|
||||
pull_request:
|
||||
# The branches below must be a subset of the branches above
|
||||
branches: [ master ]
|
||||
schedule:
|
||||
- cron: '32 20 * * 0'
|
||||
|
||||
jobs:
|
||||
analyze:
|
||||
name: Analyze
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
language: [ 'go' ]
|
||||
# CodeQL supports [ 'cpp', 'csharp', 'go', 'java', 'javascript', 'python' ]
|
||||
# Learn more:
|
||||
# https://docs.github.com/en/free-pro-team@latest/github/finding-security-vulnerabilities-and-errors-in-your-code/configuring-code-scanning#changing-the-languages-that-are-analyzed
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v2
|
||||
|
||||
# Initializes the CodeQL tools for scanning.
|
||||
- name: Initialize CodeQL
|
||||
uses: github/codeql-action/init@v1
|
||||
with:
|
||||
languages: ${{ matrix.language }}
|
||||
# If you wish to specify custom queries, you can do so here or in a config file.
|
||||
# By default, queries listed here will override any specified in a config file.
|
||||
# Prefix the list here with "+" to use these queries and those in the config file.
|
||||
# queries: ./path/to/local/query, your-org/your-repo/queries@main
|
||||
|
||||
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
|
||||
# If this step fails, then you should remove it and run the build manually (see below)
|
||||
- name: Autobuild
|
||||
uses: github/codeql-action/autobuild@v1
|
||||
|
||||
# ℹ️ Command-line programs to run using the OS shell.
|
||||
# 📚 https://git.io/JvXDl
|
||||
|
||||
# ✏️ If the Autobuild fails above, remove it and uncomment the following three lines
|
||||
# and modify them (or add more) to build your code if your project
|
||||
# uses a compiled language
|
||||
|
||||
#- run: |
|
||||
# make bootstrap
|
||||
# make release
|
||||
|
||||
- name: Perform CodeQL Analysis
|
||||
uses: github/codeql-action/analyze@v1
|
||||
2
.github/workflows/goreleaser.yml
vendored
2
.github/workflows/goreleaser.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
name: Set up Go
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.15
|
||||
go-version: 1.16
|
||||
-
|
||||
name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
|
||||
2
.github/workflows/test.yml
vendored
2
.github/workflows/test.yml
vendored
@@ -11,7 +11,7 @@ jobs:
|
||||
- name: Set up Go 1.x
|
||||
uses: actions/setup-go@v2
|
||||
with:
|
||||
go-version: 1.15.x
|
||||
go-version: 1.16.x
|
||||
id: go
|
||||
|
||||
- name: Check out code into the Go module directory
|
||||
|
||||
2
.github/workflows/tidy.yml
vendored
2
.github/workflows/tidy.yml
vendored
@@ -19,4 +19,4 @@ jobs:
|
||||
github_token: ${{ secrets.GITHUB_TOKEN }}
|
||||
git_user_name: kotakanbe
|
||||
git_user_email: kotakanbe@gmail.com
|
||||
go_version: 1.15.6
|
||||
go_version: 1.16.x
|
||||
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -1,6 +1,5 @@
|
||||
.vscode
|
||||
*.txt
|
||||
*.json
|
||||
*.sqlite3*
|
||||
*.db
|
||||
tags
|
||||
@@ -10,7 +9,7 @@ issues/
|
||||
vendor/
|
||||
log/
|
||||
results/
|
||||
*config.toml
|
||||
config.toml
|
||||
!setup/docker/*
|
||||
.DS_Store
|
||||
dist/
|
||||
|
||||
@@ -74,7 +74,6 @@ archives:
|
||||
format: tar.gz
|
||||
files:
|
||||
- LICENSE
|
||||
- NOTICE
|
||||
- README*
|
||||
- CHANGELOG.md
|
||||
|
||||
@@ -85,7 +84,6 @@ archives:
|
||||
format: tar.gz
|
||||
files:
|
||||
- LICENSE
|
||||
- NOTICE
|
||||
- README*
|
||||
- CHANGELOG.md
|
||||
|
||||
@@ -96,7 +94,6 @@ archives:
|
||||
format: tar.gz
|
||||
files:
|
||||
- LICENSE
|
||||
- NOTICE
|
||||
- README*
|
||||
- CHANGELOG.md
|
||||
|
||||
@@ -107,7 +104,6 @@ archives:
|
||||
format: tar.gz
|
||||
files:
|
||||
- LICENSE
|
||||
- NOTICE
|
||||
- README*
|
||||
- CHANGELOG.md
|
||||
snapshot:
|
||||
|
||||
117
GNUmakefile
117
GNUmakefile
@@ -80,3 +80,120 @@ build-trivy-to-vuls: pretest fmt
|
||||
# future-vuls
|
||||
build-future-vuls: pretest fmt
|
||||
$(GO) build -o future-vuls contrib/future-vuls/cmd/*.go
|
||||
|
||||
|
||||
# integration-test
|
||||
BASE_DIR := '${PWD}/integration/results'
|
||||
NOW=$(shell date --iso-8601=seconds)
|
||||
NOW_JSON_DIR := '${BASE_DIR}/$(NOW)'
|
||||
ONE_SEC_AFTER=$(shell date -d '+1 second' --iso-8601=seconds)
|
||||
ONE_SEC_AFTER_JSON_DIR := '${BASE_DIR}/$(ONE_SEC_AFTER)'
|
||||
|
||||
diff:
|
||||
# git clone git@github.com:vulsio/vulsctl.git
|
||||
# cd vulsctl/docker
|
||||
# ./update-all.sh
|
||||
# cd /path/to/vuls
|
||||
# vim integration/int-config.toml
|
||||
# ln -s vuls vuls.new
|
||||
# ln -s oldvuls vuls.old
|
||||
# make int
|
||||
# (ex. test 10 times: for i in `seq 10`; do make int ARGS=-quiet ; done)
|
||||
ifneq ($(shell ls -U1 ${BASE_DIR} | wc -l), 0)
|
||||
mv ${BASE_DIR}/* /tmp
|
||||
endif
|
||||
mkdir -p ${NOW_JSON_DIR}
|
||||
cp integration/data/*.json ${NOW_JSON_DIR}
|
||||
./vuls.old report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-config.toml $(ARGS)
|
||||
mkdir -p ${ONE_SEC_AFTER_JSON_DIR}
|
||||
cp integration/data/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
./vuls.new report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-config.toml $(ARGS)
|
||||
find ${NOW_JSON_DIR} -type f -exec sed -i -e '/reportedAt/d' {} \;
|
||||
find ${ONE_SEC_AFTER_JSON_DIR} -type f -exec sed -i -e '/reportedAt/d' {} \;
|
||||
diff -c ${NOW_JSON_DIR} ${ONE_SEC_AFTER_JSON_DIR}
|
||||
echo "old: ${NOW_JSON_DIR} , new: ${ONE_SEC_AFTER_JSON_DIR}"
|
||||
|
||||
diff-redis:
|
||||
# docker network create redis-nw
|
||||
# docker run --name redis -d --network redis-nw -p 127.0.0.1:6379:6379 redis
|
||||
# git clone git@github.com:vulsio/vulsctl.git
|
||||
# cd vulsctl/docker
|
||||
# ./update-all-redis.sh
|
||||
# (or export DOCKER_NETWORK=redis-nw; cd /home/ubuntu/vulsctl/docker; ./update-all.sh --dbtype redis --dbpath "redis://redis/0")
|
||||
# vim integration/int-redis-config.toml
|
||||
# ln -s vuls vuls.new
|
||||
# ln -s oldvuls vuls.old
|
||||
# make int-redis
|
||||
ifneq ($(shell ls -U1 ${BASE_DIR} | wc -l), 0)
|
||||
mv ${BASE_DIR}/* /tmp
|
||||
endif
|
||||
mkdir -p ${NOW_JSON_DIR}
|
||||
cp integration/data/*.json ${NOW_JSON_DIR}
|
||||
./vuls.old report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-redis-config.toml
|
||||
mkdir -p ${ONE_SEC_AFTER_JSON_DIR}
|
||||
cp integration/data/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
./vuls.new report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-redis-config.toml
|
||||
find ${NOW_JSON_DIR} -type f -exec sed -i -e '/reportedAt/d' {} \;
|
||||
find ${ONE_SEC_AFTER_JSON_DIR} -type f -exec sed -i -e '/reportedAt/d' {} \;
|
||||
diff -c ${NOW_JSON_DIR} ${ONE_SEC_AFTER_JSON_DIR}
|
||||
echo "old: ${NOW_JSON_DIR} , new: ${ONE_SEC_AFTER_JSON_DIR}"
|
||||
|
||||
diff-rdb-redis:
|
||||
ifneq ($(shell ls -U1 ${BASE_DIR} | wc -l), 0)
|
||||
mv ${BASE_DIR}/* /tmp
|
||||
endif
|
||||
mkdir -p ${NOW_JSON_DIR}
|
||||
cp integration/data/*.json ${NOW_JSON_DIR}
|
||||
./vuls.new report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-config.toml
|
||||
mkdir -p ${ONE_SEC_AFTER_JSON_DIR}
|
||||
cp integration/data/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
./vuls.new report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-redis-config.toml
|
||||
# remove reportedAt line
|
||||
find ${NOW_JSON_DIR} -type f -exec sed -i -e '/reportedAt/d' {} \;
|
||||
find ${ONE_SEC_AFTER_JSON_DIR} -type f -exec sed -i -e '/reportedAt/d' {} \;
|
||||
# remove "Type": line
|
||||
find ${NOW_JSON_DIR} -type f -exec sed -i -e '/"Type":/d' {} \;
|
||||
find ${ONE_SEC_AFTER_JSON_DIR} -type f -exec sed -i -e '/"Type":/d' {} \;
|
||||
# remove "SQLite3Path": line
|
||||
find ${NOW_JSON_DIR} -type f -exec sed -i -e '/"SQLite3Path":/d' {} \;
|
||||
find ${ONE_SEC_AFTER_JSON_DIR} -type f -exec sed -i -e '/"SQLite3Path":/d' {} \;
|
||||
diff -c ${NOW_JSON_DIR} ${ONE_SEC_AFTER_JSON_DIR}
|
||||
echo "old: ${NOW_JSON_DIR} , new: ${ONE_SEC_AFTER_JSON_DIR}"
|
||||
for jsonfile in ${NOW_JSON_DIR}/*.json ; do \
|
||||
echo $$jsonfile; cat $$jsonfile | jq ".scannedCves | length" ; \
|
||||
done
|
||||
for jsonfile in ${ONE_SEC_AFTER_JSON_DIR}/*.json ; do \
|
||||
echo $$jsonfile; cat $$jsonfile | jq ".scannedCves | length" ; \
|
||||
done
|
||||
|
||||
|
||||
head= $(shell git rev-parse HEAD)
|
||||
prev= $(shell git rev-parse HEAD^)
|
||||
branch=$(shell git rev-parse --abbrev-ref HEAD)
|
||||
build-integration:
|
||||
git stash
|
||||
|
||||
# buld HEAD
|
||||
git checkout ${head}
|
||||
make build
|
||||
mv -f ./vuls ./vuls.${head}
|
||||
|
||||
# HEAD^
|
||||
git checkout ${prev}
|
||||
make build
|
||||
mv -f ./vuls ./vuls.${prev}
|
||||
|
||||
git checkout ${branch}
|
||||
git stash apply stash@\{0\}
|
||||
|
||||
# working tree
|
||||
make build
|
||||
|
||||
# for integration testing, vuls.new and vuls.old needed.
|
||||
# ex)
|
||||
# $ ln -s ./vuls ./vuls.new
|
||||
# $ ln -s ./vuls.${head} ./vuls.old
|
||||
# or
|
||||
# $ ln -s ./vuls.${prev} ./vuls.old
|
||||
# $ make int
|
||||
# $ make int-redis
|
||||
@@ -92,7 +92,7 @@ Vuls is a tool created to solve the problems listed above. It has the following
|
||||
- [RustSec Advisory Database](https://github.com/RustSec/advisory-db)
|
||||
|
||||
- WordPress
|
||||
- [WPVulnDB](https://wpvulndb.com/api)
|
||||
- [wpscan](https://wpscan.com/api)
|
||||
|
||||
### Scan mode
|
||||
|
||||
@@ -177,6 +177,10 @@ For more information such as Installation, Tutorial, Usage, visit [vuls.io](http
|
||||
|
||||
kotakanbe ([@kotakanbe](https://twitter.com/kotakanbe)) created vuls and [these fine people](https://github.com/future-architect/vuls/graphs/contributors) have contributed.
|
||||
|
||||
## Contribute
|
||||
|
||||
see [vulsdoc](https://vuls.io/docs/en/how-to-contribute.html)
|
||||
|
||||
----
|
||||
|
||||
## Stargazers over time
|
||||
|
||||
12
cache/bolt.go
vendored
12
cache/bolt.go
vendored
@@ -5,8 +5,8 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
@@ -14,12 +14,12 @@ import (
|
||||
// boltdb is used to store a cache of Changelogs of Ubuntu/Debian
|
||||
type Bolt struct {
|
||||
Path string
|
||||
Log *logrus.Entry
|
||||
Log logging.Logger
|
||||
db *bolt.DB
|
||||
}
|
||||
|
||||
// SetupBolt opens a boltdb and creates a meta bucket if not exists.
|
||||
func SetupBolt(path string, l *logrus.Entry) error {
|
||||
func SetupBolt(path string, l logging.Logger) error {
|
||||
l.Infof("Open boltDB: %s", path)
|
||||
db, err := bolt.Open(path, 0600, nil)
|
||||
if err != nil {
|
||||
@@ -47,7 +47,7 @@ func (b Bolt) Close() error {
|
||||
return b.db.Close()
|
||||
}
|
||||
|
||||
// CreateBucketIfNotExists creates a buket that is specified by arg.
|
||||
// CreateBucketIfNotExists creates a bucket that is specified by arg.
|
||||
func (b *Bolt) createBucketIfNotExists(name string) error {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists([]byte(name))
|
||||
@@ -93,7 +93,7 @@ func (b Bolt) RefreshMeta(meta Meta) error {
|
||||
})
|
||||
}
|
||||
|
||||
// EnsureBuckets puts a Meta information and create a buket that holds changelogs.
|
||||
// EnsureBuckets puts a Meta information and create a bucket that holds changelogs.
|
||||
func (b Bolt) EnsureBuckets(meta Meta) error {
|
||||
jsonBytes, err := json.Marshal(meta)
|
||||
if err != nil {
|
||||
@@ -159,7 +159,7 @@ func (b Bolt) GetChangelog(servername, packName string) (changelog string, err e
|
||||
return
|
||||
}
|
||||
|
||||
// PutChangelog put the changelgo of specified packName into the Bucket
|
||||
// PutChangelog put the changelog of specified packName into the Bucket
|
||||
func (b Bolt) PutChangelog(servername, packName, changelog string) error {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket([]byte(servername))
|
||||
|
||||
8
cache/bolt_test.go
vendored
8
cache/bolt_test.go
vendored
@@ -7,8 +7,8 @@ import (
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
const path = "/tmp/vuls-test-cache-11111111.db"
|
||||
@@ -29,7 +29,7 @@ var meta = Meta{
|
||||
}
|
||||
|
||||
func TestSetupBolt(t *testing.T) {
|
||||
log := logrus.NewEntry(&logrus.Logger{})
|
||||
log := logging.NewNormalLogger()
|
||||
err := SetupBolt(path, log)
|
||||
if err != nil {
|
||||
t.Errorf("Failed to setup bolt: %s", err)
|
||||
@@ -57,7 +57,7 @@ func TestSetupBolt(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestEnsureBuckets(t *testing.T) {
|
||||
log := logrus.NewEntry(&logrus.Logger{})
|
||||
log := logging.NewNormalLogger()
|
||||
if err := SetupBolt(path, log); err != nil {
|
||||
t.Errorf("Failed to setup bolt: %s", err)
|
||||
}
|
||||
@@ -98,7 +98,7 @@ func TestEnsureBuckets(t *testing.T) {
|
||||
|
||||
func TestPutGetChangelog(t *testing.T) {
|
||||
clog := "changelog-text"
|
||||
log := logrus.NewEntry(&logrus.Logger{})
|
||||
log := logging.NewNormalLogger()
|
||||
if err := SetupBolt(path, log); err != nil {
|
||||
t.Errorf("Failed to setup bolt: %s", err)
|
||||
}
|
||||
|
||||
@@ -29,7 +29,7 @@ func main() {
|
||||
flag.Parse()
|
||||
|
||||
if *v {
|
||||
fmt.Printf("vuls %s %s\n", config.Version, config.Revision)
|
||||
fmt.Printf("vuls-%s-%s\n", config.Version, config.Revision)
|
||||
os.Exit(int(subcommands.ExitSuccess))
|
||||
}
|
||||
|
||||
|
||||
30
config/awsconf.go
Normal file
30
config/awsconf.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package config
|
||||
|
||||
// AWSConf is aws config
|
||||
type AWSConf struct {
|
||||
// AWS profile to use
|
||||
Profile string `json:"profile"`
|
||||
|
||||
// AWS region to use
|
||||
Region string `json:"region"`
|
||||
|
||||
// S3 bucket name
|
||||
S3Bucket string `json:"s3Bucket"`
|
||||
|
||||
// /bucket/path/to/results
|
||||
S3ResultsDir string `json:"s3ResultsDir"`
|
||||
|
||||
// The Server-side encryption algorithm used when storing the reports in S3 (e.g., AES256, aws:kms).
|
||||
S3ServerSideEncryption string `json:"s3ServerSideEncryption"`
|
||||
|
||||
Enabled bool `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
// Validate configuration
|
||||
func (c *AWSConf) Validate() (errs []error) {
|
||||
// TODO
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
return
|
||||
}
|
||||
46
config/azureconf.go
Normal file
46
config/azureconf.go
Normal file
@@ -0,0 +1,46 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// AzureConf is azure config
|
||||
type AzureConf struct {
|
||||
// Azure account name to use. AZURE_STORAGE_ACCOUNT environment variable is used if not specified
|
||||
AccountName string `json:"accountName"`
|
||||
|
||||
// Azure account key to use. AZURE_STORAGE_ACCESS_KEY environment variable is used if not specified
|
||||
AccountKey string `json:"-"`
|
||||
|
||||
// Azure storage container name
|
||||
ContainerName string `json:"containerName"`
|
||||
|
||||
Enabled bool `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
const (
|
||||
azureAccount = "AZURE_STORAGE_ACCOUNT"
|
||||
azureKey = "AZURE_STORAGE_ACCESS_KEY"
|
||||
)
|
||||
|
||||
// Validate configuration
|
||||
func (c *AzureConf) Validate() (errs []error) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
// overwrite if env var is not empty
|
||||
if os.Getenv(azureAccount) != "" {
|
||||
c.AccountName = os.Getenv(azureAccount)
|
||||
}
|
||||
if os.Getenv(azureKey) != "" {
|
||||
c.AccountKey = os.Getenv(azureKey)
|
||||
}
|
||||
|
||||
if c.ContainerName == "" {
|
||||
errs = append(errs, xerrors.Errorf("Azure storage container name is required"))
|
||||
}
|
||||
return
|
||||
}
|
||||
33
config/chatworkconf.go
Normal file
33
config/chatworkconf.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/asaskevich/govalidator"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// ChatWorkConf is ChatWork config
|
||||
type ChatWorkConf struct {
|
||||
APIToken string `json:"-"`
|
||||
Room string `json:"-"`
|
||||
Enabled bool `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
// Validate validates configuration
|
||||
func (c *ChatWorkConf) Validate() (errs []error) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
if len(c.Room) == 0 {
|
||||
errs = append(errs, xerrors.New("chatWorkConf.room must not be empty"))
|
||||
}
|
||||
|
||||
if len(c.APIToken) == 0 {
|
||||
errs = append(errs, xerrors.New("chatWorkConf.ApiToken must not be empty"))
|
||||
}
|
||||
|
||||
_, err := govalidator.ValidateStruct(c)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
1070
config/config.go
1070
config/config.go
File diff suppressed because it is too large
Load Diff
@@ -2,6 +2,8 @@ package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
. "github.com/future-architect/vuls/constant"
|
||||
)
|
||||
|
||||
func TestSyslogConfValidate(t *testing.T) {
|
||||
@@ -55,7 +57,7 @@ func TestSyslogConfValidate(t *testing.T) {
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
Conf.ToSyslog = true
|
||||
tt.conf.Enabled = true
|
||||
errs := tt.conf.Validate()
|
||||
if len(errs) != tt.expectedErrLength {
|
||||
t.Errorf("test: %d, expected %d, actual %d", i, tt.expectedErrLength, len(errs))
|
||||
|
||||
32
config/httpconf.go
Normal file
32
config/httpconf.go
Normal file
@@ -0,0 +1,32 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/asaskevich/govalidator"
|
||||
)
|
||||
|
||||
// HTTPConf is HTTP config
|
||||
type HTTPConf struct {
|
||||
URL string `valid:"url" json:"-"`
|
||||
Enabled bool `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
const httpKey = "VULS_HTTP_URL"
|
||||
|
||||
// Validate validates configuration
|
||||
func (c *HTTPConf) Validate() (errs []error) {
|
||||
if !c.Enabled {
|
||||
return nil
|
||||
}
|
||||
|
||||
// overwrite if env var is not empty
|
||||
if os.Getenv(httpKey) != "" {
|
||||
c.URL = os.Getenv(httpKey)
|
||||
}
|
||||
|
||||
if _, err := govalidator.ValidateStruct(c); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return errs
|
||||
}
|
||||
@@ -1,9 +0,0 @@
|
||||
package config
|
||||
|
||||
// IPS is
|
||||
type IPS string
|
||||
|
||||
const (
|
||||
// DeepSecurity is
|
||||
DeepSecurity IPS = "deepsecurity"
|
||||
)
|
||||
198
config/os.go
Normal file
198
config/os.go
Normal file
@@ -0,0 +1,198 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
)
|
||||
|
||||
// EOL has End-of-Life information
|
||||
type EOL struct {
|
||||
StandardSupportUntil time.Time
|
||||
ExtendedSupportUntil time.Time
|
||||
Ended bool
|
||||
}
|
||||
|
||||
// IsStandardSupportEnded checks now is under standard support
|
||||
func (e EOL) IsStandardSupportEnded(now time.Time) bool {
|
||||
return e.Ended ||
|
||||
!e.ExtendedSupportUntil.IsZero() && e.StandardSupportUntil.IsZero() ||
|
||||
!e.StandardSupportUntil.IsZero() && now.After(e.StandardSupportUntil)
|
||||
}
|
||||
|
||||
// IsExtendedSuppportEnded checks now is under extended support
|
||||
func (e EOL) IsExtendedSuppportEnded(now time.Time) bool {
|
||||
if e.Ended {
|
||||
return true
|
||||
}
|
||||
if e.StandardSupportUntil.IsZero() && e.ExtendedSupportUntil.IsZero() {
|
||||
return false
|
||||
}
|
||||
return !e.ExtendedSupportUntil.IsZero() && now.After(e.ExtendedSupportUntil) ||
|
||||
e.ExtendedSupportUntil.IsZero() && now.After(e.StandardSupportUntil)
|
||||
}
|
||||
|
||||
// GetEOL return EOL information for the OS-release passed by args
|
||||
// https://github.com/aquasecurity/trivy/blob/master/pkg/detector/ospkg/redhat/redhat.go#L20
|
||||
func GetEOL(family, release string) (eol EOL, found bool) {
|
||||
switch family {
|
||||
case constant.Amazon:
|
||||
rel := "2"
|
||||
if isAmazonLinux1(release) {
|
||||
rel = "1"
|
||||
}
|
||||
eol, found = map[string]EOL{
|
||||
"1": {StandardSupportUntil: time.Date(2023, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"2": {},
|
||||
}[rel]
|
||||
case constant.RedHat:
|
||||
// https://access.redhat.com/support/policy/updates/errata
|
||||
eol, found = map[string]EOL{
|
||||
"3": {Ended: true},
|
||||
"4": {Ended: true},
|
||||
"5": {Ended: true},
|
||||
"6": {
|
||||
StandardSupportUntil: time.Date(2020, 11, 30, 23, 59, 59, 0, time.UTC),
|
||||
ExtendedSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"7": {
|
||||
StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"8": {
|
||||
StandardSupportUntil: time.Date(2029, 5, 31, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
}[major(release)]
|
||||
case constant.CentOS:
|
||||
// https://en.wikipedia.org/wiki/CentOS#End-of-support_schedule
|
||||
// TODO Stream
|
||||
eol, found = map[string]EOL{
|
||||
"3": {Ended: true},
|
||||
"4": {Ended: true},
|
||||
"5": {Ended: true},
|
||||
"6": {Ended: true},
|
||||
"7": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"8": {StandardSupportUntil: time.Date(2021, 12, 31, 23, 59, 59, 0, time.UTC)},
|
||||
}[major(release)]
|
||||
case constant.Oracle:
|
||||
eol, found = map[string]EOL{
|
||||
// Source:
|
||||
// https://www.oracle.com/a/ocom/docs/elsp-lifetime-069338.pdf
|
||||
// https://community.oracle.com/docs/DOC-917964
|
||||
"3": {Ended: true},
|
||||
"4": {Ended: true},
|
||||
"5": {Ended: true},
|
||||
"6": {
|
||||
StandardSupportUntil: time.Date(2021, 3, 1, 23, 59, 59, 0, time.UTC),
|
||||
ExtendedSupportUntil: time.Date(2024, 3, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"7": {
|
||||
StandardSupportUntil: time.Date(2024, 7, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"8": {
|
||||
StandardSupportUntil: time.Date(2029, 7, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
}[major(release)]
|
||||
case constant.Debian:
|
||||
eol, found = map[string]EOL{
|
||||
// https://wiki.debian.org/LTS
|
||||
"6": {Ended: true},
|
||||
"7": {Ended: true},
|
||||
"8": {Ended: true},
|
||||
"9": {StandardSupportUntil: time.Date(2022, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"10": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
}[major(release)]
|
||||
case constant.Raspbian:
|
||||
// Not found
|
||||
eol, found = map[string]EOL{}[major(release)]
|
||||
case constant.Ubuntu:
|
||||
// https://wiki.ubuntu.com/Releases
|
||||
eol, found = map[string]EOL{
|
||||
"14.10": {Ended: true},
|
||||
"14.04": {
|
||||
ExtendedSupportUntil: time.Date(2022, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"15.04": {Ended: true},
|
||||
"16.10": {Ended: true},
|
||||
"17.04": {Ended: true},
|
||||
"17.10": {Ended: true},
|
||||
"16.04": {
|
||||
StandardSupportUntil: time.Date(2021, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
ExtendedSupportUntil: time.Date(2024, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"18.04": {
|
||||
StandardSupportUntil: time.Date(2023, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
ExtendedSupportUntil: time.Date(2028, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"18.10": {Ended: true},
|
||||
"19.04": {Ended: true},
|
||||
"19.10": {Ended: true},
|
||||
"20.04": {
|
||||
StandardSupportUntil: time.Date(2025, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"21.04": {
|
||||
StandardSupportUntil: time.Date(2022, 1, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"21.10": {
|
||||
StandardSupportUntil: time.Date(2022, 7, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
}[release]
|
||||
case constant.SUSEEnterpriseServer:
|
||||
//TODO
|
||||
case constant.Alpine:
|
||||
// https://github.com/aquasecurity/trivy/blob/master/pkg/detector/ospkg/alpine/alpine.go#L19
|
||||
// https://wiki.alpinelinux.org/wiki/Alpine_Linux:Releases
|
||||
eol, found = map[string]EOL{
|
||||
"2.0": {Ended: true},
|
||||
"2.1": {Ended: true},
|
||||
"2.2": {Ended: true},
|
||||
"2.3": {Ended: true},
|
||||
"2.4": {Ended: true},
|
||||
"2.5": {Ended: true},
|
||||
"2.6": {Ended: true},
|
||||
"2.7": {Ended: true},
|
||||
"3.0": {Ended: true},
|
||||
"3.1": {Ended: true},
|
||||
"3.2": {Ended: true},
|
||||
"3.3": {Ended: true},
|
||||
"3.4": {Ended: true},
|
||||
"3.5": {Ended: true},
|
||||
"3.6": {Ended: true},
|
||||
"3.7": {Ended: true},
|
||||
"3.8": {Ended: true},
|
||||
"3.9": {Ended: true},
|
||||
"3.10": {StandardSupportUntil: time.Date(2021, 5, 1, 23, 59, 59, 0, time.UTC)},
|
||||
"3.11": {StandardSupportUntil: time.Date(2021, 11, 1, 23, 59, 59, 0, time.UTC)},
|
||||
"3.12": {StandardSupportUntil: time.Date(2022, 5, 1, 23, 59, 59, 0, time.UTC)},
|
||||
"3.13": {StandardSupportUntil: time.Date(2022, 11, 1, 23, 59, 59, 0, time.UTC)},
|
||||
}[majorDotMinor(release)]
|
||||
case constant.FreeBSD:
|
||||
// https://www.freebsd.org/security/
|
||||
eol, found = map[string]EOL{
|
||||
"7": {Ended: true},
|
||||
"8": {Ended: true},
|
||||
"9": {Ended: true},
|
||||
"10": {Ended: true},
|
||||
"11": {StandardSupportUntil: time.Date(2021, 9, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"12": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
}[major(release)]
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func major(osVer string) (majorVersion string) {
|
||||
return strings.Split(osVer, ".")[0]
|
||||
}
|
||||
|
||||
func majorDotMinor(osVer string) (majorDotMinor string) {
|
||||
ss := strings.SplitN(osVer, ".", 3)
|
||||
if len(ss) == 1 {
|
||||
return osVer
|
||||
}
|
||||
return fmt.Sprintf("%s.%s", ss[0], ss[1])
|
||||
}
|
||||
|
||||
func isAmazonLinux1(osRelease string) bool {
|
||||
return len(strings.Fields(osRelease)) == 1
|
||||
}
|
||||
375
config/os_test.go
Normal file
375
config/os_test.go
Normal file
@@ -0,0 +1,375 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
. "github.com/future-architect/vuls/constant"
|
||||
)
|
||||
|
||||
func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
type fields struct {
|
||||
family string
|
||||
release string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
now time.Time
|
||||
found bool
|
||||
stdEnded bool
|
||||
extEnded bool
|
||||
}{
|
||||
// Amazon Linux
|
||||
{
|
||||
name: "amazon linux 1 supported",
|
||||
fields: fields{family: Amazon, release: "2018.03"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "amazon linux 1 eol on 2023-6-30",
|
||||
fields: fields{family: Amazon, release: "2018.03"},
|
||||
now: time.Date(2023, 7, 1, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "amazon linux 2 supported",
|
||||
fields: fields{family: Amazon, release: "2 (Karoo)"},
|
||||
now: time.Date(2023, 7, 1, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
//RHEL
|
||||
{
|
||||
name: "RHEL7 supported",
|
||||
fields: fields{family: RedHat, release: "7"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "RHEL8 supported",
|
||||
fields: fields{family: RedHat, release: "8"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "RHEL6 eol",
|
||||
fields: fields{family: RedHat, release: "6"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "RHEL9 not found",
|
||||
fields: fields{family: RedHat, release: "9"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: false,
|
||||
},
|
||||
//CentOS
|
||||
{
|
||||
name: "CentOS 7 supported",
|
||||
fields: fields{family: CentOS, release: "7"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "CentOS 8 supported",
|
||||
fields: fields{family: CentOS, release: "8"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "CentOS 6 eol",
|
||||
fields: fields{family: CentOS, release: "6"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "CentOS 9 not found",
|
||||
fields: fields{family: CentOS, release: "9"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: false,
|
||||
},
|
||||
//Oracle
|
||||
{
|
||||
name: "Oracle Linux 7 supported",
|
||||
fields: fields{family: Oracle, release: "7"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Oracle Linux 8 supported",
|
||||
fields: fields{family: Oracle, release: "8"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Oracle Linux 6 eol",
|
||||
fields: fields{family: Oracle, release: "6"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Oracle Linux 9 not found",
|
||||
fields: fields{family: Oracle, release: "9"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: false,
|
||||
},
|
||||
//Ubuntu
|
||||
{
|
||||
name: "Ubuntu 18.04 supported",
|
||||
fields: fields{family: Ubuntu, release: "18.04"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Ubuntu 18.04 ext supported",
|
||||
fields: fields{family: Ubuntu, release: "18.04"},
|
||||
now: time.Date(2025, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Ubuntu 16.04 supported",
|
||||
fields: fields{family: Ubuntu, release: "18.04"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Ubuntu 14.04 eol",
|
||||
fields: fields{family: Ubuntu, release: "14.04"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Ubuntu 14.10 eol",
|
||||
fields: fields{family: Ubuntu, release: "14.10"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Ubuntu 12.10 not found",
|
||||
fields: fields{family: Ubuntu, release: "12.10"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
found: false,
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
},
|
||||
{
|
||||
name: "Ubuntu 21.04 supported",
|
||||
fields: fields{family: Ubuntu, release: "21.04"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
found: true,
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
},
|
||||
//Debian
|
||||
{
|
||||
name: "Debian 9 supported",
|
||||
fields: fields{family: Debian, release: "9"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Debian 10 supported",
|
||||
fields: fields{family: Debian, release: "10"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Debian 8 supported",
|
||||
fields: fields{family: Debian, release: "8"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Debian 11 supported",
|
||||
fields: fields{family: Debian, release: "11"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: false,
|
||||
},
|
||||
//alpine
|
||||
{
|
||||
name: "alpine 3.10 supported",
|
||||
fields: fields{family: Alpine, release: "3.10"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Alpine 3.11 supported",
|
||||
fields: fields{family: Alpine, release: "3.11"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Alpine 3.12 supported",
|
||||
fields: fields{family: Alpine, release: "3.12"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Alpine 3.9 eol",
|
||||
fields: fields{family: Alpine, release: "3.9"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Alpine 3.14 not found",
|
||||
fields: fields{family: Alpine, release: "3.14"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: false,
|
||||
},
|
||||
// freebsd
|
||||
{
|
||||
name: "freebsd 11 supported",
|
||||
fields: fields{family: FreeBSD, release: "11"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "freebsd 11 eol on 2021-9-30",
|
||||
fields: fields{family: FreeBSD, release: "11"},
|
||||
now: time.Date(2021, 10, 1, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "freebsd 12 supported",
|
||||
fields: fields{family: FreeBSD, release: "12"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "freebsd 10 eol",
|
||||
fields: fields{family: FreeBSD, release: "10"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
eol, found := GetEOL(tt.fields.family, tt.fields.release)
|
||||
if found != tt.found {
|
||||
t.Errorf("GetEOL.found = %v, want %v", found, tt.found)
|
||||
}
|
||||
if found {
|
||||
if got := eol.IsStandardSupportEnded(tt.now); got != tt.stdEnded {
|
||||
t.Errorf("EOL.IsStandardSupportEnded() = %v, want %v", got, tt.stdEnded)
|
||||
}
|
||||
if got := eol.IsExtendedSuppportEnded(tt.now); got != tt.extEnded {
|
||||
t.Errorf("EOL.IsExtendedSupportEnded() = %v, want %v", got, tt.extEnded)
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_majorDotMinor(t *testing.T) {
|
||||
type args struct {
|
||||
osVer string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
wantMajorDotMinor string
|
||||
}{
|
||||
{
|
||||
name: "empty",
|
||||
args: args{
|
||||
osVer: "",
|
||||
},
|
||||
wantMajorDotMinor: "",
|
||||
},
|
||||
{
|
||||
name: "major",
|
||||
args: args{
|
||||
osVer: "3",
|
||||
},
|
||||
wantMajorDotMinor: "3",
|
||||
},
|
||||
{
|
||||
name: "major dot minor",
|
||||
args: args{
|
||||
osVer: "3.1",
|
||||
},
|
||||
wantMajorDotMinor: "3.1",
|
||||
},
|
||||
{
|
||||
name: "major dot minor dot release",
|
||||
args: args{
|
||||
osVer: "3.1.4",
|
||||
},
|
||||
wantMajorDotMinor: "3.1",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if gotMajorDotMinor := majorDotMinor(tt.args.osVer); gotMajorDotMinor != tt.wantMajorDotMinor {
|
||||
t.Errorf("majorDotMinor() = %v, want %v", gotMajorDotMinor, tt.wantMajorDotMinor)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
34
config/saasconf.go
Normal file
34
config/saasconf.go
Normal file
@@ -0,0 +1,34 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/asaskevich/govalidator"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// SaasConf is FutureVuls config
|
||||
type SaasConf struct {
|
||||
GroupID int64 `json:"-"`
|
||||
Token string `json:"-"`
|
||||
URL string `json:"-"`
|
||||
}
|
||||
|
||||
// Validate validates configuration
|
||||
func (c *SaasConf) Validate() (errs []error) {
|
||||
if c.GroupID == 0 {
|
||||
errs = append(errs, xerrors.New("GroupID must not be empty"))
|
||||
}
|
||||
|
||||
if len(c.Token) == 0 {
|
||||
errs = append(errs, xerrors.New("Token must not be empty"))
|
||||
}
|
||||
|
||||
if len(c.URL) == 0 {
|
||||
errs = append(errs, xerrors.New("URL must not be empty"))
|
||||
}
|
||||
|
||||
_, err := govalidator.ValidateStruct(c)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
110
config/scanmode.go
Normal file
110
config/scanmode.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// ScanMode has a type of scan mode. fast, fast-root, deep and offline
|
||||
type ScanMode struct {
|
||||
flag byte
|
||||
}
|
||||
|
||||
const (
|
||||
// Fast is fast scan mode
|
||||
Fast = byte(1 << iota)
|
||||
// FastRoot is scanmode
|
||||
FastRoot
|
||||
// Deep is scanmode
|
||||
Deep
|
||||
// Offline is scanmode
|
||||
Offline
|
||||
|
||||
fastStr = "fast"
|
||||
fastRootStr = "fast-root"
|
||||
deepStr = "deep"
|
||||
offlineStr = "offline"
|
||||
)
|
||||
|
||||
// Set mode
|
||||
func (s *ScanMode) Set(f byte) {
|
||||
s.flag |= f
|
||||
}
|
||||
|
||||
// IsFast return whether scan mode is fast
|
||||
func (s ScanMode) IsFast() bool {
|
||||
return s.flag&Fast == Fast
|
||||
}
|
||||
|
||||
// IsFastRoot return whether scan mode is fastroot
|
||||
func (s ScanMode) IsFastRoot() bool {
|
||||
return s.flag&FastRoot == FastRoot
|
||||
}
|
||||
|
||||
// IsDeep return whether scan mode is deep
|
||||
func (s ScanMode) IsDeep() bool {
|
||||
return s.flag&Deep == Deep
|
||||
}
|
||||
|
||||
// IsOffline return whether scan mode is offline
|
||||
func (s ScanMode) IsOffline() bool {
|
||||
return s.flag&Offline == Offline
|
||||
}
|
||||
|
||||
func (s *ScanMode) ensure() error {
|
||||
numTrue := 0
|
||||
for _, b := range []bool{s.IsFast(), s.IsFastRoot(), s.IsDeep()} {
|
||||
if b {
|
||||
numTrue++
|
||||
}
|
||||
}
|
||||
if numTrue == 0 {
|
||||
s.Set(Fast)
|
||||
} else if s.IsDeep() && s.IsOffline() {
|
||||
return xerrors.New("Don't specify both of deep and offline")
|
||||
} else if numTrue != 1 {
|
||||
return xerrors.New("Specify only one of offline, fast, fast-root or deep")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s ScanMode) String() string {
|
||||
ss := ""
|
||||
if s.IsFast() {
|
||||
ss = fastStr
|
||||
} else if s.IsFastRoot() {
|
||||
ss = fastRootStr
|
||||
} else if s.IsDeep() {
|
||||
ss = deepStr
|
||||
}
|
||||
if s.IsOffline() {
|
||||
ss += " " + offlineStr
|
||||
}
|
||||
return ss + " mode"
|
||||
}
|
||||
|
||||
func setScanMode(server *ServerInfo, d ServerInfo) error {
|
||||
if len(server.ScanMode) == 0 {
|
||||
server.ScanMode = Conf.Default.ScanMode
|
||||
}
|
||||
for _, m := range server.ScanMode {
|
||||
switch strings.ToLower(m) {
|
||||
case fastStr:
|
||||
server.Mode.Set(Fast)
|
||||
case fastRootStr:
|
||||
server.Mode.Set(FastRoot)
|
||||
case deepStr:
|
||||
server.Mode.Set(Deep)
|
||||
case offlineStr:
|
||||
server.Mode.Set(Offline)
|
||||
default:
|
||||
return xerrors.Errorf("scanMode: %s of %s is invalid. Specify -fast, -fast-root, -deep or offline",
|
||||
m, server.ServerName)
|
||||
}
|
||||
}
|
||||
if err := server.Mode.ensure(); err != nil {
|
||||
return xerrors.Errorf("%s in %s", err, server.ServerName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
97
config/scanmodule.go
Normal file
97
config/scanmodule.go
Normal file
@@ -0,0 +1,97 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// ScanModule has a type of scan module
|
||||
type ScanModule struct {
|
||||
flag byte
|
||||
}
|
||||
|
||||
const (
|
||||
// OSPkg is scanmodule
|
||||
OSPkg = byte(1 << iota)
|
||||
// WordPress is scanmodule
|
||||
WordPress
|
||||
// Lockfile is scanmodule
|
||||
Lockfile
|
||||
// Port is scanmodule
|
||||
Port
|
||||
|
||||
osPkgStr = "ospkg"
|
||||
wordPressStr = "wordpress"
|
||||
lockfileStr = "lockfile"
|
||||
portStr = "port"
|
||||
)
|
||||
|
||||
var allModules = []string{osPkgStr, wordPressStr, lockfileStr, portStr}
|
||||
|
||||
// Set module
|
||||
func (s *ScanModule) Set(f byte) {
|
||||
s.flag |= f
|
||||
}
|
||||
|
||||
// IsScanOSPkg return whether scanning os pkg
|
||||
func (s ScanModule) IsScanOSPkg() bool {
|
||||
return s.flag&OSPkg == OSPkg
|
||||
}
|
||||
|
||||
// IsScanWordPress return whether scanning wordpress
|
||||
func (s ScanModule) IsScanWordPress() bool {
|
||||
return s.flag&WordPress == WordPress
|
||||
}
|
||||
|
||||
// IsScanLockFile whether scanning lock file
|
||||
func (s ScanModule) IsScanLockFile() bool {
|
||||
return s.flag&Lockfile == Lockfile
|
||||
}
|
||||
|
||||
// IsScanPort whether scanning listening ports
|
||||
func (s ScanModule) IsScanPort() bool {
|
||||
return s.flag&Port == Port
|
||||
}
|
||||
|
||||
// IsZero return the struct value are all false
|
||||
func (s ScanModule) IsZero() bool {
|
||||
return !(s.IsScanOSPkg() || s.IsScanWordPress() || s.IsScanLockFile() || s.IsScanPort())
|
||||
}
|
||||
|
||||
func (s *ScanModule) ensure() error {
|
||||
if s.IsZero() {
|
||||
s.Set(OSPkg)
|
||||
s.Set(WordPress)
|
||||
s.Set(Lockfile)
|
||||
s.Set(Port)
|
||||
} else if !s.IsScanOSPkg() && s.IsScanPort() {
|
||||
return xerrors.New("When specifying the Port, Specify OSPkg as well")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func setScanModules(server *ServerInfo, d ServerInfo) error {
|
||||
if len(server.ScanModules) == 0 {
|
||||
server.ScanModules = d.ScanModules
|
||||
}
|
||||
for _, m := range server.ScanModules {
|
||||
switch strings.ToLower(m) {
|
||||
case osPkgStr:
|
||||
server.Module.Set(OSPkg)
|
||||
case wordPressStr:
|
||||
server.Module.Set(WordPress)
|
||||
case lockfileStr:
|
||||
server.Module.Set(Lockfile)
|
||||
case portStr:
|
||||
server.Module.Set(Port)
|
||||
default:
|
||||
return xerrors.Errorf("scanMode: %s of %s is invalid. Specify %s",
|
||||
m, server.ServerName, allModules)
|
||||
}
|
||||
}
|
||||
if err := server.Module.ensure(); err != nil {
|
||||
return xerrors.Errorf("%s in %s", err, server.ServerName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
65
config/scanmodule_test.go
Normal file
65
config/scanmodule_test.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestScanModule_IsZero(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modes []byte
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "not zero",
|
||||
modes: []byte{OSPkg},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "zero",
|
||||
modes: []byte{},
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := ScanModule{}
|
||||
for _, b := range tt.modes {
|
||||
s.Set(b)
|
||||
}
|
||||
if got := s.IsZero(); got != tt.want {
|
||||
t.Errorf("ScanModule.IsZero() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanModule_validate(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
modes []byte
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "valid",
|
||||
modes: []byte{},
|
||||
wantErr: false,
|
||||
},
|
||||
{
|
||||
name: "err",
|
||||
modes: []byte{WordPress, Lockfile, Port},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
s := ScanModule{}
|
||||
for _, b := range tt.modes {
|
||||
s.Set(b)
|
||||
}
|
||||
if err := s.ensure(); (err != nil) != tt.wantErr {
|
||||
t.Errorf("ScanModule.validate() error = %v, wantErr %v", err, tt.wantErr)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
52
config/slackconf.go
Normal file
52
config/slackconf.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/asaskevich/govalidator"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// SlackConf is slack config
|
||||
type SlackConf struct {
|
||||
HookURL string `valid:"url" json:"-" toml:"hookURL,omitempty"`
|
||||
LegacyToken string `json:"-" toml:"legacyToken,omitempty"`
|
||||
Channel string `json:"-" toml:"channel,omitempty"`
|
||||
IconEmoji string `json:"-" toml:"iconEmoji,omitempty"`
|
||||
AuthUser string `json:"-" toml:"authUser,omitempty"`
|
||||
NotifyUsers []string `toml:"notifyUsers,omitempty" json:"-"`
|
||||
Text string `json:"-"`
|
||||
Enabled bool `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
// Validate validates configuration
|
||||
func (c *SlackConf) Validate() (errs []error) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
|
||||
if len(c.HookURL) == 0 && len(c.LegacyToken) == 0 {
|
||||
errs = append(errs, xerrors.New("slack.hookURL or slack.LegacyToken must not be empty"))
|
||||
}
|
||||
|
||||
if len(c.Channel) == 0 {
|
||||
errs = append(errs, xerrors.New("slack.channel must not be empty"))
|
||||
} else {
|
||||
if !(strings.HasPrefix(c.Channel, "#") ||
|
||||
c.Channel == "${servername}") {
|
||||
errs = append(errs, xerrors.Errorf(
|
||||
"channel's prefix must be '#', channel: %s", c.Channel))
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.AuthUser) == 0 {
|
||||
errs = append(errs, xerrors.New("slack.authUser must not be empty"))
|
||||
}
|
||||
|
||||
_, err := govalidator.ValidateStruct(c)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
65
config/smtpconf.go
Normal file
65
config/smtpconf.go
Normal file
@@ -0,0 +1,65 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/asaskevich/govalidator"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// SMTPConf is smtp config
|
||||
type SMTPConf struct {
|
||||
SMTPAddr string `toml:"smtpAddr,omitempty" json:"-"`
|
||||
SMTPPort string `toml:"smtpPort,omitempty" valid:"port" json:"-"`
|
||||
User string `toml:"user,omitempty" json:"-"`
|
||||
Password string `toml:"password,omitempty" json:"-"`
|
||||
From string `toml:"from,omitempty" json:"-"`
|
||||
To []string `toml:"to,omitempty" json:"-"`
|
||||
Cc []string `toml:"cc,omitempty" json:"-"`
|
||||
SubjectPrefix string `toml:"subjectPrefix,omitempty" json:"-"`
|
||||
Enabled bool `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
func checkEmails(emails []string) (errs []error) {
|
||||
for _, addr := range emails {
|
||||
if len(addr) == 0 {
|
||||
return
|
||||
}
|
||||
if ok := govalidator.IsEmail(addr); !ok {
|
||||
errs = append(errs, xerrors.Errorf("Invalid email address. email: %s", addr))
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Validate SMTP configuration
|
||||
func (c *SMTPConf) Validate() (errs []error) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
emails := []string{}
|
||||
emails = append(emails, c.From)
|
||||
emails = append(emails, c.To...)
|
||||
emails = append(emails, c.Cc...)
|
||||
|
||||
if emailErrs := checkEmails(emails); 0 < len(emailErrs) {
|
||||
errs = append(errs, emailErrs...)
|
||||
}
|
||||
|
||||
if c.SMTPAddr == "" {
|
||||
errs = append(errs, xerrors.New("email.smtpAddr must not be empty"))
|
||||
}
|
||||
if c.SMTPPort == "" {
|
||||
errs = append(errs, xerrors.New("email.smtpPort must not be empty"))
|
||||
}
|
||||
if len(c.To) == 0 {
|
||||
errs = append(errs, xerrors.New("email.To required at least one address"))
|
||||
}
|
||||
if len(c.From) == 0 {
|
||||
errs = append(errs, xerrors.New("email.From required at least one address"))
|
||||
}
|
||||
|
||||
_, err := govalidator.ValidateStruct(c)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
130
config/syslogconf.go
Normal file
130
config/syslogconf.go
Normal file
@@ -0,0 +1,130 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"log/syslog"
|
||||
|
||||
"github.com/asaskevich/govalidator"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// SyslogConf is syslog config
|
||||
type SyslogConf struct {
|
||||
Protocol string `json:"-"`
|
||||
Host string `valid:"host" json:"-"`
|
||||
Port string `valid:"port" json:"-"`
|
||||
Severity string `json:"-"`
|
||||
Facility string `json:"-"`
|
||||
Tag string `json:"-"`
|
||||
Verbose bool `json:"-"`
|
||||
Enabled bool `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
// Validate validates configuration
|
||||
func (c *SyslogConf) Validate() (errs []error) {
|
||||
if !c.Enabled {
|
||||
return nil
|
||||
}
|
||||
// If protocol is empty, it will connect to the local syslog server.
|
||||
if len(c.Protocol) > 0 && c.Protocol != "tcp" && c.Protocol != "udp" {
|
||||
errs = append(errs, errors.New(`syslog.protocol must be "tcp" or "udp"`))
|
||||
}
|
||||
|
||||
// Default port: 514
|
||||
if c.Port == "" {
|
||||
c.Port = "514"
|
||||
}
|
||||
|
||||
if _, err := c.GetSeverity(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
if _, err := c.GetFacility(); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
if _, err := govalidator.ValidateStruct(c); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
// GetSeverity gets severity
|
||||
func (c *SyslogConf) GetSeverity() (syslog.Priority, error) {
|
||||
if c.Severity == "" {
|
||||
return syslog.LOG_INFO, nil
|
||||
}
|
||||
|
||||
switch c.Severity {
|
||||
case "emerg":
|
||||
return syslog.LOG_EMERG, nil
|
||||
case "alert":
|
||||
return syslog.LOG_ALERT, nil
|
||||
case "crit":
|
||||
return syslog.LOG_CRIT, nil
|
||||
case "err":
|
||||
return syslog.LOG_ERR, nil
|
||||
case "warning":
|
||||
return syslog.LOG_WARNING, nil
|
||||
case "notice":
|
||||
return syslog.LOG_NOTICE, nil
|
||||
case "info":
|
||||
return syslog.LOG_INFO, nil
|
||||
case "debug":
|
||||
return syslog.LOG_DEBUG, nil
|
||||
default:
|
||||
return -1, xerrors.Errorf("Invalid severity: %s", c.Severity)
|
||||
}
|
||||
}
|
||||
|
||||
// GetFacility gets facility
|
||||
func (c *SyslogConf) GetFacility() (syslog.Priority, error) {
|
||||
if c.Facility == "" {
|
||||
return syslog.LOG_AUTH, nil
|
||||
}
|
||||
|
||||
switch c.Facility {
|
||||
case "kern":
|
||||
return syslog.LOG_KERN, nil
|
||||
case "user":
|
||||
return syslog.LOG_USER, nil
|
||||
case "mail":
|
||||
return syslog.LOG_MAIL, nil
|
||||
case "daemon":
|
||||
return syslog.LOG_DAEMON, nil
|
||||
case "auth":
|
||||
return syslog.LOG_AUTH, nil
|
||||
case "syslog":
|
||||
return syslog.LOG_SYSLOG, nil
|
||||
case "lpr":
|
||||
return syslog.LOG_LPR, nil
|
||||
case "news":
|
||||
return syslog.LOG_NEWS, nil
|
||||
case "uucp":
|
||||
return syslog.LOG_UUCP, nil
|
||||
case "cron":
|
||||
return syslog.LOG_CRON, nil
|
||||
case "authpriv":
|
||||
return syslog.LOG_AUTHPRIV, nil
|
||||
case "ftp":
|
||||
return syslog.LOG_FTP, nil
|
||||
case "local0":
|
||||
return syslog.LOG_LOCAL0, nil
|
||||
case "local1":
|
||||
return syslog.LOG_LOCAL1, nil
|
||||
case "local2":
|
||||
return syslog.LOG_LOCAL2, nil
|
||||
case "local3":
|
||||
return syslog.LOG_LOCAL3, nil
|
||||
case "local4":
|
||||
return syslog.LOG_LOCAL4, nil
|
||||
case "local5":
|
||||
return syslog.LOG_LOCAL5, nil
|
||||
case "local6":
|
||||
return syslog.LOG_LOCAL6, nil
|
||||
case "local7":
|
||||
return syslog.LOG_LOCAL7, nil
|
||||
default:
|
||||
return -1, xerrors.Errorf("Invalid facility: %s", c.Facility)
|
||||
}
|
||||
}
|
||||
33
config/telegramconf.go
Normal file
33
config/telegramconf.go
Normal file
@@ -0,0 +1,33 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"github.com/asaskevich/govalidator"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// TelegramConf is Telegram config
|
||||
type TelegramConf struct {
|
||||
Token string `json:"-"`
|
||||
ChatID string `json:"-"`
|
||||
Enabled bool `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
// Validate validates configuration
|
||||
func (c *TelegramConf) Validate() (errs []error) {
|
||||
if !c.Enabled {
|
||||
return
|
||||
}
|
||||
if len(c.ChatID) == 0 {
|
||||
errs = append(errs, xerrors.New("TelegramConf.ChatID must not be empty"))
|
||||
}
|
||||
|
||||
if len(c.Token) == 0 {
|
||||
errs = append(errs, xerrors.New("TelegramConf.Token must not be empty"))
|
||||
}
|
||||
|
||||
_, err := govalidator.ValidateStruct(c)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -5,6 +5,7 @@ import (
"strings"

"github.com/BurntSushi/toml"
"github.com/future-architect/vuls/constant"
"github.com/knqyf263/go-cpe/naming"
"golang.org/x/xerrors"
)
@@ -15,270 +16,206 @@ type TOMLLoader struct {

// Load load the configuration TOML file specified by path arg.
func (c TOMLLoader) Load(pathToToml, keyPass string) error {
var conf Config
if _, err := toml.DecodeFile(pathToToml, &conf); err != nil {
// util.Log.Infof("Loading config: %s", pathToToml)
if _, err := toml.DecodeFile(pathToToml, &Conf); err != nil {
return err
}
Conf.EMail = conf.EMail
Conf.Slack = conf.Slack
Conf.ChatWork = conf.ChatWork
Conf.Telegram = conf.Telegram
Conf.Saas = conf.Saas
Conf.Syslog = conf.Syslog
Conf.HTTP = conf.HTTP
Conf.AWS = conf.AWS
Conf.Azure = conf.Azure

Conf.CveDict = conf.CveDict
Conf.OvalDict = conf.OvalDict
Conf.Gost = conf.Gost
Conf.Exploit = conf.Exploit
Conf.Metasploit = conf.Metasploit

d := conf.Default
Conf.Default = d
servers := make(map[string]ServerInfo)

if keyPass != "" {
d.KeyPassword = keyPass
for _, cnf := range []VulnDictInterface{
&Conf.CveDict,
&Conf.OvalDict,
&Conf.Gost,
&Conf.Exploit,
&Conf.Metasploit,
} {
cnf.Init()
}

index := 0
for serverName, v := range conf.Servers {
if 0 < len(v.KeyPassword) {
return xerrors.Errorf("[Deprecated] KEYPASSWORD IN CONFIG FILE ARE UNSECURE. REMOVE THEM IMMEDIATELY FOR A SECURITY REASONS. THEY WILL BE REMOVED IN A FUTURE RELEASE: %s", serverName)
for name, server := range Conf.Servers {
server.ServerName = name
if err := setDefaultIfEmpty(&server, Conf.Default); err != nil {
return xerrors.Errorf("Failed to set default value to config. server: %s, err: %w", name, err)
}

s := ServerInfo{ServerName: serverName}
if v.Type != ServerTypePseudo {
s.Host = v.Host
if len(s.Host) == 0 {
return xerrors.Errorf("%s is invalid. host is empty", serverName)
}

s.JumpServer = v.JumpServer
if len(s.JumpServer) == 0 {
s.JumpServer = d.JumpServer
}

switch {
case v.Port != "":
s.Port = v.Port
case d.Port != "":
s.Port = d.Port
default:
s.Port = "22"
}

switch {
case v.User != "":
s.User = v.User
case d.User != "":
s.User = d.User
default:
if s.Port != "local" {
return xerrors.Errorf("%s is invalid. User is empty", serverName)
}
}

s.SSHConfigPath = v.SSHConfigPath
if len(s.SSHConfigPath) == 0 {
s.SSHConfigPath = d.SSHConfigPath
}

s.KeyPath = v.KeyPath
if len(s.KeyPath) == 0 {
s.KeyPath = d.KeyPath
}
s.KeyPassword = v.KeyPassword
if len(s.KeyPassword) == 0 {
s.KeyPassword = d.KeyPassword
}
if err := setScanMode(&server, Conf.Default); err != nil {
return xerrors.Errorf("Failed to set ScanMode: %w", err)
}

s.ScanMode = v.ScanMode
if len(s.ScanMode) == 0 {
s.ScanMode = d.ScanMode
if len(s.ScanMode) == 0 {
s.ScanMode = []string{"fast"}
}
}
for _, m := range s.ScanMode {
switch m {
case "fast":
s.Mode.Set(Fast)
case "fast-root":
s.Mode.Set(FastRoot)
case "deep":
s.Mode.Set(Deep)
case "offline":
s.Mode.Set(Offline)
default:
return xerrors.Errorf("scanMode: %s of %s is invalid. Specify -fast, -fast-root, -deep or offline", m, serverName)
}
}
if err := s.Mode.validate(); err != nil {
return xerrors.Errorf("%s in %s", err, serverName)
if err := setScanModules(&server, Conf.Default); err != nil {
return xerrors.Errorf("Failed to set ScanModule: %w", err)
}

s.CpeNames = v.CpeNames
if len(s.CpeNames) == 0 {
s.CpeNames = d.CpeNames
if len(server.CpeNames) == 0 {
server.CpeNames = Conf.Default.CpeNames
}

s.Lockfiles = v.Lockfiles
if len(s.Lockfiles) == 0 {
s.Lockfiles = d.Lockfiles
}

s.FindLock = v.FindLock

for i, n := range s.CpeNames {
for i, n := range server.CpeNames {
uri, err := toCpeURI(n)
if err != nil {
return xerrors.Errorf("Failed to parse CPENames %s in %s, err: %w", n, serverName, err)
return xerrors.Errorf("Failed to parse CPENames %s in %s, err: %w", n, name, err)
}
s.CpeNames[i] = uri
server.CpeNames[i] = uri
}

s.ContainersIncluded = v.ContainersIncluded
if len(s.ContainersIncluded) == 0 {
s.ContainersIncluded = d.ContainersIncluded
}

s.ContainersExcluded = v.ContainersExcluded
if len(s.ContainersExcluded) == 0 {
s.ContainersExcluded = d.ContainersExcluded
}

s.ContainerType = v.ContainerType
if len(s.ContainerType) == 0 {
s.ContainerType = d.ContainerType
}

s.Containers = v.Containers
for contName, cont := range s.Containers {
cont.IgnoreCves = append(cont.IgnoreCves, d.IgnoreCves...)
s.Containers[contName] = cont
}

s.OwaspDCXMLPath = v.OwaspDCXMLPath
if len(s.OwaspDCXMLPath) == 0 {
s.OwaspDCXMLPath = d.OwaspDCXMLPath
}

s.Memo = v.Memo
if s.Memo == "" {
s.Memo = d.Memo
}

s.IgnoreCves = v.IgnoreCves
for _, cve := range d.IgnoreCves {
for _, cve := range Conf.Default.IgnoreCves {
found := false
for _, c := range s.IgnoreCves {
for _, c := range server.IgnoreCves {
if cve == c {
found = true
break
}
}
if !found {
s.IgnoreCves = append(s.IgnoreCves, cve)
server.IgnoreCves = append(server.IgnoreCves, cve)
}
}

s.IgnorePkgsRegexp = v.IgnorePkgsRegexp
for _, pkg := range d.IgnorePkgsRegexp {
for _, pkg := range Conf.Default.IgnorePkgsRegexp {
found := false
for _, p := range s.IgnorePkgsRegexp {
for _, p := range server.IgnorePkgsRegexp {
if pkg == p {
found = true
break
}
}
if !found {
s.IgnorePkgsRegexp = append(s.IgnorePkgsRegexp, pkg)
server.IgnorePkgsRegexp = append(server.IgnorePkgsRegexp, pkg)
}
}
for _, reg := range s.IgnorePkgsRegexp {
for _, reg := range server.IgnorePkgsRegexp {
_, err := regexp.Compile(reg)
if err != nil {
return xerrors.Errorf("Failed to parse %s in %s. err: %w", reg, serverName, err)
return xerrors.Errorf("Failed to parse %s in %s. err: %w", reg, name, err)
}
}
for contName, cont := range s.Containers {
for contName, cont := range server.Containers {
for _, reg := range cont.IgnorePkgsRegexp {
_, err := regexp.Compile(reg)
if err != nil {
return xerrors.Errorf("Failed to parse %s in %s@%s. err: %w",
reg, contName, serverName, err)
reg, contName, name, err)
}
}
}

opt := map[string]interface{}{}
for k, v := range d.Optional {
opt[k] = v
for ownerRepo, githubSetting := range server.GitHubRepos {
if ss := strings.Split(ownerRepo, "/"); len(ss) != 2 {
return xerrors.Errorf("Failed to parse GitHub owner/repo: %s in %s",
ownerRepo, name)
}
if githubSetting.Token == "" {
return xerrors.Errorf("GitHub owner/repo: %s in %s token is empty",
ownerRepo, name)
}
}
for k, v := range v.Optional {
opt[k] = v
}
s.Optional = opt

s.Enablerepo = v.Enablerepo
if len(s.Enablerepo) == 0 {
s.Enablerepo = d.Enablerepo
if len(server.Enablerepo) == 0 {
server.Enablerepo = Conf.Default.Enablerepo
}
if len(s.Enablerepo) != 0 {
for _, repo := range s.Enablerepo {
if len(server.Enablerepo) != 0 {
for _, repo := range server.Enablerepo {
switch repo {
case "base", "updates":
// nop
default:
return xerrors.Errorf(
"For now, enablerepo have to be base or updates: %s, servername: %s",
s.Enablerepo, serverName)
"For now, enablerepo have to be base or updates: %s",
server.Enablerepo)
}
}
}

s.GitHubRepos = v.GitHubRepos
for ownerRepo, githubSetting := range s.GitHubRepos {
if ss := strings.Split(ownerRepo, "/"); len(ss) != 2 {
return xerrors.Errorf("Failed to parse GitHub owner/repo: %s in %s",
ownerRepo, serverName)
}
if githubSetting.Token == "" {
return xerrors.Errorf("GitHub owner/repo: %s in %s token is empty",
ownerRepo, serverName)
}
}

s.UUIDs = v.UUIDs
s.Type = v.Type

s.WordPress.WPVulnDBToken = v.WordPress.WPVulnDBToken
s.WordPress.CmdPath = v.WordPress.CmdPath
s.WordPress.DocRoot = v.WordPress.DocRoot
s.WordPress.OSUser = v.WordPress.OSUser
s.WordPress.IgnoreInactive = v.WordPress.IgnoreInactive

s.IgnoredJSONKeys = v.IgnoredJSONKeys
if len(s.IgnoredJSONKeys) == 0 {
s.IgnoredJSONKeys = d.IgnoredJSONKeys
}

s.LogMsgAnsiColor = Colors[index%len(Colors)]
server.LogMsgAnsiColor = Colors[index%len(Colors)]
index++

servers[serverName] = s
Conf.Servers[name] = server
}
Conf.Servers = servers
return nil
}

func setDefaultIfEmpty(server *ServerInfo, d ServerInfo) error {
if server.Type != constant.ServerTypePseudo {
if len(server.Host) == 0 {
return xerrors.Errorf("server.host is empty")
}

if len(server.JumpServer) == 0 {
server.JumpServer = Conf.Default.JumpServer
}

if server.Port == "" {
if Conf.Default.Port != "" {
server.Port = Conf.Default.Port
} else {
server.Port = "22"
}
}

if server.User == "" {
server.User = Conf.Default.User
if server.User == "" && server.Port != "local" {
return xerrors.Errorf("server.user is empty")
}
}

if server.SSHConfigPath == "" {
server.SSHConfigPath = Conf.Default.SSHConfigPath
}

if server.KeyPath == "" {
server.KeyPath = Conf.Default.KeyPath
}
}

if len(server.Lockfiles) == 0 {
server.Lockfiles = Conf.Default.Lockfiles
}

if len(server.ContainersIncluded) == 0 {
server.ContainersIncluded = Conf.Default.ContainersIncluded
}

if len(server.ContainersExcluded) == 0 {
server.ContainersExcluded = Conf.Default.ContainersExcluded
}

if server.ContainerType == "" {
server.ContainerType = Conf.Default.ContainerType
}

for contName, cont := range server.Containers {
cont.IgnoreCves = append(cont.IgnoreCves, Conf.Default.IgnoreCves...)
server.Containers[contName] = cont
}

if server.OwaspDCXMLPath == "" {
server.OwaspDCXMLPath = Conf.Default.OwaspDCXMLPath
}

if server.Memo == "" {
server.Memo = Conf.Default.Memo
}

if server.WordPress == nil {
server.WordPress = Conf.Default.WordPress
if server.WordPress == nil {
server.WordPress = &WordPressConf{}
}
}

if len(server.IgnoredJSONKeys) == 0 {
server.IgnoredJSONKeys = Conf.Default.IgnoredJSONKeys
}

opt := map[string]interface{}{}
for k, v := range Conf.Default.Optional {
opt[k] = v
}
for k, v := range server.Optional {
opt[k] = v
}
server.Optional = opt

Conf.CveDict.Init()
Conf.OvalDict.Init()
Conf.Gost.Init()
Conf.Exploit.Init()
Conf.Metasploit.Init()
return nil
}
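A minimal sketch of driving the loader shown above; it assumes the exported TOMLLoader.Load signature and the package-level Conf variable it populates, and the config.toml path is illustrative only.

```go
package main

import (
	"log"

	"github.com/future-architect/vuls/config"
)

func main() {
	// Load populates the package-level config.Conf, including Conf.Servers.
	if err := (config.TOMLLoader{}).Load("/path/to/config.toml", ""); err != nil {
		log.Fatalf("failed to load config: %+v", err)
	}
	for name, s := range config.Conf.Servers {
		log.Printf("server %s -> host %s:%s", name, s.Host, s.Port)
	}
}
```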
config/vulnDictConf.go (new file, +276 lines)
@@ -0,0 +1,276 @@
package config

import (
	"fmt"
	"os"
	"path/filepath"
	"time"

	"github.com/asaskevich/govalidator"
	"github.com/future-architect/vuls/logging"
	"github.com/parnurzeal/gorequest"
	"golang.org/x/xerrors"
)

// VulnDictInterface is an interface of vulnsrc
type VulnDictInterface interface {
	Init()
	Validate() error
	IsFetchViaHTTP() bool
	CheckHTTPHealth() error
	GetName() string
	GetType() string
	GetURL() string
	GetSQLite3Path() string
	GetDebugSQL() bool
}

// VulnDict is a base struct of vuln dicts
type VulnDict struct {
	Name string

	// DB type of CVE dictionary (sqlite3, mysql, postgres or redis)
	Type string

	// http://cve-dictionary.com:1323 or DB connection string
	URL string `json:"-"`

	// /path/to/cve.sqlite3
	SQLite3Path string

	DebugSQL bool
}

// GetType returns type
func (cnf VulnDict) GetType() string {
	return cnf.Type
}

// GetName returns name
func (cnf VulnDict) GetName() string {
	return cnf.Name
}

// GetURL returns url
func (cnf VulnDict) GetURL() string {
	return cnf.URL
}

// GetSQLite3Path return the path of SQLite3
func (cnf VulnDict) GetSQLite3Path() string {
	return cnf.SQLite3Path
}

// GetDebugSQL return debugSQL flag
func (cnf VulnDict) GetDebugSQL() bool {
	return cnf.DebugSQL
}

// Validate settings
func (cnf VulnDict) Validate() error {
	logging.Log.Infof("%s.type=%s, %s.url=%s, %s.SQLite3Path=%s",
		cnf.Name, cnf.Type, cnf.Name, cnf.URL, cnf.Name, cnf.SQLite3Path)

	switch cnf.Type {
	case "sqlite3":
		if cnf.URL != "" {
			return xerrors.Errorf("To use SQLite3, specify %s.type=sqlite3 and %s.SQLite3Path. To use as HTTP server mode, specify %s.type=http and %s.url",
				cnf.Name, cnf.Name, cnf.Name, cnf.Name)
		}
		if ok, _ := govalidator.IsFilePath(cnf.SQLite3Path); !ok {
			return xerrors.Errorf("SQLite3 path must be a *Absolute* file path. %s.SQLite3Path: %s",
				cnf.Name, cnf.SQLite3Path)
		}
		if _, err := os.Stat(cnf.SQLite3Path); os.IsNotExist(err) {
			logging.Log.Warnf("%s.SQLite3Path=%s file not found", cnf.Name, cnf.SQLite3Path)
		}
	case "mysql":
		if cnf.URL == "" {
			return xerrors.Errorf(`MySQL connection string is needed. %s.url="user:pass@tcp(localhost:3306)/dbname"`, cnf.Name)
		}
	case "postgres":
		if cnf.URL == "" {
			return xerrors.Errorf(`PostgreSQL connection string is needed. %s.url="host=myhost user=user dbname=dbname sslmode=disable password=password"`, cnf.Name)
		}
	case "redis":
		if cnf.URL == "" {
			return xerrors.Errorf(`Redis connection string is needed. %s.url="redis://localhost/0"`, cnf.Name)
		}
	case "http":
		if cnf.URL == "" {
			return xerrors.Errorf(`URL is needed. -%s-url="http://localhost:1323"`, cnf.Name)
		}
	default:
		return xerrors.Errorf("%s.type must be either 'sqlite3', 'mysql', 'postgres', 'redis' or 'http'. %s.type: %s", cnf.Name, cnf.Name, cnf.Type)
	}
	return nil
}

// Init the struct
func (cnf VulnDict) Init() {}

func (cnf *VulnDict) setDefault(sqlite3Name string) {
	if cnf.Type == "" {
		cnf.Type = "sqlite3"
	}
	if cnf.URL == "" && cnf.SQLite3Path == "" {
		wd, _ := os.Getwd()
		cnf.SQLite3Path = filepath.Join(wd, sqlite3Name)
	}
}

// IsFetchViaHTTP returns if fetch via HTTP
func (cnf VulnDict) IsFetchViaHTTP() bool {
	return cnf.Type == "http"
}

// CheckHTTPHealth checks http server status
func (cnf VulnDict) CheckHTTPHealth() error {
	if !cnf.IsFetchViaHTTP() {
		return nil
	}

	url := fmt.Sprintf("%s/health", cnf.URL)
	resp, _, errs := gorequest.New().Timeout(10 * time.Second).SetDebug(Conf.Debug).Get(url).End()
	// resp, _, errs = gorequest.New().Proxy(api.httpProxy).Get(url).End()
	if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
		return xerrors.Errorf("Failed to request to CVE server. url: %s, errs: %s",
			url, errs)
	}
	return nil
}

// GovalDictConf is goval-dictionary config
type GovalDictConf struct {
	VulnDict
}

const govalType = "OVALDB_TYPE"
const govalURL = "OVALDB_URL"
const govalPATH = "OVALDB_SQLITE3_PATH"

// Init set options with the following priority.
// 1. Environment variable
// 2. config.toml
func (cnf *GovalDictConf) Init() {
	cnf.Name = "ovalDict"
	if os.Getenv(govalType) != "" {
		cnf.Type = os.Getenv(govalType)
	}
	if os.Getenv(govalURL) != "" {
		cnf.URL = os.Getenv(govalURL)
	}
	if os.Getenv(govalPATH) != "" {
		cnf.SQLite3Path = os.Getenv(govalPATH)
	}
	cnf.setDefault("oval.sqlite3")
	cnf.DebugSQL = Conf.DebugSQL
}

// ExploitConf is exploit config
type ExploitConf struct {
	VulnDict
}

const exploitDBType = "EXPLOITDB_TYPE"
const exploitDBURL = "EXPLOITDB_URL"
const exploitDBPATH = "EXPLOITDB_SQLITE3_PATH"

// Init set options with the following priority.
// 1. Environment variable
// 2. config.toml
func (cnf *ExploitConf) Init() {
	cnf.Name = "exploit"
	if os.Getenv(exploitDBType) != "" {
		cnf.Type = os.Getenv(exploitDBType)
	}
	if os.Getenv(exploitDBURL) != "" {
		cnf.URL = os.Getenv(exploitDBURL)
	}
	if os.Getenv(exploitDBPATH) != "" {
		cnf.SQLite3Path = os.Getenv(exploitDBPATH)
	}
	cnf.setDefault("go-exploitdb.sqlite3")
	cnf.DebugSQL = Conf.DebugSQL
}

// GoCveDictConf is GoCveDict config
type GoCveDictConf struct {
	VulnDict
}

const cveDBType = "CVEDB_TYPE"
const cveDBURL = "CVEDB_URL"
const cveDBPATH = "CVEDB_SQLITE3_PATH"

// Init set options with the following priority.
// 1. Environment variable
// 2. config.toml
func (cnf *GoCveDictConf) Init() {
	cnf.Name = "cveDict"
	if os.Getenv(cveDBType) != "" {
		cnf.Type = os.Getenv(cveDBType)
	}
	if os.Getenv(cveDBURL) != "" {
		cnf.URL = os.Getenv(cveDBURL)
	}
	if os.Getenv(cveDBPATH) != "" {
		cnf.SQLite3Path = os.Getenv(cveDBPATH)
	}
	cnf.setDefault("cve.sqlite3")
	cnf.DebugSQL = Conf.DebugSQL
}

// GostConf is gost config
type GostConf struct {
	VulnDict
}

const gostDBType = "GOSTDB_TYPE"
const gostDBURL = "GOSTDB_URL"
const gostDBPATH = "GOSTDB_SQLITE3_PATH"

// Init set options with the following priority.
// 1. Environment variable
// 2. config.toml
func (cnf *GostConf) Init() {
	cnf.Name = "gost"
	if os.Getenv(gostDBType) != "" {
		cnf.Type = os.Getenv(gostDBType)
	}
	if os.Getenv(gostDBURL) != "" {
		cnf.URL = os.Getenv(gostDBURL)
	}
	if os.Getenv(gostDBPATH) != "" {
		cnf.SQLite3Path = os.Getenv(gostDBPATH)
	}
	cnf.setDefault("gost.sqlite3")
	cnf.DebugSQL = Conf.DebugSQL
}

// MetasploitConf is gost go-metasploitdb
type MetasploitConf struct {
	VulnDict
}

const metasploitDBType = "METASPLOITDB_TYPE"
const metasploitDBURL = "METASPLOITDB_URL"
const metasploitDBPATH = "METASPLOITDB_SQLITE3_PATH"

// Init set options with the following priority.
// 1. Environment variable
// 2. config.toml
func (cnf *MetasploitConf) Init() {
	cnf.Name = "metasploit"
	if os.Getenv(metasploitDBType) != "" {
		cnf.Type = os.Getenv(metasploitDBType)
	}
	if os.Getenv(metasploitDBURL) != "" {
		cnf.URL = os.Getenv(metasploitDBURL)
	}
	if os.Getenv(metasploitDBPATH) != "" {
		cnf.SQLite3Path = os.Getenv(metasploitDBPATH)
	}
	cnf.setDefault("go-msfdb.sqlite3")
	cnf.DebugSQL = Conf.DebugSQL
}
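A sketch of the override order the Init methods above implement: environment variables win over whatever config.toml put into the struct. The values are illustrative only; the env var names come from the constants in the file above.

```go
package main

import (
	"fmt"
	"os"

	"github.com/future-architect/vuls/config"
)

func main() {
	// Simulate values that config.toml would have set.
	cnf := config.GovalDictConf{}
	cnf.Type = "sqlite3"
	cnf.SQLite3Path = "/var/lib/vuls/oval.sqlite3"

	// Environment variables take priority when present.
	os.Setenv("OVALDB_TYPE", "http")
	os.Setenv("OVALDB_URL", "http://127.0.0.1:1324")

	cnf.Init()
	fmt.Println(cnf.GetType(), cnf.GetURL(), cnf.IsFetchViaHTTP()) // http http://127.0.0.1:1324 true
}
```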
constant/constant.go (new file, +61 lines)
@@ -0,0 +1,61 @@
package constant

// Global constant
// Pkg local constants should not be defined here.
// Define them in the each package.

const (
	// RedHat is
	RedHat = "redhat"

	// Debian is
	Debian = "debian"

	// Ubuntu is
	Ubuntu = "ubuntu"

	// CentOS is
	CentOS = "centos"

	// Fedora is
	// Fedora = "fedora"

	// Amazon is
	Amazon = "amazon"

	// Oracle is
	Oracle = "oracle"

	// FreeBSD is
	FreeBSD = "freebsd"

	// Raspbian is
	Raspbian = "raspbian"

	// Windows is
	Windows = "windows"

	// OpenSUSE is
	OpenSUSE = "opensuse"

	// OpenSUSELeap is
	OpenSUSELeap = "opensuse.leap"

	// SUSEEnterpriseServer is
	SUSEEnterpriseServer = "suse.linux.enterprise.server"

	// SUSEEnterpriseDesktop is
	SUSEEnterpriseDesktop = "suse.linux.enterprise.desktop"

	// SUSEOpenstackCloud is
	SUSEOpenstackCloud = "suse.openstack.cloud"

	// Alpine is
	Alpine = "alpine"

	// ServerTypePseudo is used for ServerInfo.Type, r.Family
	ServerTypePseudo = "pseudo"

	// DeepSecurity is
	DeepSecurity = "deepsecurity"
)
@@ -22,6 +22,9 @@ func Parse(vulnJSON []byte, scanResult *models.ScanResult) (result *models.ScanR
vulnInfos := models.VulnInfos{}
uniqueLibraryScannerPaths := map[string]models.LibraryScanner{}
for _, trivyResult := range trivyResults {
if IsTrivySupportedOS(trivyResult.Type) {
overrideServerData(scanResult, &trivyResult)
}
for _, vuln := range trivyResult.Vulnerabilities {
if _, ok := vulnInfos[vuln.VulnerabilityID]; !ok {
vulnInfos[vuln.VulnerabilityID] = models.VulnInfo{
@@ -57,12 +60,24 @@ func Parse(vulnJSON []byte, scanResult *models.ScanResult) (result *models.ScanR
return references[i].Link < references[j].Link
})

var published time.Time
if vuln.PublishedDate != nil {
published = *vuln.PublishedDate
}

var lastModified time.Time
if vuln.LastModifiedDate != nil {
lastModified = *vuln.LastModifiedDate
}

vulnInfo.CveContents = models.CveContents{
models.Trivy: models.CveContent{
Cvss3Severity: vuln.Severity,
References: references,
Title: vuln.Title,
Summary: vuln.Description,
Published: published,
LastModified: lastModified,
},
}
// do only if image type is Vuln
@@ -77,16 +92,6 @@ func Parse(vulnJSON []byte, scanResult *models.ScanResult) (result *models.ScanR
FixState: fixState,
FixedIn: vuln.FixedVersion,
})

// overwrite every time if os package
scanResult.Family = trivyResult.Type
scanResult.ServerName = trivyResult.Target
scanResult.Optional = map[string]interface{}{
"trivy-target": trivyResult.Target,
}
scanResult.ScannedAt = time.Now()
scanResult.ScannedBy = "trivy"
scanResult.ScannedVia = "trivy"
} else {
// Results of the LibraryScan
vulnInfo.LibraryFixedIns = append(vulnInfo.LibraryFixedIns, models.LibraryFixedIn{
@@ -162,3 +167,14 @@ func IsTrivySupportedOS(family string) bool {
}
return false
}

func overrideServerData(scanResult *models.ScanResult, trivyResult *report.Result) {
scanResult.Family = trivyResult.Type
scanResult.ServerName = trivyResult.Target
scanResult.Optional = map[string]interface{}{
"trivy-target": trivyResult.Target,
}
scanResult.ScannedAt = time.Now()
scanResult.ScannedBy = "trivy"
scanResult.ScannedVia = "trivy"
}
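The hunk above guards the nil *time.Time pointers coming from the Trivy JSON before copying them into CveContent. A standalone illustration of the same pattern, with a made-up variable name:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var publishedDate *time.Time // e.g. Trivy reported no published date

	var published time.Time // stays at the zero value when the pointer is nil
	if publishedDate != nil {
		published = *publishedDate
	}
	fmt.Println(published.IsZero()) // true
}
```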
@@ -5,6 +5,7 @@ import (

"github.com/aquasecurity/trivy/pkg/types"
"github.com/d4l3k/messagediff"

"github.com/future-architect/vuls/models"
)

@@ -3205,6 +3206,33 @@ func TestParse(t *testing.T) {
Optional: map[string]interface{}{"trivy-target": "knqyf263/vuln-image:1.2.3 (alpine 3.7.1)"},
},
},
"found-no-vulns": {
vulnJSON: []byte(`[
{
"Target": "no-vuln-image:v1 (debian 9.13)",
"Type": "debian",
"Vulnerabilities": null
}
]
`),
scanResult: &models.ScanResult{
JSONVersion: 1,
ServerUUID: "uuid",
ScannedCves: models.VulnInfos{},
},
expected: &models.ScanResult{
JSONVersion: 1,
ServerUUID: "uuid",
ServerName: "no-vuln-image:v1 (debian 9.13)",
Family: "debian",
ScannedBy: "trivy",
ScannedVia: "trivy",
ScannedCves: models.VulnInfos{},
Packages: models.Packages{},
LibraryScanners: models.LibraryScanners{},
Optional: map[string]interface{}{"trivy-target": "no-vuln-image:v1 (debian 9.13)"},
},
},
}

for testcase, v := range cases {
@@ -1,6 +1,6 @@
// +build !scanner

package report
package detector

import (
"encoding/json"
@@ -13,69 +13,61 @@ import (
"golang.org/x/xerrors"

"github.com/future-architect/vuls/config"
"github.com/future-architect/vuls/logging"
"github.com/future-architect/vuls/util"
cvedb "github.com/kotakanbe/go-cve-dictionary/db"
cvelog "github.com/kotakanbe/go-cve-dictionary/log"
cvemodels "github.com/kotakanbe/go-cve-dictionary/models"
)

// CveClient is api client of CVE disctionary service.
var CveClient cvedictClient

type cvedictClient struct {
// httpProxy string
baseURL string
type goCveDictClient struct {
cnf config.VulnDictInterface
driver cvedb.DB
}

func (api *cvedictClient) initialize() {
api.baseURL = config.Conf.CveDict.URL
func newGoCveDictClient(cnf config.VulnDictInterface, o logging.LogOpts) (*goCveDictClient, error) {
cvelog.SetLogger(o.Debug, o.Quiet, false, o.LogToFile, o.LogDir)

driver, locked, err := newCveDB(cnf)
if locked {
return nil, xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
} else if err != nil {
return nil, err
}
return &goCveDictClient{cnf: cnf, driver: driver}, nil
}

func (api cvedictClient) CheckHealth() error {
if !config.Conf.CveDict.IsFetchViaHTTP() {
util.Log.Debugf("get cve-dictionary from %s", config.Conf.CveDict.Type)
func (api goCveDictClient) closeDB() error {
if api.driver == nil {
return nil
}

api.initialize()
url := fmt.Sprintf("%s/health", api.baseURL)
var errs []error
var resp *http.Response
resp, _, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
// resp, _, errs = gorequest.New().Proxy(api.httpProxy).Get(url).End()
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
return xerrors.Errorf("Failed to request to CVE server. url: %s, errs: %w",
url, errs)
if err := api.driver.CloseDB(); err != nil {
return xerrors.Errorf("Failed to close DB: %+v", err)
}
return nil
}

func (api goCveDictClient) fetchCveDetails(cveIDs []string) (cveDetails []cvemodels.CveDetail, err error) {
for _, cveID := range cveIDs {
cveDetail, err := api.driver.Get(cveID)
if err != nil {
return nil, xerrors.Errorf("Failed to fetch CVE. err: %w", err)
}
if len(cveDetail.CveID) == 0 {
cveDetails = append(cveDetails, cvemodels.CveDetail{CveID: cveID})
} else {
cveDetails = append(cveDetails, *cveDetail)
}
}
return
}

type response struct {
Key string
CveDetail cvemodels.CveDetail
}

func (api cvedictClient) FetchCveDetails(driver cvedb.DB, cveIDs []string) (cveDetails []cvemodels.CveDetail, err error) {
if !config.Conf.CveDict.IsFetchViaHTTP() {
if driver == nil {
return
}
for _, cveID := range cveIDs {
cveDetail, err := driver.Get(cveID)
if err != nil {
return nil, xerrors.Errorf("Failed to fetch CVE. err: %w", err)
}
if len(cveDetail.CveID) == 0 {
cveDetails = append(cveDetails, cvemodels.CveDetail{
CveID: cveID,
})
} else {
cveDetails = append(cveDetails, *cveDetail)
}
}
return
}

api.baseURL = config.Conf.CveDict.URL
func (api goCveDictClient) fetchCveDetailsViaHTTP(cveIDs []string) (cveDetails []cvemodels.CveDetail, err error) {
reqChan := make(chan string, len(cveIDs))
resChan := make(chan response, len(cveIDs))
errChan := make(chan error, len(cveIDs))
@@ -95,11 +87,11 @@ func (api cvedictClient) FetchCveDetails(driver cvedb.DB, cveIDs []string) (cveD
tasks <- func() {
select {
case cveID := <-reqChan:
url, err := util.URLPathJoin(api.baseURL, "cves", cveID)
url, err := util.URLPathJoin(api.cnf.GetURL(), "cves", cveID)
if err != nil {
errChan <- err
} else {
util.Log.Debugf("HTTP Request to %s", url)
logging.Log.Debugf("HTTP Request to %s", url)
api.httpGet(cveID, url, resChan, errChan)
}
}
@@ -131,22 +123,20 @@ func (api cvedictClient) FetchCveDetails(driver cvedb.DB, cveIDs []string) (cveD
return
}

func (api cvedictClient) httpGet(key, url string, resChan chan<- response, errChan chan<- error) {
func (api goCveDictClient) httpGet(key, url string, resChan chan<- response, errChan chan<- error) {
var body string
var errs []error
var resp *http.Response
f := func() (err error) {
// resp, body, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
resp, body, errs = gorequest.New().Get(url).End()
resp, body, errs = gorequest.New().Timeout(10 * time.Second).Get(url).End()
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
return xerrors.Errorf("HTTP GET Error, url: %s, resp: %v, err: %w",
return xerrors.Errorf("HTTP GET Error, url: %s, resp: %v, err: %+v",
url, resp, errs)
}
return nil
}
notify := func(err error, t time.Duration) {
util.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %s",
t, err)
logging.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %+v", t, err)
}
err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify)
if err != nil {
@@ -155,7 +145,7 @@ func (api cvedictClient) httpGet(key, url string, resChan chan<- response, errCh
}
cveDetail := cvemodels.CveDetail{}
if err := json.Unmarshal([]byte(body), &cveDetail); err != nil {
errChan <- xerrors.Errorf("Failed to Unmarshall. body: %s, err: %w", body, err)
errChan <- xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
return
}
resChan <- response{
@@ -164,39 +154,37 @@ func (api cvedictClient) httpGet(key, url string, resChan chan<- response, errCh
}
}

func (api cvedictClient) FetchCveDetailsByCpeName(driver cvedb.DB, cpeName string) ([]cvemodels.CveDetail, error) {
if config.Conf.CveDict.IsFetchViaHTTP() {
api.baseURL = config.Conf.CveDict.URL
url, err := util.URLPathJoin(api.baseURL, "cpes")
func (api goCveDictClient) fetchCveDetailsByCpeName(cpeName string) ([]cvemodels.CveDetail, error) {
if api.cnf.IsFetchViaHTTP() {
url, err := util.URLPathJoin(api.cnf.GetURL(), "cpes")
if err != nil {
return nil, err
}

query := map[string]string{"name": cpeName}
util.Log.Debugf("HTTP Request to %s, query: %#v", url, query)
logging.Log.Debugf("HTTP Request to %s, query: %#v", url, query)
return api.httpPost(cpeName, url, query)
}
return driver.GetByCpeURI(cpeName)
return api.driver.GetByCpeURI(cpeName)
}

func (api cvedictClient) httpPost(key, url string, query map[string]string) ([]cvemodels.CveDetail, error) {
func (api goCveDictClient) httpPost(key, url string, query map[string]string) ([]cvemodels.CveDetail, error) {
var body string
var errs []error
var resp *http.Response
f := func() (err error) {
// req := gorequest.New().SetDebug(config.Conf.Debug).Post(url)
req := gorequest.New().Post(url)
req := gorequest.New().Timeout(10 * time.Second).Post(url)
for key := range query {
req = req.Send(fmt.Sprintf("%s=%s", key, query[key])).Type("json")
}
resp, body, errs = req.End()
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
return xerrors.Errorf("HTTP POST error. url: %s, resp: %v, err: %w", url, resp, errs)
return xerrors.Errorf("HTTP POST error. url: %s, resp: %v, err: %+v", url, resp, errs)
}
return nil
}
notify := func(err error, t time.Duration) {
util.Log.Warnf("Failed to HTTP POST. retrying in %s seconds. err: %s", t, err)
logging.Log.Warnf("Failed to HTTP POST. retrying in %s seconds. err: %+v", t, err)
}
err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify)
if err != nil {
@@ -206,7 +194,23 @@ func (api cvedictClient) httpPost(key, url string, query map[string]string) ([]c
cveDetails := []cvemodels.CveDetail{}
if err := json.Unmarshal([]byte(body), &cveDetails); err != nil {
return nil,
xerrors.Errorf("Failed to Unmarshall. body: %s, err: %w", body, err)
xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
}
return cveDetails, nil
}

func newCveDB(cnf config.VulnDictInterface) (driver cvedb.DB, locked bool, err error) {
if cnf.IsFetchViaHTTP() {
return nil, false, nil
}
path := cnf.GetURL()
if cnf.GetType() == "sqlite3" {
path = cnf.GetSQLite3Path()
}
driver, locked, err = cvedb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL())
if err != nil {
err = xerrors.Errorf("Failed to init CVE DB. err: %w, path: %s", err, path)
return nil, locked, err
}
return driver, false, nil
}
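The refactored client keeps the cenkalti/backoff retry pattern used in httpGet and httpPost above. A standalone sketch of that pattern with a placeholder operation instead of a real HTTP call:

```go
package main

import (
	"log"
	"time"

	"github.com/cenkalti/backoff"
	"golang.org/x/xerrors"
)

func main() {
	attempts := 0
	operation := func() error {
		attempts++
		if attempts < 3 {
			return xerrors.New("transient failure") // pretend the HTTP GET failed
		}
		return nil
	}
	notify := func(err error, next time.Duration) {
		log.Printf("retrying in %s. err: %+v", next, err)
	}
	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		log.Fatalf("gave up: %+v", err)
	}
	log.Printf("succeeded after %d attempts", attempts)
}
```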
detector/detector.go (new file, +475 lines)
@@ -0,0 +1,475 @@
// +build !scanner

package detector

import (
	"os"
	"strings"
	"time"

	"github.com/future-architect/vuls/config"
	"github.com/future-architect/vuls/constant"
	"github.com/future-architect/vuls/contrib/owasp-dependency-check/parser"
	"github.com/future-architect/vuls/cwe"
	"github.com/future-architect/vuls/gost"
	"github.com/future-architect/vuls/logging"
	"github.com/future-architect/vuls/models"
	"github.com/future-architect/vuls/oval"
	"github.com/future-architect/vuls/reporter"
	"github.com/future-architect/vuls/util"
	cvemodels "github.com/kotakanbe/go-cve-dictionary/models"
	"golang.org/x/xerrors"
)

// Detect vulns and fill CVE detailed information
func Detect(rs []models.ScanResult, dir string) ([]models.ScanResult, error) {

	// Use the same reportedAt for all rs
	reportedAt := time.Now()
	for i, r := range rs {
		if !config.Conf.RefreshCve && !needToRefreshCve(r) {
			logging.Log.Info("No need to refresh")
			continue
		}

		if !reuseScannedCves(&r) {
			r.ScannedCves = models.VulnInfos{}
		}

		cpeURIs, owaspDCXMLPath := []string{}, ""
		if len(r.Container.ContainerID) == 0 {
			cpeURIs = config.Conf.Servers[r.ServerName].CpeNames
			owaspDCXMLPath = config.Conf.Servers[r.ServerName].OwaspDCXMLPath
		} else {
			if s, ok := config.Conf.Servers[r.ServerName]; ok {
				if con, ok := s.Containers[r.Container.Name]; ok {
					cpeURIs = con.Cpes
					owaspDCXMLPath = con.OwaspDCXMLPath
				}
			}
		}
		if owaspDCXMLPath != "" {
			cpes, err := parser.Parse(owaspDCXMLPath)
			if err != nil {
				return nil, xerrors.Errorf("Failed to read OWASP Dependency Check XML on %s, `%s`, err: %w",
					r.ServerInfo(), owaspDCXMLPath, err)
			}
			cpeURIs = append(cpeURIs, cpes...)
		}

		if err := DetectLibsCves(&r, config.Conf.TrivyCacheDBDir, config.Conf.NoProgress); err != nil {
			return nil, xerrors.Errorf("Failed to fill with Library dependency: %w", err)
		}

		if err := DetectPkgCves(&r, config.Conf.OvalDict, config.Conf.Gost); err != nil {
			return nil, xerrors.Errorf("Failed to detect Pkg CVE: %w", err)
		}

		if err := DetectCpeURIsCves(&r, cpeURIs, config.Conf.CveDict, config.Conf.LogOpts); err != nil {
			return nil, xerrors.Errorf("Failed to detect CVE of `%s`: %w", cpeURIs, err)
		}

		repos := config.Conf.Servers[r.ServerName].GitHubRepos
		if err := DetectGitHubCves(&r, repos, config.Conf.IgnoreGitHubDismissed); err != nil {
			return nil, xerrors.Errorf("Failed to detect GitHub Cves: %w", err)
		}

		if err := DetectWordPressCves(&r, config.Conf.WpScan); err != nil {
			return nil, xerrors.Errorf("Failed to detect WordPress Cves: %w", err)
		}

		logging.Log.Infof("Fill CVE detailed with gost")
		if err := gost.FillCVEsWithRedHat(&r, config.Conf.Gost); err != nil {
			return nil, xerrors.Errorf("Failed to fill with gost: %w", err)
		}

		logging.Log.Infof("Fill CVE detailed with go-cve-dictionary")
		if err := FillCvesWithNvdJvn(&r, config.Conf.CveDict, config.Conf.LogOpts); err != nil {
			return nil, xerrors.Errorf("Failed to fill with CVE: %w", err)
		}

		nExploitCve, err := FillWithExploit(&r, config.Conf.Exploit)
		if err != nil {
			return nil, xerrors.Errorf("Failed to fill with exploit: %w", err)
		}
		logging.Log.Infof("%s: %d exploits are detected", r.FormatServerName(), nExploitCve)

		nMetasploitCve, err := FillWithMetasploit(&r, config.Conf.Metasploit)
		if err != nil {
			return nil, xerrors.Errorf("Failed to fill with metasploit: %w", err)
		}
		logging.Log.Infof("%s: %d modules are detected", r.FormatServerName(), nMetasploitCve)

		FillCweDict(&r)

		r.ReportedBy, _ = os.Hostname()
		r.Lang = config.Conf.Lang
		r.ReportedAt = reportedAt
		r.ReportedVersion = config.Version
		r.ReportedRevision = config.Revision
		r.Config.Report = config.Conf
		r.Config.Report.Servers = map[string]config.ServerInfo{
			r.ServerName: config.Conf.Servers[r.ServerName],
		}
		rs[i] = r
	}

	// Overwrite the json file every time to clear the fields specified in config.IgnoredJSONKeys
	for _, r := range rs {
		if s, ok := config.Conf.Servers[r.ServerName]; ok {
			r = r.ClearFields(s.IgnoredJSONKeys)
		}
		//TODO don't call here
		if err := reporter.OverwriteJSONFile(dir, r); err != nil {
			return nil, xerrors.Errorf("Failed to write JSON: %w", err)
		}
	}

	if config.Conf.DiffPlus || config.Conf.DiffMinus {
		prevs, err := loadPrevious(rs, config.Conf.ResultsDir)
		if err != nil {
			return nil, err
		}
		rs = diff(rs, prevs, config.Conf.DiffPlus, config.Conf.DiffMinus)
	}

	for i, r := range rs {
		r.ScannedCves = r.ScannedCves.FilterByCvssOver(config.Conf.CvssScoreOver)
		r.ScannedCves = r.ScannedCves.FilterUnfixed(config.Conf.IgnoreUnfixed)

		// IgnoreCves
		ignoreCves := []string{}
		if r.Container.Name == "" {
			ignoreCves = config.Conf.Servers[r.ServerName].IgnoreCves
		} else if con, ok := config.Conf.Servers[r.ServerName].Containers[r.Container.Name]; ok {
			ignoreCves = con.IgnoreCves
		}
		r.ScannedCves = r.ScannedCves.FilterIgnoreCves(ignoreCves)

		// ignorePkgs
		ignorePkgsRegexps := []string{}
		if r.Container.Name == "" {
			ignorePkgsRegexps = config.Conf.Servers[r.ServerName].IgnorePkgsRegexp
		} else if s, ok := config.Conf.Servers[r.ServerName].Containers[r.Container.Name]; ok {
			ignorePkgsRegexps = s.IgnorePkgsRegexp
		}
		r.ScannedCves = r.ScannedCves.FilterIgnorePkgs(ignorePkgsRegexps)

		// IgnoreUnscored
		if config.Conf.IgnoreUnscoredCves {
			r.ScannedCves = r.ScannedCves.FindScoredVulns()
		}

		r.FilterInactiveWordPressLibs(config.Conf.WpScan.DetectInactive)
		rs[i] = r
	}
	return rs, nil
}

// DetectPkgCves detects OS pkg cves
// pass 2 configs
func DetectPkgCves(r *models.ScanResult, ovalCnf config.GovalDictConf, gostCnf config.GostConf) error {
	// Pkg Scan
	if r.Release != "" {
		// OVAL
		if err := detectPkgsCvesWithOval(ovalCnf, r); err != nil {
			return xerrors.Errorf("Failed to detect CVE with OVAL: %w", err)
		}

		// gost
		if err := detectPkgsCvesWithGost(gostCnf, r); err != nil {
			return xerrors.Errorf("Failed to detect CVE with gost: %w", err)
		}
	} else if reuseScannedCves(r) {
		logging.Log.Infof("r.Release is empty. Use CVEs as it as.")
	} else if r.Family == constant.ServerTypePseudo {
		logging.Log.Infof("pseudo type. Skip OVAL and gost detection")
	} else {
		return xerrors.Errorf("Failed to fill CVEs. r.Release is empty")
	}

	for i, v := range r.ScannedCves {
		for j, p := range v.AffectedPackages {
			if p.NotFixedYet && p.FixState == "" {
				p.FixState = "Not fixed yet"
				r.ScannedCves[i].AffectedPackages[j] = p
			}
		}
	}

	// To keep backward compatibility
	// Newer versions use ListenPortStats,
	// but older versions of Vuls are set to ListenPorts.
	// Set ListenPorts to ListenPortStats to allow newer Vuls to report old results.
	for i, pkg := range r.Packages {
		for j, proc := range pkg.AffectedProcs {
			for _, ipPort := range proc.ListenPorts {
				ps, err := models.NewPortStat(ipPort)
				if err != nil {
					logging.Log.Warnf("Failed to parse ip:port: %s, err:%+v", ipPort, err)
					continue
				}
				r.Packages[i].AffectedProcs[j].ListenPortStats = append(
					r.Packages[i].AffectedProcs[j].ListenPortStats, *ps)
			}
		}
	}

	return nil
}

// DetectGitHubCves fetches CVEs from GitHub Security Alerts
func DetectGitHubCves(r *models.ScanResult, githubConfs map[string]config.GitHubConf, ignoreDismissed bool) error {
	if len(githubConfs) == 0 {
		return nil
	}
	for ownerRepo, setting := range githubConfs {
		ss := strings.Split(ownerRepo, "/")
		if len(ss) != 2 {
			return xerrors.Errorf("Failed to parse GitHub owner/repo: %s", ownerRepo)
		}
		owner, repo := ss[0], ss[1]
		n, err := DetectGitHubSecurityAlerts(r, owner, repo, setting.Token, ignoreDismissed)
		if err != nil {
			return xerrors.Errorf("Failed to access GitHub Security Alerts: %w", err)
		}
		logging.Log.Infof("%s: %d CVEs detected with GHSA %s/%s",
			r.FormatServerName(), n, owner, repo)
	}
	return nil
}

// DetectWordPressCves detects CVEs of WordPress
func DetectWordPressCves(r *models.ScanResult, wpCnf config.WpScanConf) error {
	if len(r.WordPressPackages) == 0 {
		return nil
	}
	logging.Log.Infof("Detect WordPress CVE. pkgs: %d ", len(r.WordPressPackages))
	n, err := detectWordPressCves(r, wpCnf)
	if err != nil {
		return xerrors.Errorf("Failed to detect WordPress CVE: %w", err)
	}
	logging.Log.Infof("%s: found %d WordPress CVEs", r.FormatServerName(), n)
	return nil
}

// FillCvesWithNvdJvn fills CVE detail with NVD, JVN
func FillCvesWithNvdJvn(r *models.ScanResult, cnf config.GoCveDictConf, logOpts logging.LogOpts) (err error) {
	cveIDs := []string{}
	for _, v := range r.ScannedCves {
		cveIDs = append(cveIDs, v.CveID)
	}

	client, err := newGoCveDictClient(&cnf, logOpts)
	if err != nil {
		return err
	}
	defer func() {
		if err := client.closeDB(); err != nil {
			logging.Log.Errorf("Failed to close DB. err: %+v", err)
		}
	}()

	var ds []cvemodels.CveDetail
	if cnf.IsFetchViaHTTP() {
		ds, err = client.fetchCveDetailsViaHTTP(cveIDs)
	} else {
		ds, err = client.fetchCveDetails(cveIDs)
	}
	if err != nil {
		return err
	}

	for _, d := range ds {
		nvd, exploits, mitigations := models.ConvertNvdJSONToModel(d.CveID, d.NvdJSON)
		jvn := models.ConvertJvnToModel(d.CveID, d.Jvn)

		alerts := fillCertAlerts(&d)
		for cveID, vinfo := range r.ScannedCves {
			if vinfo.CveID == d.CveID {
				if vinfo.CveContents == nil {
					vinfo.CveContents = models.CveContents{}
				}
				for _, con := range []*models.CveContent{nvd, jvn} {
					if con != nil && !con.Empty() {
						vinfo.CveContents[con.Type] = *con
					}
				}
				vinfo.AlertDict = alerts
				vinfo.Exploits = append(vinfo.Exploits, exploits...)
				vinfo.Mitigations = append(vinfo.Mitigations, mitigations...)
				r.ScannedCves[cveID] = vinfo
				break
			}
		}
	}
	return nil
}

func fillCertAlerts(cvedetail *cvemodels.CveDetail) (dict models.AlertDict) {
	if cvedetail.NvdJSON != nil {
		for _, cert := range cvedetail.NvdJSON.Certs {
			dict.En = append(dict.En, models.Alert{
				URL: cert.Link,
				Title: cert.Title,
				Team: "us",
			})
		}
	}
	if cvedetail.Jvn != nil {
		for _, cert := range cvedetail.Jvn.Certs {
			dict.Ja = append(dict.Ja, models.Alert{
				URL: cert.Link,
				Title: cert.Title,
				Team: "jp",
			})
		}
	}
	return dict
}

// detectPkgsCvesWithOval fetches OVAL database
func detectPkgsCvesWithOval(cnf config.GovalDictConf, r *models.ScanResult) error {
	ovalClient, err := oval.NewOVALClient(r.Family, cnf)
	if err != nil {
		return err
	}
	if ovalClient == nil {
		return nil
	}

	logging.Log.Debugf("Check if oval fetched: %s %s", r.Family, r.Release)
	ok, err := ovalClient.CheckIfOvalFetched(r.Family, r.Release)
	if err != nil {
		return err
	}
	if !ok {
		return xerrors.Errorf("OVAL entries of %s %s are not found. Fetch OVAL before reporting. For details, see `https://github.com/kotakanbe/goval-dictionary#usage`", r.Family, r.Release)
	}

	logging.Log.Debugf("Check if oval fresh: %s %s", r.Family, r.Release)
	_, err = ovalClient.CheckIfOvalFresh(r.Family, r.Release)
	if err != nil {
		return err
	}

	logging.Log.Debugf("Fill with oval: %s %s", r.Family, r.Release)
	nCVEs, err := ovalClient.FillWithOval(r)
	if err != nil {
		return err
	}

	logging.Log.Infof("%s: %d CVEs are detected with OVAL", r.FormatServerName(), nCVEs)
	return nil
}

func detectPkgsCvesWithGost(cnf config.GostConf, r *models.ScanResult) error {
	client, err := gost.NewClient(cnf, r.Family)
	if err != nil {
		return xerrors.Errorf("Failed to new a gost client: %w", err)
	}

	nCVEs, err := client.DetectUnfixed(r, true)
	if err != nil {
		return xerrors.Errorf("Failed to detect unfixed CVEs with gost: %w", err)
	}

	logging.Log.Infof("%s: %d unfixed CVEs are detected with gost", r.FormatServerName(), nCVEs)
	return nil
}

// DetectCpeURIsCves detects CVEs of given CPE-URIs
func DetectCpeURIsCves(r *models.ScanResult, cpeURIs []string, cnf config.GoCveDictConf, logOpts logging.LogOpts) error {
	client, err := newGoCveDictClient(&cnf, logOpts)
	if err != nil {
		return err
	}
	defer func() {
		if err := client.closeDB(); err != nil {
			logging.Log.Errorf("Failed to close DB. err: %+v", err)
		}
	}()

	nCVEs := 0
	for _, name := range cpeURIs {
		details, err := client.fetchCveDetailsByCpeName(name)
		if err != nil {
			return err
		}
		for _, detail := range details {
			if val, ok := r.ScannedCves[detail.CveID]; ok {
				names := val.CpeURIs
				names = util.AppendIfMissing(names, name)
				val.CpeURIs = names
				val.Confidences.AppendIfMissing(models.CpeNameMatch)
				r.ScannedCves[detail.CveID] = val
			} else {
				v := models.VulnInfo{
					CveID: detail.CveID,
					CpeURIs: []string{name},
					Confidences: models.Confidences{models.CpeNameMatch},
				}
				r.ScannedCves[detail.CveID] = v
				nCVEs++
			}
		}
	}
	logging.Log.Infof("%s: %d CVEs are detected with CPE", r.FormatServerName(), nCVEs)
	return nil
}

// FillCweDict fills CWE
func FillCweDict(r *models.ScanResult) {
	uniqCweIDMap := map[string]bool{}
	for _, vinfo := range r.ScannedCves {
		for _, cont := range vinfo.CveContents {
			for _, id := range cont.CweIDs {
				if strings.HasPrefix(id, "CWE-") {
					id = strings.TrimPrefix(id, "CWE-")
					uniqCweIDMap[id] = true
				}
			}
		}
	}

	dict := map[string]models.CweDictEntry{}
	for id := range uniqCweIDMap {
		entry := models.CweDictEntry{}
		if e, ok := cwe.CweDictEn[id]; ok {
			if rank, ok := cwe.OwaspTopTen2017[id]; ok {
				entry.OwaspTopTen2017 = rank
			}
			if rank, ok := cwe.CweTopTwentyfive2019[id]; ok {
				entry.CweTopTwentyfive2019 = rank
			}
			if rank, ok := cwe.SansTopTwentyfive[id]; ok {
				entry.SansTopTwentyfive = rank
			}
			entry.En = &e
		} else {
			logging.Log.Debugf("CWE-ID %s is not found in English CWE Dict", id)
			entry.En = &cwe.Cwe{CweID: id}
		}

		if r.Lang == "ja" {
			if e, ok := cwe.CweDictJa[id]; ok {
				if rank, ok := cwe.OwaspTopTen2017[id]; ok {
					entry.OwaspTopTen2017 = rank
				}
				if rank, ok := cwe.CweTopTwentyfive2019[id]; ok {
					entry.CweTopTwentyfive2019 = rank
				}
				if rank, ok := cwe.SansTopTwentyfive[id]; ok {
					entry.SansTopTwentyfive = rank
				}
				entry.Ja = &e
			} else {
				logging.Log.Debugf("CWE-ID %s is not found in Japanese CWE Dict", id)
				entry.Ja = &cwe.Cwe{CweID: id}
			}
		}
		dict[id] = entry
	}
	r.CweDict = dict
	return
}
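A rough sketch of driving the new entry point above from calling code. It assumes the detector package path follows the repository layout, that config.Conf has already been loaded, and that the scan results and results directory path are supplied by code outside this diff.

```go
package main

import (
	"log"

	"github.com/future-architect/vuls/detector"
	"github.com/future-architect/vuls/models"
)

func main() {
	var rs []models.ScanResult // normally loaded from results/<timestamp>/*.json beforehand
	filled, err := detector.Detect(rs, "/path/to/results/current")
	if err != nil {
		log.Fatalf("detect failed: %+v", err)
	}
	for _, r := range filled {
		log.Printf("%s: %d CVEs after filtering", r.ServerName, len(r.ScannedCves))
	}
}
```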
223
detector/exploitdb.go
Normal file
223
detector/exploitdb.go
Normal file
@@ -0,0 +1,223 @@
|
||||
// +build !scanner
|
||||
|
||||
package detector
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
exploitdb "github.com/vulsio/go-exploitdb/db"
|
||||
exploitmodels "github.com/vulsio/go-exploitdb/models"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// FillWithExploit fills exploit information that has in Exploit
|
||||
func FillWithExploit(r *models.ScanResult, cnf config.ExploitConf) (nExploitCve int, err error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
var cveIDs []string
|
||||
for cveID := range r.ScannedCves {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
}
|
||||
prefix, _ := util.URLPathJoin(cnf.GetURL(), "cves")
|
||||
responses, err := getCvesViaHTTP(cveIDs, prefix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, res := range responses {
|
||||
exps := []*exploitmodels.Exploit{}
|
||||
if err := json.Unmarshal([]byte(res.json), &exps); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
exploits := ConvertToModels(exps)
|
||||
v, ok := r.ScannedCves[res.request.cveID]
|
||||
if ok {
|
||||
v.Exploits = exploits
|
||||
}
|
||||
r.ScannedCves[res.request.cveID] = v
|
||||
nExploitCve++
|
||||
}
|
||||
} else {
|
||||
|
||||
driver, locked, err := newExploitDB(&cnf)
|
||||
if locked {
|
||||
return 0, xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if cveID == "" {
|
||||
continue
|
||||
}
|
||||
es := driver.GetExploitByCveID(cveID)
|
||||
if len(es) == 0 {
|
||||
continue
|
||||
}
|
||||
exploits := ConvertToModels(es)
|
||||
vuln.Exploits = exploits
|
||||
r.ScannedCves[cveID] = vuln
|
||||
nExploitCve++
|
||||
}
|
||||
}
|
||||
return nExploitCve, nil
|
||||
}
|
||||
|
||||
// ConvertToModels converts gost model to vuls model
|
||||
func ConvertToModels(es []*exploitmodels.Exploit) (exploits []models.Exploit) {
|
||||
for _, e := range es {
|
||||
var documentURL, shellURL *string
|
||||
if e.OffensiveSecurity != nil {
|
||||
os := e.OffensiveSecurity
|
||||
if os.Document != nil {
|
||||
documentURL = &os.Document.DocumentURL
|
||||
}
|
||||
if os.ShellCode != nil {
|
||||
shellURL = &os.ShellCode.ShellCodeURL
|
||||
}
|
||||
}
|
||||
exploit := models.Exploit{
|
||||
ExploitType: e.ExploitType,
|
||||
ID: e.ExploitUniqueID,
|
||||
URL: e.URL,
|
||||
Description: e.Description,
|
||||
DocumentURL: documentURL,
|
||||
ShellCodeURL: shellURL,
|
||||
}
|
||||
exploits = append(exploits, exploit)
|
||||
}
|
||||
return exploits
|
||||
}
|
||||
|
||||
type exploitResponse struct {
|
||||
request request
|
||||
json string
|
||||
}
|
||||
|
||||
func getCvesViaHTTP(cveIDs []string, urlPrefix string) (
|
||||
responses []exploitResponse, err error) {
|
||||
nReq := len(cveIDs)
|
||||
reqChan := make(chan request, nReq)
|
||||
resChan := make(chan exploitResponse, nReq)
|
||||
errChan := make(chan error, nReq)
|
||||
defer close(reqChan)
|
||||
defer close(resChan)
|
||||
defer close(errChan)
|
||||
|
||||
go func() {
|
||||
for _, cveID := range cveIDs {
|
||||
reqChan <- request{
|
||||
cveID: cveID,
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
concurrency := 10
|
||||
tasks := util.GenWorkers(concurrency)
|
||||
for i := 0; i < nReq; i++ {
|
||||
tasks <- func() {
|
||||
select {
|
||||
case req := <-reqChan:
|
||||
url, err := util.URLPathJoin(
|
||||
urlPrefix,
|
||||
req.cveID,
|
||||
)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
logging.Log.Debugf("HTTP Request to %s", url)
|
||||
httpGet(url, req, resChan, errChan)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
timeout := time.After(2 * 60 * time.Second)
|
||||
var errs []error
|
||||
for i := 0; i < nReq; i++ {
|
||||
select {
|
||||
case res := <-resChan:
|
||||
responses = append(responses, res)
|
||||
case err := <-errChan:
|
||||
errs = append(errs, err)
|
||||
case <-timeout:
|
||||
return nil, xerrors.New("Timeout Fetching OVAL")
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil, xerrors.Errorf("Failed to fetch OVAL. err: %w", errs)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type request struct {
|
||||
osMajorVersion string
|
||||
packName string
|
||||
isSrcPack bool
|
||||
cveID string
|
||||
}
|
||||
|
||||
func httpGet(url string, req request, resChan chan<- exploitResponse, errChan chan<- error) {
|
||||
var body string
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
count, retryMax := 0, 3
|
||||
f := func() (err error) {
|
||||
// resp, body, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
|
||||
resp, body, errs = gorequest.New().Timeout(10 * time.Second).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
count++
|
||||
if count == retryMax {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, t time.Duration) {
|
||||
logging.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %+v", t, err)
|
||||
}
|
||||
err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify)
|
||||
if err != nil {
|
||||
errChan <- xerrors.Errorf("HTTP Error %w", err)
|
||||
return
|
||||
}
|
||||
if count == retryMax {
|
||||
errChan <- xerrors.New("Retry count exceeded")
|
||||
return
|
||||
}
|
||||
|
||||
resChan <- exploitResponse{
|
||||
request: req,
|
||||
json: body,
|
||||
}
|
||||
}
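httpGet wraps the request in cenkalti/backoff so transient failures are retried with exponentially growing waits, and it deliberately converts the retryMax-th failure into a non-retryable outcome by returning nil from the operation. A compact sketch of that idea follows; it assumes only the standard library plus github.com/cenkalti/backoff (already in go.mod), the URL is a placeholder, and response-body handling is trimmed.

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/cenkalti/backoff"
)

// getWithRetry mirrors the retry strategy of httpGet above: the operation
// returns an error while the request should be retried, and returning nil
// after maxAttempts failures stops the loop so the caller can report
// "retry count exceeded".
func getWithRetry(url string) (*http.Response, error) {
	var resp *http.Response
	attempts, maxAttempts := 0, 3

	operation := func() error {
		var err error
		resp, err = http.Get(url)
		if err != nil || resp.StatusCode != http.StatusOK {
			attempts++
			if attempts >= maxAttempts {
				return nil // give up quietly; checked below
			}
			return fmt.Errorf("GET %s failed (attempt %d): %v", url, attempts, err)
		}
		return nil
	}
	notify := func(err error, wait time.Duration) {
		fmt.Printf("retrying in %s: %v\n", wait, err)
	}

	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		return nil, err
	}
	if attempts >= maxAttempts {
		return nil, fmt.Errorf("retry count exceeded for %s", url)
	}
	return resp, nil
}

func main() {
	if resp, err := getWithRetry("https://example.com"); err != nil {
		fmt.Println(err)
	} else {
		fmt.Println(resp.Status)
	}
}
```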
|
||||
|
||||
func newExploitDB(cnf config.VulnDictInterface) (driver exploitdb.DB, locked bool, err error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
if driver, locked, err = exploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL()); err != nil {
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("exploitDB is locked. err: %w", err)
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return driver, false, nil
|
||||
}
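newExploitDB, newMetasploitDB further down, and newGostDB all share one shape: return no driver when results are fetched via HTTP, derive the DSN from the dictionary type, and report "locked" separately from other errors. Here is a generic sketch of that shape; vulnDictConf mirrors the config.VulnDictInterface methods used in the diff, and openFn stands in for the respective NewDB constructor (both names are illustrative).

```go
package sketch

import "golang.org/x/xerrors"

// vulnDictConf mirrors the subset of config.VulnDictInterface used above.
type vulnDictConf interface {
	IsFetchViaHTTP() bool
	GetType() string
	GetURL() string
	GetSQLite3Path() string
	GetDebugSQL() bool
}

// openFn stands in for exploitdb.NewDB / metasploitdb.NewDB / gost db.NewDB.
type openFn func(dbType, path string, debugSQL bool) (db interface{}, locked bool, err error)

// newVulnDB opens a local vulnerability dictionary unless the config says to
// query it over HTTP, in which case no local driver is needed at all.
func newVulnDB(cnf vulnDictConf, open openFn) (interface{}, bool, error) {
	if cnf.IsFetchViaHTTP() {
		return nil, false, nil
	}
	path := cnf.GetURL()
	if cnf.GetType() == "sqlite3" {
		path = cnf.GetSQLite3Path()
	}
	driver, locked, err := open(cnf.GetType(), path, cnf.GetDebugSQL())
	if err != nil {
		if locked {
			return nil, true, xerrors.Errorf("DB is locked. err: %w", err)
		}
		return nil, false, err
	}
	return driver, false, nil
}
```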
|
||||
@@ -1,4 +1,4 @@
|
||||
package github
|
||||
package detector
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
@@ -9,18 +9,18 @@ import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/errof"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
// FillGitHubSecurityAlerts access to owner/repo on GitHub and fetch security alerts of the repository via GitHub API v4 GraphQL and then set to the given ScanResult.
|
||||
// DetectGitHubSecurityAlerts accesses owner/repo on GitHub, fetches security alerts of the repository via the GitHub API v4 GraphQL endpoint, and then sets them to the given ScanResult.
|
||||
// https://help.github.com/articles/about-security-alerts-for-vulnerable-dependencies/
|
||||
func FillGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string) (nCVEs int, err error) {
|
||||
func DetectGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string, ignoreDismissed bool) (nCVEs int, err error) {
|
||||
src := oauth2.StaticTokenSource(
|
||||
&oauth2.Token{AccessToken: token},
|
||||
)
|
||||
//TODO Proxy
|
||||
httpClient := oauth2.NewClient(context.Background(), src)
|
||||
|
||||
// TODO Use `https://github.com/shurcooL/githubv4` if the tool supports vulnerabilityAlerts Endpoint
|
||||
@@ -31,10 +31,12 @@ func FillGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string) (
|
||||
|
||||
for {
|
||||
jsonStr := fmt.Sprintf(jsonfmt, owner, repo, 100, after)
|
||||
req, err := http.NewRequest("POST",
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
|
||||
"https://api.github.com/graphql",
|
||||
bytes.NewBuffer([]byte(jsonStr)),
|
||||
)
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -65,14 +67,12 @@ func FillGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string) (
|
||||
// util.Log.Debugf("%s", pp.Sprint(alerts))
|
||||
// util.Log.Debugf("%s", string(body))
|
||||
if alerts.Data.Repository.URL == "" {
|
||||
return 0, errof.New(
|
||||
errof.ErrFailedToAccessGithubAPI,
|
||||
fmt.Sprintf("Failed to access to GitHub API. Response: %s", string(body)),
|
||||
)
|
||||
return 0, errof.New(errof.ErrFailedToAccessGithubAPI,
|
||||
fmt.Sprintf("Failed to access to GitHub API. Response: %s", string(body)))
|
||||
}
|
||||
|
||||
for _, v := range alerts.Data.Repository.VulnerabilityAlerts.Edges {
|
||||
if config.Conf.IgnoreGitHubDismissed && v.Node.DismissReason != "" {
|
||||
if ignoreDismissed && v.Node.DismissReason != "" {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -102,20 +102,39 @@ func FillGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string) (
|
||||
cveIDs = other
|
||||
}
|
||||
|
||||
refs := []models.Reference{}
|
||||
for _, r := range v.Node.SecurityAdvisory.References {
|
||||
refs = append(refs, models.Reference{Link: r.URL})
|
||||
}
|
||||
|
||||
for _, cveID := range cveIDs {
|
||||
cveContent := models.CveContent{
|
||||
Type: models.GitHub,
|
||||
CveID: cveID,
|
||||
Title: v.Node.SecurityAdvisory.Summary,
|
||||
Summary: v.Node.SecurityAdvisory.Description,
|
||||
Cvss2Severity: v.Node.SecurityVulnerability.Severity,
|
||||
Cvss3Severity: v.Node.SecurityVulnerability.Severity,
|
||||
SourceLink: v.Node.SecurityAdvisory.Permalink,
|
||||
References: refs,
|
||||
Published: v.Node.SecurityAdvisory.PublishedAt,
|
||||
LastModified: v.Node.SecurityAdvisory.UpdatedAt,
|
||||
}
|
||||
|
||||
if val, ok := r.ScannedCves[cveID]; ok {
|
||||
val.GitHubSecurityAlerts = val.GitHubSecurityAlerts.Add(m)
|
||||
val.CveContents[models.GitHub] = cveContent
|
||||
r.ScannedCves[cveID] = val
|
||||
nCVEs++
|
||||
} else {
|
||||
v := models.VulnInfo{
|
||||
CveID: cveID,
|
||||
Confidences: models.Confidences{models.GitHubMatch},
|
||||
GitHubSecurityAlerts: models.GitHubSecurityAlerts{m},
|
||||
CveContents: models.NewCveContents(cveContent),
|
||||
}
|
||||
r.ScannedCves[cveID] = v
|
||||
nCVEs++
|
||||
}
|
||||
nCVEs++
|
||||
}
|
||||
}
|
||||
if !alerts.Data.Repository.VulnerabilityAlerts.PageInfo.HasNextPage {
|
||||
@@ -1,4 +1,4 @@
|
||||
package libmanager
|
||||
package detector
|
||||
|
||||
import (
|
||||
"context"
|
||||
@@ -12,13 +12,12 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
"k8s.io/utils/clock"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
)
|
||||
|
||||
// DetectLibsCves detects CVEs in the LibraryScanners of the given ScanResult
|
||||
func DetectLibsCves(r *models.ScanResult) (err error) {
|
||||
func DetectLibsCves(r *models.ScanResult, cacheDir string, noProgress bool) (err error) {
|
||||
totalCnt := 0
|
||||
if len(r.LibraryScanners) == 0 {
|
||||
return
|
||||
@@ -30,12 +29,12 @@ func DetectLibsCves(r *models.ScanResult) (err error) {
|
||||
return err
|
||||
}
|
||||
|
||||
util.Log.Info("Updating library db...")
|
||||
if err := downloadDB(config.Version, config.Conf.TrivyCacheDBDir, config.Conf.NoProgress, false, false); err != nil {
|
||||
logging.Log.Info("Updating library db...")
|
||||
if err := downloadDB("", cacheDir, noProgress, false, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := db2.Init(config.Conf.TrivyCacheDBDir); err != nil {
|
||||
if err := db2.Init(cacheDir); err != nil {
|
||||
return err
|
||||
}
|
||||
defer db2.Close()
|
||||
@@ -57,7 +56,7 @@ func DetectLibsCves(r *models.ScanResult) (err error) {
|
||||
totalCnt += len(vinfos)
|
||||
}
|
||||
|
||||
util.Log.Infof("%s: %d CVEs are detected with Library",
|
||||
logging.Log.Infof("%s: %d CVEs are detected with Library",
|
||||
r.FormatServerName(), totalCnt)
|
||||
|
||||
return nil
|
||||
@@ -72,8 +71,8 @@ func downloadDB(appVersion, cacheDir string, quiet, light, skipUpdate bool) erro
|
||||
}
|
||||
|
||||
if needsUpdate {
|
||||
util.Log.Info("Need to update DB")
|
||||
util.Log.Info("Downloading DB...")
|
||||
logging.Log.Info("Need to update DB")
|
||||
logging.Log.Info("Downloading DB...")
|
||||
if err := client.Download(ctx, cacheDir, light); err != nil {
|
||||
return xerrors.Errorf("failed to download vulnerability DB: %w", err)
|
||||
}
|
||||
@@ -106,7 +105,7 @@ func showDBInfo(cacheDir string) error {
|
||||
if err != nil {
|
||||
return xerrors.Errorf("something wrong with DB: %w", err)
|
||||
}
|
||||
util.Log.Debugf("DB Schema: %d, Type: %d, UpdatedAt: %s, NextUpdate: %s",
|
||||
logging.Log.Debugf("DB Schema: %d, Type: %d, UpdatedAt: %s, NextUpdate: %s",
|
||||
metadata.Version, metadata.Type, metadata.UpdatedAt, metadata.NextUpdate)
|
||||
return nil
|
||||
}
|
||||
detector/msf.go (new file, 81 lines)
@@ -0,0 +1,81 @@
|
||||
// +build !scanner
|
||||
|
||||
package detector
|
||||
|
||||
import (
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
metasploitdb "github.com/takuzoo3868/go-msfdb/db"
|
||||
metasploitmodels "github.com/takuzoo3868/go-msfdb/models"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// FillWithMetasploit fills Metasploit module information into the given ScanResult
|
||||
func FillWithMetasploit(r *models.ScanResult, cnf config.MetasploitConf) (nMetasploitCve int, err error) {
|
||||
|
||||
driver, locked, err := newMetasploitDB(&cnf)
|
||||
if locked {
|
||||
return 0, xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v")
|
||||
}
|
||||
}()
|
||||
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if cveID == "" {
|
||||
continue
|
||||
}
|
||||
ms := driver.GetModuleByCveID(cveID)
|
||||
if len(ms) == 0 {
|
||||
continue
|
||||
}
|
||||
modules := ConvertToModelsMsf(ms)
|
||||
vuln.Metasploits = modules
|
||||
r.ScannedCves[cveID] = vuln
|
||||
nMetasploitCve++
|
||||
}
|
||||
|
||||
return nMetasploitCve, nil
|
||||
}
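FillWithExploit and FillWithMetasploit run the same enrichment loop: for each scanned CVE, look up extra records by CVE-ID, attach them, write the value back into the map, and count the hits. The write-back matters because ranging over a Go map yields copies. A stripped-down sketch of that loop with stand-in types (vulnInfo and the lookup callback are illustrative, not the real models):

```go
package sketch

// vulnInfo is a stand-in for models.VulnInfo.
type vulnInfo struct {
	CveID  string
	Extras []string
}

// enrich attaches lookup results to each scanned CVE and reports how many
// CVEs received extra data. The scanned[cveID] = vuln assignment is
// required: mutating the loop variable alone would be lost, since map
// iteration hands out copies of the values.
func enrich(scanned map[string]vulnInfo, lookup func(cveID string) []string) int {
	n := 0
	for cveID, vuln := range scanned {
		if cveID == "" {
			continue
		}
		extras := lookup(cveID)
		if len(extras) == 0 {
			continue
		}
		vuln.Extras = extras
		scanned[cveID] = vuln
		n++
	}
	return n
}
```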
|
||||
|
||||
// ConvertToModelsMsf converts go-msfdb models to vuls models
|
||||
func ConvertToModelsMsf(ms []*metasploitmodels.Metasploit) (modules []models.Metasploit) {
|
||||
for _, m := range ms {
|
||||
var links []string
|
||||
if 0 < len(m.References) {
|
||||
for _, u := range m.References {
|
||||
links = append(links, u.Link)
|
||||
}
|
||||
}
|
||||
module := models.Metasploit{
|
||||
Name: m.Name,
|
||||
Title: m.Title,
|
||||
Description: m.Description,
|
||||
URLs: links,
|
||||
}
|
||||
modules = append(modules, module)
|
||||
}
|
||||
return modules
|
||||
}
|
||||
|
||||
func newMetasploitDB(cnf config.VulnDictInterface) (driver metasploitdb.DB, locked bool, err error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
if driver, locked, err = metasploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), false); err != nil {
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("metasploitDB is locked. err: %w", err)
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return driver, false, nil
|
||||
}
|
||||
detector/util.go (new file, 270 lines)
@@ -0,0 +1,270 @@
|
||||
package detector
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
func reuseScannedCves(r *models.ScanResult) bool {
|
||||
switch r.Family {
|
||||
case constant.FreeBSD, constant.Raspbian:
|
||||
return true
|
||||
}
|
||||
if isTrivyResult(r) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isTrivyResult(r *models.ScanResult) bool {
|
||||
_, ok := r.Optional["trivy-target"]
|
||||
return ok
|
||||
}
|
||||
|
||||
func needToRefreshCve(r models.ScanResult) bool {
|
||||
for _, cve := range r.ScannedCves {
|
||||
if 0 < len(cve.CveContents) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func loadPrevious(currs models.ScanResults, resultsDir string) (prevs models.ScanResults, err error) {
|
||||
dirs, err := ListValidJSONDirs(resultsDir)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
for _, result := range currs {
|
||||
filename := result.ServerName + ".json"
|
||||
if result.Container.Name != "" {
|
||||
filename = fmt.Sprintf("%s@%s.json", result.Container.Name, result.ServerName)
|
||||
}
|
||||
for _, dir := range dirs[1:] {
|
||||
path := filepath.Join(dir, filename)
|
||||
r, err := loadOneServerScanResult(path)
|
||||
if err != nil {
|
||||
logging.Log.Debugf("%+v", err)
|
||||
continue
|
||||
}
|
||||
if r.Family == result.Family && r.Release == result.Release {
|
||||
prevs = append(prevs, *r)
|
||||
logging.Log.Infof("Previous json found: %s", path)
|
||||
break
|
||||
} else {
|
||||
logging.Log.Infof("Previous json is different family.Release: %s, pre: %s.%s cur: %s.%s",
|
||||
path, r.Family, r.Release, result.Family, result.Release)
|
||||
}
|
||||
}
|
||||
}
|
||||
return prevs, nil
|
||||
}
|
||||
|
||||
func diff(curResults, preResults models.ScanResults, isPlus, isMinus bool) (diffed models.ScanResults) {
|
||||
for _, current := range curResults {
|
||||
found := false
|
||||
var previous models.ScanResult
|
||||
for _, r := range preResults {
|
||||
if current.ServerName == r.ServerName && current.Container.Name == r.Container.Name {
|
||||
found = true
|
||||
previous = r
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !found {
|
||||
diffed = append(diffed, current)
|
||||
continue
|
||||
}
|
||||
|
||||
cves := models.VulnInfos{}
|
||||
if isPlus {
|
||||
cves = getPlusDiffCves(previous, current)
|
||||
}
|
||||
if isMinus {
|
||||
minus := getMinusDiffCves(previous, current)
|
||||
if len(cves) == 0 {
|
||||
cves = minus
|
||||
} else {
|
||||
for k, v := range minus {
|
||||
cves[k] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
packages := models.Packages{}
|
||||
for _, s := range cves {
|
||||
for _, affected := range s.AffectedPackages {
|
||||
var p models.Package
|
||||
if s.DiffStatus == models.DiffPlus {
|
||||
p = current.Packages[affected.Name]
|
||||
} else {
|
||||
p = previous.Packages[affected.Name]
|
||||
}
|
||||
packages[affected.Name] = p
|
||||
}
|
||||
}
|
||||
current.ScannedCves = cves
|
||||
current.Packages = packages
|
||||
diffed = append(diffed, current)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func getPlusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
previousCveIDsSet := map[string]bool{}
|
||||
for _, previousVulnInfo := range previous.ScannedCves {
|
||||
previousCveIDsSet[previousVulnInfo.CveID] = true
|
||||
}
|
||||
|
||||
new := models.VulnInfos{}
|
||||
updated := models.VulnInfos{}
|
||||
for _, v := range current.ScannedCves {
|
||||
if previousCveIDsSet[v.CveID] {
|
||||
if isCveInfoUpdated(v.CveID, previous, current) {
|
||||
v.DiffStatus = models.DiffPlus
|
||||
updated[v.CveID] = v
|
||||
logging.Log.Debugf("updated: %s", v.CveID)
|
||||
|
||||
// TODO: commented out because of a bug in the diff logic when multiple OVAL defs are found for a certain CVE-ID with the same updated_at.
|
||||
// If these OVAL defs have different affected packages, this logic detects the CVE as updated.
|
||||
// This logic will be uncommented after integration with gost https://github.com/knqyf263/gost
|
||||
// } else if isCveFixed(v, previous) {
|
||||
// updated[v.CveID] = v
|
||||
// logging.Log.Debugf("fixed: %s", v.CveID)
|
||||
|
||||
} else {
|
||||
logging.Log.Debugf("same: %s", v.CveID)
|
||||
}
|
||||
} else {
|
||||
logging.Log.Debugf("new: %s", v.CveID)
|
||||
v.DiffStatus = models.DiffPlus
|
||||
new[v.CveID] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(updated) == 0 && len(new) == 0 {
|
||||
logging.Log.Infof("%s: There are %d vulnerabilities, but no difference between current result and previous one.", current.FormatServerName(), len(current.ScannedCves))
|
||||
}
|
||||
|
||||
for cveID, vuln := range new {
|
||||
updated[cveID] = vuln
|
||||
}
|
||||
return updated
|
||||
}
|
||||
|
||||
func getMinusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
currentCveIDsSet := map[string]bool{}
|
||||
for _, currentVulnInfo := range current.ScannedCves {
|
||||
currentCveIDsSet[currentVulnInfo.CveID] = true
|
||||
}
|
||||
|
||||
clear := models.VulnInfos{}
|
||||
for _, v := range previous.ScannedCves {
|
||||
if !currentCveIDsSet[v.CveID] {
|
||||
v.DiffStatus = models.DiffMinus
|
||||
clear[v.CveID] = v
|
||||
logging.Log.Debugf("clear: %s", v.CveID)
|
||||
}
|
||||
}
|
||||
if len(clear) == 0 {
|
||||
logging.Log.Infof("%s: There are %d vulnerabilities, but no difference between current result and previous one.", current.FormatServerName(), len(current.ScannedCves))
|
||||
}
|
||||
|
||||
return clear
|
||||
}
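The plus/minus diff above boils down to set operations over CVE IDs: "plus" is what is present now but not before (or whose content changed), "minus" is what was present before but is gone now. A minimal sketch of that core, leaving out the LastModified comparison:

```go
package main

import "fmt"

// diffIDs returns the IDs newly present in current (plus) and the IDs that
// disappeared since previous (minus).
func diffIDs(previous, current []string) (plus, minus []string) {
	prevSet := map[string]bool{}
	for _, id := range previous {
		prevSet[id] = true
	}
	curSet := map[string]bool{}
	for _, id := range current {
		curSet[id] = true
	}
	for _, id := range current {
		if !prevSet[id] {
			plus = append(plus, id)
		}
	}
	for _, id := range previous {
		if !curSet[id] {
			minus = append(minus, id)
		}
	}
	return plus, minus
}

func main() {
	plus, minus := diffIDs(
		[]string{"CVE-2020-0001", "CVE-2020-0002"},
		[]string{"CVE-2020-0002", "CVE-2021-0003"},
	)
	fmt.Println(plus)  // [CVE-2021-0003]
	fmt.Println(minus) // [CVE-2020-0001]
}
```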
|
||||
|
||||
func isCveInfoUpdated(cveID string, previous, current models.ScanResult) bool {
|
||||
cTypes := []models.CveContentType{
|
||||
models.Nvd,
|
||||
models.Jvn,
|
||||
models.NewCveContentType(current.Family),
|
||||
}
|
||||
|
||||
prevLastModified := map[models.CveContentType]time.Time{}
|
||||
preVinfo, ok := previous.ScannedCves[cveID]
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
for _, cType := range cTypes {
|
||||
if content, ok := preVinfo.CveContents[cType]; ok {
|
||||
prevLastModified[cType] = content.LastModified
|
||||
}
|
||||
}
|
||||
|
||||
curLastModified := map[models.CveContentType]time.Time{}
|
||||
curVinfo, ok := current.ScannedCves[cveID]
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
for _, cType := range cTypes {
|
||||
if content, ok := curVinfo.CveContents[cType]; ok {
|
||||
curLastModified[cType] = content.LastModified
|
||||
}
|
||||
}
|
||||
|
||||
for _, t := range cTypes {
|
||||
if !curLastModified[t].Equal(prevLastModified[t]) {
|
||||
logging.Log.Debugf("%s LastModified not equal: \n%s\n%s",
|
||||
cveID, curLastModified[t], prevLastModified[t])
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// jsonDirPattern is file name pattern of JSON directory
|
||||
// 2016-11-16T10:43:28+09:00
|
||||
// 2016-11-16T10:43:28Z
|
||||
var jsonDirPattern = regexp.MustCompile(
|
||||
`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:Z|[+-]\d{2}:\d{2})$`)
|
||||
|
||||
// ListValidJSONDirs returns the valid JSON directories as an array
|
||||
// The returned array is sorted so that recent directories are at the head
|
||||
func ListValidJSONDirs(resultsDir string) (dirs []string, err error) {
|
||||
var dirInfo []os.FileInfo
|
||||
if dirInfo, err = ioutil.ReadDir(resultsDir); err != nil {
|
||||
err = xerrors.Errorf("Failed to read %s: %w",
|
||||
config.Conf.ResultsDir, err)
|
||||
return
|
||||
}
|
||||
for _, d := range dirInfo {
|
||||
if d.IsDir() && jsonDirPattern.MatchString(d.Name()) {
|
||||
jsonDir := filepath.Join(resultsDir, d.Name())
|
||||
dirs = append(dirs, jsonDir)
|
||||
}
|
||||
}
|
||||
sort.Slice(dirs, func(i, j int) bool {
|
||||
return dirs[j] < dirs[i]
|
||||
})
|
||||
return
|
||||
}
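ListValidJSONDirs keeps only directory names matching jsonDirPattern and sorts them newest-first by comparing the names as strings. A small standalone check of that behaviour; the directory names here are made up:

```go
package main

import (
	"fmt"
	"regexp"
	"sort"
)

// jsonDirPattern is copied from the code above; it accepts RFC 3339-style
// timestamp directory names such as 2016-11-16T10:43:28+09:00.
var jsonDirPattern = regexp.MustCompile(
	`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:Z|[+-]\d{2}:\d{2})$`)

func main() {
	names := []string{
		"2016-11-16T10:43:28+09:00",
		"2021-01-05T09:00:00Z",
		"not-a-result-dir",
	}
	var dirs []string
	for _, n := range names {
		if jsonDirPattern.MatchString(n) {
			dirs = append(dirs, n)
		}
	}
	// Same comparator as ListValidJSONDirs: dirs[j] < dirs[i] puts the
	// lexicographically largest (i.e. newest) name first.
	sort.Slice(dirs, func(i, j int) bool { return dirs[j] < dirs[i] })
	fmt.Println(dirs) // [2021-01-05T09:00:00Z 2016-11-16T10:43:28+09:00]
}
```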
|
||||
|
||||
// loadOneServerScanResult reads the JSON scan result of one server
|
||||
func loadOneServerScanResult(jsonFile string) (*models.ScanResult, error) {
|
||||
var (
|
||||
data []byte
|
||||
err error
|
||||
)
|
||||
if data, err = ioutil.ReadFile(jsonFile); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to read %s: %w", jsonFile, err)
|
||||
}
|
||||
result := &models.ScanResult{}
|
||||
if err := json.Unmarshal(data, result); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to parse %s: %w", jsonFile, err)
|
||||
}
|
||||
return result, nil
|
||||
}
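loadOneServerScanResult is a plain read-file-and-unmarshal helper. A hedged standalone equivalent is shown below; the scanResult struct and its JSON field names are illustrative stand-ins, not the real models.ScanResult.

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
)

// scanResult is a minimal stand-in for models.ScanResult.
type scanResult struct {
	ServerName string `json:"serverName"`
	Family     string `json:"family"`
	Release    string `json:"release"`
}

// loadResult reads one JSON file and parses it into a scanResult.
func loadResult(path string) (*scanResult, error) {
	data, err := ioutil.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read %s: %w", path, err)
	}
	var r scanResult
	if err := json.Unmarshal(data, &r); err != nil {
		return nil, fmt.Errorf("failed to parse %s: %w", path, err)
	}
	return &r, nil
}

func main() {
	r, err := loadResult("results/2021-01-05T09:00:00Z/server1.json")
	fmt.Println(r, err)
}
```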
|
||||
@@ -1,6 +1,7 @@
|
||||
package wordpress
|
||||
package detector
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
@@ -8,14 +9,16 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
c "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/errof"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
version "github.com/hashicorp/go-version"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
//WpCveInfos is for wpvulndb's json
|
||||
//WpCveInfos is for wpscan json
|
||||
type WpCveInfos struct {
|
||||
ReleaseDate string `json:"release_date"`
|
||||
ChangelogURL string `json:"changelog_url"`
|
||||
@@ -27,153 +30,75 @@ type WpCveInfos struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
//WpCveInfo is for wpvulndb's json
|
||||
//WpCveInfo is for wpscan json
|
||||
type WpCveInfo struct {
|
||||
ID int `json:"id"`
|
||||
Title string `json:"title"`
|
||||
CreatedAt string `json:"created_at"`
|
||||
UpdatedAt string `json:"updated_at"`
|
||||
// PublishedDate string `json:"published_date"`
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
CreatedAt time.Time `json:"created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at"`
|
||||
VulnType string `json:"vuln_type"`
|
||||
References References `json:"references"`
|
||||
FixedIn string `json:"fixed_in"`
|
||||
}
|
||||
|
||||
//References is for wpvulndb's json
|
||||
//References is for wpscan json
|
||||
type References struct {
|
||||
URL []string `json:"url"`
|
||||
Cve []string `json:"cve"`
|
||||
Secunia []string `json:"secunia"`
|
||||
}
|
||||
|
||||
// FillWordPress access to wpvulndb and fetch scurity alerts and then set to the given ScanResult.
|
||||
// DetectWordPressCves accesses wpscan.com, fetches security alerts, and then sets them to the given ScanResult.
|
||||
// https://wpscan.com/
|
||||
func FillWordPress(r *models.ScanResult, token string, wpVulnCaches *map[string]string) (int, error) {
|
||||
func detectWordPressCves(r *models.ScanResult, cnf config.WpScanConf) (int, error) {
|
||||
if len(r.WordPressPackages) == 0 {
|
||||
return 0, nil
|
||||
}
|
||||
// Core
|
||||
ver := strings.Replace(r.WordPressPackages.CoreVersion(), ".", "", -1)
|
||||
if ver == "" {
|
||||
return 0, xerrors.New("Failed to get WordPress core version")
|
||||
return 0, errof.New(errof.ErrFailedToAccessWpScan,
|
||||
fmt.Sprintf("Failed to get WordPress core version."))
|
||||
}
|
||||
|
||||
body, ok := searchCache(ver, wpVulnCaches)
|
||||
if !ok {
|
||||
url := fmt.Sprintf("https://wpscan.com/api/v3/wordpresses/%s", ver)
|
||||
var err error
|
||||
body, err = httpRequest(url, token)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
if body == "" {
|
||||
util.Log.Warnf("A result of REST access is empty: %s", url)
|
||||
}
|
||||
|
||||
(*wpVulnCaches)[ver] = body
|
||||
}
|
||||
|
||||
wpVinfos, err := convertToVinfos(models.WPCore, body)
|
||||
url := fmt.Sprintf("https://wpscan.com/api/v3/wordpresses/%s", ver)
|
||||
wpVinfos, err := wpscan(url, ver, cnf.Token, true)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
themes := r.WordPressPackages.Themes()
|
||||
plugins := r.WordPressPackages.Plugins()
|
||||
|
||||
if c.Conf.WpIgnoreInactive {
|
||||
themes = removeInactives(themes)
|
||||
plugins = removeInactives(plugins)
|
||||
}
|
||||
|
||||
// Themes
|
||||
themes := r.WordPressPackages.Themes()
|
||||
if !cnf.DetectInactive {
|
||||
themes = removeInactives(themes)
|
||||
}
|
||||
for _, p := range themes {
|
||||
body, ok := searchCache(p.Name, wpVulnCaches)
|
||||
if !ok {
|
||||
url := fmt.Sprintf("https://wpscan.com/api/v3/themes/%s", p.Name)
|
||||
var err error
|
||||
body, err = httpRequest(url, token)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
(*wpVulnCaches)[p.Name] = body
|
||||
}
|
||||
|
||||
if body == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
templateVinfos, err := convertToVinfos(p.Name, body)
|
||||
url := fmt.Sprintf("https://wpscan.com/api/v3/themes/%s", p.Name)
|
||||
candidates, err := wpscan(url, p.Name, cnf.Token, false)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
for _, v := range templateVinfos {
|
||||
for _, fixstat := range v.WpPackageFixStats {
|
||||
pkg, ok := r.WordPressPackages.Find(fixstat.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
ok, err := match(pkg.Version, fixstat.FixedIn)
|
||||
if err != nil {
|
||||
util.Log.Infof("[poor] %s installed: %s, fixedIn: %s", pkg.Name, pkg.Version, fixstat.FixedIn)
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
wpVinfos = append(wpVinfos, v)
|
||||
util.Log.Infof("[match] %s installed: %s, fixedIn: %s", pkg.Name, pkg.Version, fixstat.FixedIn)
|
||||
} else {
|
||||
util.Log.Debugf("[miss] %s installed: %s, fixedIn: %s", pkg.Name, pkg.Version, fixstat.FixedIn)
|
||||
}
|
||||
}
|
||||
}
|
||||
vulns := detect(p, candidates)
|
||||
wpVinfos = append(wpVinfos, vulns...)
|
||||
}
|
||||
|
||||
// Plugins
|
||||
plugins := r.WordPressPackages.Plugins()
|
||||
if !cnf.DetectInactive {
|
||||
plugins = removeInactives(plugins)
|
||||
}
|
||||
for _, p := range plugins {
|
||||
body, ok := searchCache(p.Name, wpVulnCaches)
|
||||
if !ok {
|
||||
url := fmt.Sprintf("https://wpscan.com/api/v3/plugins/%s", p.Name)
|
||||
var err error
|
||||
body, err = httpRequest(url, token)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
(*wpVulnCaches)[p.Name] = body
|
||||
}
|
||||
|
||||
if body == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
pluginVinfos, err := convertToVinfos(p.Name, body)
|
||||
url := fmt.Sprintf("https://wpscan.com/api/v3/plugins/%s", p.Name)
|
||||
candidates, err := wpscan(url, p.Name, cnf.Token, false)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
for _, v := range pluginVinfos {
|
||||
for _, fixstat := range v.WpPackageFixStats {
|
||||
pkg, ok := r.WordPressPackages.Find(fixstat.Name)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
ok, err := match(pkg.Version, fixstat.FixedIn)
|
||||
if err != nil {
|
||||
util.Log.Infof("[poor] %s installed: %s, fixedIn: %s", pkg.Name, pkg.Version, fixstat.FixedIn)
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
wpVinfos = append(wpVinfos, v)
|
||||
//TODO Debugf
|
||||
util.Log.Infof("[match] %s installed: %s, fixedIn: %s", pkg.Name, pkg.Version, fixstat.FixedIn)
|
||||
} else {
|
||||
//TODO Debugf
|
||||
util.Log.Infof("[miss] %s installed: %s, fixedIn: %s", pkg.Name, pkg.Version, fixstat.FixedIn)
|
||||
}
|
||||
}
|
||||
}
|
||||
vulns := detect(p, candidates)
|
||||
wpVinfos = append(wpVinfos, vulns...)
|
||||
}
|
||||
|
||||
for _, wpVinfo := range wpVinfos {
|
||||
if vinfo, ok := r.ScannedCves[wpVinfo.CveID]; ok {
|
||||
vinfo.CveContents[models.WPVulnDB] = wpVinfo.CveContents[models.WPVulnDB]
|
||||
vinfo.CveContents[models.WpScan] = wpVinfo.CveContents[models.WpScan]
|
||||
vinfo.VulnType = wpVinfo.VulnType
|
||||
vinfo.Confidences = append(vinfo.Confidences, wpVinfo.Confidences...)
|
||||
vinfo.WpPackageFixStats = append(vinfo.WpPackageFixStats, wpVinfo.WpPackageFixStats...)
|
||||
@@ -185,6 +110,43 @@ func FillWordPress(r *models.ScanResult, token string, wpVulnCaches *map[string]
|
||||
return len(wpVinfos), nil
|
||||
}
|
||||
|
||||
func wpscan(url, name, token string, isCore bool) (vinfos []models.VulnInfo, err error) {
|
||||
body, err := httpRequest(url, token)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if body == "" {
|
||||
logging.Log.Debugf("wpscan.com response body is empty. URL: %s", url)
|
||||
}
|
||||
if isCore {
|
||||
name = "core"
|
||||
}
|
||||
return convertToVinfos(name, body)
|
||||
}
|
||||
|
||||
func detect(installed models.WpPackage, candidates []models.VulnInfo) (vulns []models.VulnInfo) {
|
||||
for _, v := range candidates {
|
||||
for _, fixstat := range v.WpPackageFixStats {
|
||||
ok, err := match(installed.Version, fixstat.FixedIn)
|
||||
if err != nil {
|
||||
logging.Log.Warnf("Failed to compare versions %s installed: %s, fixedIn: %s, v: %+v",
|
||||
installed.Name, installed.Version, fixstat.FixedIn, v)
|
||||
// continue scanning
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
vulns = append(vulns, v)
|
||||
logging.Log.Debugf("Affected: %s installed: %s, fixedIn: %s",
|
||||
installed.Name, installed.Version, fixstat.FixedIn)
|
||||
} else {
|
||||
logging.Log.Debugf("Not affected: %s : %s, fixedIn: %s",
|
||||
installed.Name, installed.Version, fixstat.FixedIn)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
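detect, together with the match helper below it, ultimately relies on semantic version comparison between the installed version and the "fixed_in" version from wpscan.com. A minimal sketch of that comparison with hashicorp/go-version, which the module already depends on; the versions in main are placeholders:

```go
package main

import (
	"fmt"

	version "github.com/hashicorp/go-version"
)

// affected reports whether installedVer is older than fixedIn, i.e. the
// installed package has not yet received the fix.
func affected(installedVer, fixedIn string) (bool, error) {
	v1, err := version.NewVersion(installedVer)
	if err != nil {
		return false, err
	}
	v2, err := version.NewVersion(fixedIn)
	if err != nil {
		return false, err
	}
	return v1.LessThan(v2), nil
}

func main() {
	ok, err := affected("5.4.1", "5.4.2")
	fmt.Println(ok, err) // true <nil>
}
```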
|
||||
|
||||
func match(installedVer, fixedIn string) (bool, error) {
|
||||
v1, err := version.NewVersion(installedVer)
|
||||
if err != nil {
|
||||
@@ -219,7 +181,7 @@ func extractToVulnInfos(pkgName string, cves []WpCveInfo) (vinfos []models.VulnI
|
||||
var cveIDs []string
|
||||
|
||||
if len(vulnerability.References.Cve) == 0 {
|
||||
cveIDs = append(cveIDs, fmt.Sprintf("WPVDBID-%d", vulnerability.ID))
|
||||
cveIDs = append(cveIDs, fmt.Sprintf("WPVDBID-%s", vulnerability.ID))
|
||||
}
|
||||
for _, cveNumber := range vulnerability.References.Cve {
|
||||
cveIDs = append(cveIDs, "CVE-"+cveNumber)
|
||||
@@ -237,15 +199,17 @@ func extractToVulnInfos(pkgName string, cves []WpCveInfo) (vinfos []models.VulnI
|
||||
CveID: cveID,
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.WPVulnDB,
|
||||
CveID: cveID,
|
||||
Title: vulnerability.Title,
|
||||
References: refs,
|
||||
Type: models.WpScan,
|
||||
CveID: cveID,
|
||||
Title: vulnerability.Title,
|
||||
References: refs,
|
||||
Published: vulnerability.CreatedAt,
|
||||
LastModified: vulnerability.UpdatedAt,
|
||||
},
|
||||
),
|
||||
VulnType: vulnerability.VulnType,
|
||||
Confidences: []models.Confidence{
|
||||
models.WPVulnDBMatch,
|
||||
models.WpScanMatch,
|
||||
},
|
||||
WpPackageFixStats: []models.WpPackageFixStatus{{
|
||||
Name: pkgName,
|
||||
@@ -258,36 +222,41 @@ func extractToVulnInfos(pkgName string, cves []WpCveInfo) (vinfos []models.VulnI
|
||||
}
|
||||
|
||||
func httpRequest(url, token string) (string, error) {
|
||||
retry := 1
|
||||
util.Log.Debugf("%s", url)
|
||||
req, err := http.NewRequest("GET", url, nil)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return "", errof.New(errof.ErrFailedToAccessWpScan,
|
||||
fmt.Sprintf("Failed to access to wpscan.com. err: %s", err))
|
||||
}
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Token token=%s", token))
|
||||
client, err := util.GetHTTPClient(config.Conf.HTTPProxy)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
req.Header.Set("Authorization", fmt.Sprintf("Token token=%s", token))
|
||||
loop:
|
||||
resp, err := new(http.Client).Do(req)
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", errof.New(errof.ErrFailedToAccessWpScan,
|
||||
fmt.Sprintf("Failed to access to wpscan.com. err: %s", err))
|
||||
}
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
return "", errof.New(errof.ErrFailedToAccessWpScan,
|
||||
fmt.Sprintf("Failed to access to wpscan.com. err: %s", err))
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
if resp.StatusCode == 200 {
|
||||
return string(body), nil
|
||||
} else if resp.StatusCode == 404 {
|
||||
// This package is not in WPVulnDB
|
||||
// This package is not in wpscan
|
||||
return "", nil
|
||||
} else if resp.StatusCode == 429 {
|
||||
return "", errof.New(errof.ErrWpScanAPILimitExceeded,
|
||||
fmt.Sprintf("wpscan.com API limit exceeded: %+v", resp.Status))
|
||||
} else {
|
||||
logging.Log.Warnf("wpscan.com unknown status code: %+v", resp.Status)
|
||||
return "", nil
|
||||
} else if resp.StatusCode == 429 && retry <= 3 {
|
||||
// 429 Too Many Requests
|
||||
util.Log.Debugf("sleep %d min(s): %s", retry, resp.Status)
|
||||
time.Sleep(time.Duration(retry) * time.Minute)
|
||||
retry++
|
||||
goto loop
|
||||
}
|
||||
return "", err
|
||||
}
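In isolation, the request path above is a context-bounded GET built with http.NewRequestWithContext plus a wpscan-style token header, with the status code branched on after the body is read. The sketch below assumes http.DefaultClient in place of the proxy-aware client from util.GetHTTPClient, and the URL and token are placeholders.

```go
package main

import (
	"context"
	"fmt"
	"io/ioutil"
	"net/http"
	"time"
)

// getWithToken issues one authenticated GET against a wpscan.com-style API
// and maps the interesting status codes the same way httpRequest does.
func getWithToken(url, token string) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return "", err
	}
	req.Header.Set("Authorization", fmt.Sprintf("Token token=%s", token))

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		return "", err
	}
	switch resp.StatusCode {
	case http.StatusOK:
		return string(body), nil
	case http.StatusNotFound:
		return "", nil // package unknown to the API
	case http.StatusTooManyRequests:
		return "", fmt.Errorf("API limit exceeded: %s", resp.Status)
	default:
		return "", fmt.Errorf("unexpected status: %s", resp.Status)
	}
}

func main() {
	body, err := getWithToken("https://wpscan.com/api/v3/plugins/akismet", "YOUR_TOKEN")
	fmt.Println(len(body), err)
}
```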
|
||||
|
||||
func removeInactives(pkgs models.WordPressPackages) (removed models.WordPressPackages) {
|
||||
@@ -299,11 +268,3 @@ func removeInactives(pkgs models.WordPressPackages) (removed models.WordPressPac
|
||||
}
|
||||
return removed
|
||||
}
|
||||
|
||||
func searchCache(name string, wpVulnCaches *map[string]string) (string, bool) {
|
||||
value, ok := (*wpVulnCaches)[name]
|
||||
if ok {
|
||||
return value, true
|
||||
}
|
||||
return "", false
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package wordpress
|
||||
package detector
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
@@ -79,52 +79,3 @@ func TestRemoveInactive(t *testing.T) {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestSearchCache(t *testing.T) {
|
||||
|
||||
var tests = []struct {
|
||||
name string
|
||||
wpVulnCache map[string]string
|
||||
value string
|
||||
ok bool
|
||||
}{
|
||||
{
|
||||
name: "akismet",
|
||||
wpVulnCache: map[string]string{
|
||||
"akismet": "body",
|
||||
},
|
||||
value: "body",
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "akismet",
|
||||
wpVulnCache: map[string]string{
|
||||
"BackWPup": "body",
|
||||
"akismet": "body",
|
||||
},
|
||||
value: "body",
|
||||
ok: true,
|
||||
},
|
||||
{
|
||||
name: "akismet",
|
||||
wpVulnCache: map[string]string{
|
||||
"BackWPup": "body",
|
||||
},
|
||||
value: "",
|
||||
ok: false,
|
||||
},
|
||||
{
|
||||
name: "akismet",
|
||||
wpVulnCache: nil,
|
||||
value: "",
|
||||
ok: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
value, ok := searchCache(tt.name, &tt.wpVulnCache)
|
||||
if value != tt.value || ok != tt.ok {
|
||||
t.Errorf("[%d] searchCache error ", i)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -16,6 +16,12 @@ func (e Error) Error() string {
|
||||
var (
|
||||
// ErrFailedToAccessGithubAPI is error of github alert's api access
|
||||
ErrFailedToAccessGithubAPI ErrorCode = "ErrFailedToAccessGithubAPI"
|
||||
|
||||
// ErrFailedToAccessWpScan is error of wpscan.com api access
|
||||
ErrFailedToAccessWpScan ErrorCode = "ErrFailedToAccessWpScan"
|
||||
|
||||
// ErrWpScanAPILimitExceeded is error of wpscan.com api limit exceeded
|
||||
ErrWpScanAPILimitExceeded ErrorCode = "ErrWpScanAPILimitExceeded"
|
||||
)
|
||||
|
||||
// New :
|
||||
|
||||
@@ -1,119 +0,0 @@
|
||||
// +build !scanner
|
||||
|
||||
package exploit
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
cnf "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/mozqnet/go-exploitdb/db"
|
||||
exploitmodels "github.com/mozqnet/go-exploitdb/models"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// FillWithExploit fills exploit information that has in Exploit
|
||||
func FillWithExploit(driver db.DB, r *models.ScanResult) (nExploitCve int, err error) {
|
||||
if cnf.Conf.Exploit.IsFetchViaHTTP() {
|
||||
var cveIDs []string
|
||||
for cveID := range r.ScannedCves {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
}
|
||||
prefix, _ := util.URLPathJoin(cnf.Conf.Exploit.URL, "cves")
|
||||
responses, err := getCvesViaHTTP(cveIDs, prefix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, res := range responses {
|
||||
exps := []*exploitmodels.Exploit{}
|
||||
if err := json.Unmarshal([]byte(res.json), &exps); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
exploits := ConvertToModels(exps)
|
||||
v, ok := r.ScannedCves[res.request.cveID]
|
||||
if ok {
|
||||
v.Exploits = exploits
|
||||
}
|
||||
r.ScannedCves[res.request.cveID] = v
|
||||
nExploitCve++
|
||||
}
|
||||
} else {
|
||||
if driver == nil {
|
||||
return 0, nil
|
||||
}
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if cveID == "" {
|
||||
continue
|
||||
}
|
||||
es := driver.GetExploitByCveID(cveID)
|
||||
if len(es) == 0 {
|
||||
continue
|
||||
}
|
||||
exploits := ConvertToModels(es)
|
||||
vuln.Exploits = exploits
|
||||
r.ScannedCves[cveID] = vuln
|
||||
nExploitCve++
|
||||
}
|
||||
}
|
||||
return nExploitCve, nil
|
||||
}
|
||||
|
||||
// ConvertToModels converts gost model to vuls model
|
||||
func ConvertToModels(es []*exploitmodels.Exploit) (exploits []models.Exploit) {
|
||||
for _, e := range es {
|
||||
var documentURL, shellURL *string
|
||||
if e.OffensiveSecurity != nil {
|
||||
os := e.OffensiveSecurity
|
||||
if os.Document != nil {
|
||||
documentURL = &os.Document.DocumentURL
|
||||
}
|
||||
if os.ShellCode != nil {
|
||||
shellURL = &os.ShellCode.ShellCodeURL
|
||||
}
|
||||
}
|
||||
exploit := models.Exploit{
|
||||
ExploitType: e.ExploitType,
|
||||
ID: e.ExploitUniqueID,
|
||||
URL: e.URL,
|
||||
Description: e.Description,
|
||||
DocumentURL: documentURL,
|
||||
ShellCodeURL: shellURL,
|
||||
}
|
||||
exploits = append(exploits, exploit)
|
||||
}
|
||||
return exploits
|
||||
}
|
||||
|
||||
// CheckHTTPHealth do health check
|
||||
func CheckHTTPHealth() error {
|
||||
if !cnf.Conf.Exploit.IsFetchViaHTTP() {
|
||||
return nil
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/health", cnf.Conf.Exploit.URL)
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
resp, _, errs = gorequest.New().Get(url).End()
|
||||
// resp, _, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
|
||||
// resp, _, errs = gorequest.New().Proxy(api.httpProxy).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return xerrors.Errorf("Failed to connect to exploit server. url: %s, errs: %w", url, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckIfExploitFetched checks if oval entries are in DB by family, release.
|
||||
func CheckIfExploitFetched(driver db.DB, osFamily string) (fetched bool, err error) {
|
||||
//TODO
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// CheckIfExploitFresh checks if oval entries are fresh enough
|
||||
func CheckIfExploitFresh(driver db.DB, osFamily string) (ok bool, err error) {
|
||||
//TODO
|
||||
return true, nil
|
||||
}
|
||||
exploit/util.go (deleted, 115 lines)
@@ -1,115 +0,0 @@
|
||||
package exploit
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type response struct {
|
||||
request request
|
||||
json string
|
||||
}
|
||||
|
||||
func getCvesViaHTTP(cveIDs []string, urlPrefix string) (
|
||||
responses []response, err error) {
|
||||
nReq := len(cveIDs)
|
||||
reqChan := make(chan request, nReq)
|
||||
resChan := make(chan response, nReq)
|
||||
errChan := make(chan error, nReq)
|
||||
defer close(reqChan)
|
||||
defer close(resChan)
|
||||
defer close(errChan)
|
||||
|
||||
go func() {
|
||||
for _, cveID := range cveIDs {
|
||||
reqChan <- request{
|
||||
cveID: cveID,
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
concurrency := 10
|
||||
tasks := util.GenWorkers(concurrency)
|
||||
for i := 0; i < nReq; i++ {
|
||||
tasks <- func() {
|
||||
select {
|
||||
case req := <-reqChan:
|
||||
url, err := util.URLPathJoin(
|
||||
urlPrefix,
|
||||
req.cveID,
|
||||
)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
util.Log.Debugf("HTTP Request to %s", url)
|
||||
httpGet(url, req, resChan, errChan)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
timeout := time.After(2 * 60 * time.Second)
|
||||
var errs []error
|
||||
for i := 0; i < nReq; i++ {
|
||||
select {
|
||||
case res := <-resChan:
|
||||
responses = append(responses, res)
|
||||
case err := <-errChan:
|
||||
errs = append(errs, err)
|
||||
case <-timeout:
|
||||
return nil, xerrors.New("Timeout Fetching OVAL")
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil, xerrors.Errorf("Failed to fetch OVAL. err: %w", errs)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type request struct {
|
||||
osMajorVersion string
|
||||
packName string
|
||||
isSrcPack bool
|
||||
cveID string
|
||||
}
|
||||
|
||||
func httpGet(url string, req request, resChan chan<- response, errChan chan<- error) {
|
||||
var body string
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
count, retryMax := 0, 3
|
||||
f := func() (err error) {
|
||||
// resp, body, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
|
||||
resp, body, errs = gorequest.New().Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
count++
|
||||
if count == retryMax {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %w", url, resp, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, t time.Duration) {
|
||||
util.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %s", t, err)
|
||||
}
|
||||
err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify)
|
||||
if err != nil {
|
||||
errChan <- xerrors.Errorf("HTTP Error %w", err)
|
||||
return
|
||||
}
|
||||
if count == retryMax {
|
||||
errChan <- xerrors.New("Retry count exceeded")
|
||||
return
|
||||
}
|
||||
|
||||
resChan <- response{
|
||||
request: req,
|
||||
json: body,
|
||||
}
|
||||
}
|
||||
go.mod (71 changed lines)
@@ -1,38 +1,25 @@
|
||||
module github.com/future-architect/vuls
|
||||
|
||||
go 1.15
|
||||
|
||||
replace (
|
||||
gopkg.in/mattn/go-colorable.v0 => github.com/mattn/go-colorable v0.1.0
|
||||
gopkg.in/mattn/go-isatty.v0 => github.com/mattn/go-isatty v0.0.6
|
||||
)
|
||||
go 1.16
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go v49.1.0+incompatible
|
||||
github.com/Azure/go-autorest/autorest v0.11.15 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.10 // indirect
|
||||
github.com/Azure/azure-sdk-for-go v50.2.0+incompatible
|
||||
github.com/BurntSushi/toml v0.3.1
|
||||
github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91
|
||||
github.com/aquasecurity/fanal v0.0.0-20201218050947-981a0510f9cb
|
||||
github.com/aquasecurity/trivy v0.14.0
|
||||
github.com/aquasecurity/trivy-db v0.0.0-20201220084758-2d91316c83fa
|
||||
github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef
|
||||
github.com/aws/aws-sdk-go v1.36.12
|
||||
github.com/aquasecurity/fanal v0.0.0-20210119051230-28c249da7cfd
|
||||
github.com/aquasecurity/trivy v0.16.0
|
||||
github.com/aquasecurity/trivy-db v0.0.0-20210121143430-2a5c54036a86
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
|
||||
github.com/aws/aws-sdk-go v1.36.31
|
||||
github.com/boltdb/bolt v1.3.1
|
||||
github.com/briandowns/spinner v1.12.0 // indirect
|
||||
github.com/caarlos0/env/v6 v6.4.0 // indirect
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
github.com/d4l3k/messagediff v1.2.2-0.20190829033028-7e0a312ae40b
|
||||
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21
|
||||
github.com/emersion/go-smtp v0.14.0
|
||||
github.com/goccy/go-yaml v1.8.4 // indirect
|
||||
github.com/golang/protobuf v1.4.3 // indirect
|
||||
github.com/go-sql-driver/mysql v1.6.0 // indirect
|
||||
github.com/google/subcommands v1.2.0
|
||||
github.com/google/wire v0.4.0 // indirect
|
||||
github.com/gosuri/uitable v0.0.4
|
||||
github.com/grokify/html-strip-tags-go v0.0.0-20200923094847-079d207a09f1 // indirect
|
||||
github.com/hashicorp/go-uuid v1.0.2
|
||||
github.com/hashicorp/go-version v1.2.1
|
||||
github.com/hashicorp/go-version v1.3.0
|
||||
github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c
|
||||
github.com/jesseduffield/gocui v0.3.0
|
||||
github.com/k0kubun/pp v3.0.1+incompatible
|
||||
@@ -40,41 +27,37 @@ require (
|
||||
github.com/knqyf263/go-cpe v0.0.0-20201213041631-54f6ab28673f
|
||||
github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d
|
||||
github.com/knqyf263/go-rpm-version v0.0.0-20170716094938-74609b86c936
|
||||
github.com/knqyf263/gost v0.1.7
|
||||
github.com/kotakanbe/go-cve-dictionary v0.5.6
|
||||
github.com/knqyf263/gost v0.1.10
|
||||
github.com/kotakanbe/go-cve-dictionary v0.5.12
|
||||
github.com/kotakanbe/go-pingscanner v0.1.0
|
||||
github.com/kotakanbe/goval-dictionary v0.2.16
|
||||
github.com/kotakanbe/goval-dictionary v0.3.3
|
||||
github.com/kotakanbe/logrus-prefixed-formatter v0.0.0-20180123152602-928f7356cb96
|
||||
github.com/lib/pq v1.10.0 // indirect
|
||||
github.com/magiconair/properties v1.8.4 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.12 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/mitchellh/mapstructure v1.4.0 // indirect
|
||||
github.com/mozqnet/go-exploitdb v0.1.2
|
||||
github.com/mitchellh/mapstructure v1.4.1 // indirect
|
||||
github.com/nlopes/slack v0.6.0
|
||||
github.com/nsf/termbox-go v0.0.0-20201124104050-ed494de23a00 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.4
|
||||
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/parnurzeal/gorequest v0.2.16
|
||||
github.com/pelletier/go-toml v1.8.1 // indirect
|
||||
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5
|
||||
github.com/sirupsen/logrus v1.7.0
|
||||
github.com/spf13/afero v1.5.1
|
||||
github.com/spf13/afero v1.6.0
|
||||
github.com/spf13/cast v1.3.1 // indirect
|
||||
github.com/spf13/cobra v1.1.1
|
||||
github.com/spf13/cobra v1.1.3
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/viper v1.7.1 // indirect
|
||||
github.com/takuzoo3868/go-msfdb v0.1.3
|
||||
go.uber.org/multierr v1.6.0 // indirect
|
||||
go.uber.org/zap v1.16.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20201217014255-9d1352758620
|
||||
github.com/takuzoo3868/go-msfdb v0.1.5
|
||||
github.com/vulsio/go-exploitdb v0.1.7
|
||||
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2 // indirect
|
||||
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5
|
||||
golang.org/x/sys v0.0.0-20201218084310-7d0127a74742 // indirect
|
||||
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
|
||||
golang.org/x/tools v0.0.0-20201211185031-d93e913c1a58 // indirect
|
||||
golang.org/x/net v0.0.0-20210331212208-0fccb6fa2b5c // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20210125201302-af13f521f196
|
||||
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57 // indirect
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
gopkg.in/ini.v1 v1.62.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
honnef.co/go/tools v0.1.0 // indirect
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920
|
||||
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
|
||||
)
|
||||
|
||||
gost/base.go (deleted, 53 lines)
@@ -1,53 +0,0 @@
|
||||
// +build !scanner
|
||||
|
||||
package gost
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
cnf "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/knqyf263/gost/db"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Base is a base struct
|
||||
type Base struct {
|
||||
}
|
||||
|
||||
// FillCVEsWithRedHat fills cve information that has in Gost
|
||||
func (b Base) FillCVEsWithRedHat(driver db.DB, r *models.ScanResult) error {
|
||||
return RedHat{}.fillFixed(driver, r)
|
||||
}
|
||||
|
||||
// CheckHTTPHealth do health check
|
||||
func (b Base) CheckHTTPHealth() error {
|
||||
if !cnf.Conf.Gost.IsFetchViaHTTP() {
|
||||
return nil
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/health", cnf.Conf.Gost.URL)
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
resp, _, errs = gorequest.New().Get(url).End()
|
||||
// resp, _, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
|
||||
// resp, _, errs = gorequest.New().Proxy(api.httpProxy).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return xerrors.Errorf("Failed to connect to gost server. url: %s, errs: %w", url, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// CheckIfGostFetched checks if oval entries are in DB by family, release.
|
||||
func (b Base) CheckIfGostFetched(driver db.DB, osFamily string) (fetched bool, err error) {
|
||||
//TODO
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// CheckIfGostFresh checks if oval entries are fresh enough
|
||||
func (b Base) CheckIfGostFresh(driver db.DB, osFamily string) (ok bool, err error) {
|
||||
//TODO
|
||||
return true, nil
|
||||
}
|
||||
@@ -5,10 +5,10 @@ package gost
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/knqyf263/gost/db"
|
||||
gostmodels "github.com/knqyf263/gost/models"
|
||||
)
|
||||
|
||||
@@ -33,10 +33,10 @@ func (deb Debian) supported(major string) bool {
|
||||
}
|
||||
|
||||
// DetectUnfixed fills cve information that has in Gost
|
||||
func (deb Debian) DetectUnfixed(driver db.DB, r *models.ScanResult, _ bool) (nCVEs int, err error) {
|
||||
func (deb Debian) DetectUnfixed(r *models.ScanResult, _ bool) (nCVEs int, err error) {
|
||||
if !deb.supported(major(r.Release)) {
|
||||
// only logging
|
||||
util.Log.Warnf("Debian %s is not supported yet", r.Release)
|
||||
logging.Log.Warnf("Debian %s is not supported yet", r.Release)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
@@ -56,15 +56,15 @@ func (deb Debian) DetectUnfixed(driver db.DB, r *models.ScanResult, _ bool) (nCV
|
||||
|
||||
// Debian Security Tracker does not support Package for Raspbian, so skip it.
|
||||
var scanResult models.ScanResult
|
||||
if r.Family != config.Raspbian {
|
||||
if r.Family != constant.Raspbian {
|
||||
scanResult = *r
|
||||
} else {
|
||||
scanResult = r.RemoveRaspbianPackFromResult()
|
||||
}
|
||||
|
||||
packCvesList := []packCves{}
|
||||
if config.Conf.Gost.IsFetchViaHTTP() {
|
||||
url, _ := util.URLPathJoin(config.Conf.Gost.URL, "debian", major(scanResult.Release), "pkgs")
|
||||
if deb.DBDriver.Cnf.IsFetchViaHTTP() {
|
||||
url, _ := util.URLPathJoin(deb.DBDriver.Cnf.GetURL(), "debian", major(scanResult.Release), "pkgs")
|
||||
responses, err := getAllUnfixedCvesViaHTTP(r, url)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
@@ -86,11 +86,11 @@ func (deb Debian) DetectUnfixed(driver db.DB, r *models.ScanResult, _ bool) (nCV
|
||||
})
|
||||
}
|
||||
} else {
|
||||
if driver == nil {
|
||||
if deb.DBDriver.DB == nil {
|
||||
return 0, nil
|
||||
}
|
||||
for _, pack := range scanResult.Packages {
|
||||
cveDebs := driver.GetUnfixedCvesDebian(major(scanResult.Release), pack.Name)
|
||||
cveDebs := deb.DBDriver.DB.GetUnfixedCvesDebian(major(scanResult.Release), pack.Name)
|
||||
cves := []models.CveContent{}
|
||||
for _, cveDeb := range cveDebs {
|
||||
cves = append(cves, *deb.ConvertToModel(&cveDeb))
|
||||
@@ -104,7 +104,7 @@ func (deb Debian) DetectUnfixed(driver db.DB, r *models.ScanResult, _ bool) (nCV
|
||||
|
||||
// SrcPack
|
||||
for _, pack := range scanResult.SrcPackages {
|
||||
cveDebs := driver.GetUnfixedCvesDebian(major(scanResult.Release), pack.Name)
|
||||
cveDebs := deb.DBDriver.DB.GetUnfixedCvesDebian(major(scanResult.Release), pack.Name)
|
||||
cves := []models.CveContent{}
|
||||
for _, cveDeb := range cveDebs {
|
||||
cves = append(cves, *deb.ConvertToModel(&cveDeb))
|
||||
|
||||
gost/gost.go (83 changed lines)
@@ -3,33 +3,84 @@
|
||||
package gost
|
||||
|
||||
import (
|
||||
cnf "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/knqyf263/gost/db"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
)
|
||||
|
||||
// DBDriver is a DB Driver
|
||||
type DBDriver struct {
|
||||
DB db.DB
|
||||
Cnf config.VulnDictInterface
|
||||
}
|
||||
|
||||
// Client is the interface of the Gost client.
|
||||
type Client interface {
|
||||
DetectUnfixed(db.DB, *models.ScanResult, bool) (int, error)
|
||||
FillCVEsWithRedHat(db.DB, *models.ScanResult) error
|
||||
DetectUnfixed(*models.ScanResult, bool) (int, error)
|
||||
}
|
||||
|
||||
//TODO implement
|
||||
// CheckHTTPHealth() error
|
||||
// CheckIfGostFetched checks if Gost entries are fetched
|
||||
// CheckIfGostFetched(db.DB, string, string) (bool, error)
|
||||
// CheckIfGostFresh(db.DB, string, string) (bool, error)
|
||||
// Base is a base struct
|
||||
type Base struct {
|
||||
DBDriver DBDriver
|
||||
}
|
||||
|
||||
// FillCVEsWithRedHat fills CVE details with the Red Hat Security API
|
||||
func FillCVEsWithRedHat(r *models.ScanResult, cnf config.GostConf) error {
|
||||
db, locked, err := newGostDB(cnf)
|
||||
if locked {
|
||||
return xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := db.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
return RedHat{Base{DBDriver{DB: db, Cnf: &cnf}}}.fillCvesWithRedHatAPI(r)
|
||||
}
|
||||
|
||||
// NewClient make Client by family
|
||||
func NewClient(family string) Client {
|
||||
func NewClient(cnf config.GostConf, family string) (Client, error) {
|
||||
db, locked, err := newGostDB(cnf)
|
||||
if locked {
|
||||
return nil, xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
driver := DBDriver{DB: db, Cnf: &cnf}
|
||||
|
||||
switch family {
|
||||
case cnf.RedHat, cnf.CentOS:
|
||||
return RedHat{}
|
||||
case cnf.Debian, cnf.Raspbian:
|
||||
return Debian{}
|
||||
case cnf.Windows:
|
||||
return Microsoft{}
|
||||
case constant.RedHat, constant.CentOS:
|
||||
return RedHat{Base{DBDriver: driver}}, nil
|
||||
case constant.Debian, constant.Raspbian:
|
||||
return Debian{Base{DBDriver: driver}}, nil
|
||||
case constant.Windows:
|
||||
return Microsoft{Base{DBDriver: driver}}, nil
|
||||
default:
|
||||
return Pseudo{}
|
||||
return Pseudo{}, nil
|
||||
}
|
||||
}
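NewClient is now a factory that opens the Gost DB once and picks the concrete client by OS family, so DetectUnfixed no longer needs a db.DB argument threaded through every call. A stripped-down sketch of that shape with stand-in types; the family strings and the openDriver callback are illustrative:

```go
package sketch

// client is a stand-in for the gost.Client interface.
type client interface {
	DetectUnfixed(ignoreWillNotFix bool) (int, error)
}

// base carries the shared driver, mirroring gost.Base holding a DBDriver.
type base struct{ driver interface{} }

type redhat struct{ base }
type debian struct{ base }
type pseudo struct{ base }

func (redhat) DetectUnfixed(bool) (int, error) { return 0, nil }
func (debian) DetectUnfixed(bool) (int, error) { return 0, nil }
func (pseudo) DetectUnfixed(bool) (int, error) { return 0, nil }

// newClient opens the driver once and injects it into the family-specific
// client, instead of passing a DB handle into every detection call.
func newClient(family string, openDriver func() (interface{}, error)) (client, error) {
	driver, err := openDriver()
	if err != nil {
		return nil, err
	}
	b := base{driver: driver}
	switch family {
	case "redhat", "centos":
		return redhat{b}, nil
	case "debian", "raspbian":
		return debian{b}, nil
	default:
		return pseudo{b}, nil
	}
}
```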
|
||||
|
||||
// newGostDB returns a DB client for Gost
|
||||
func newGostDB(cnf config.GostConf) (driver db.DB, locked bool, err error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
if driver, locked, err = db.NewDB(cnf.GetType(), path, cnf.GetDebugSQL()); err != nil {
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("gostDB is locked. err: %w", err)
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return driver, false, nil
|
||||
}
|
||||
|
||||
@@ -6,7 +6,6 @@ import (
	"strings"

	"github.com/future-architect/vuls/models"
	"github.com/knqyf263/gost/db"
	gostmodels "github.com/knqyf263/gost/models"
)

@@ -16,15 +15,15 @@ type Microsoft struct {
}

// DetectUnfixed fills CVE information held in Gost
func (ms Microsoft) DetectUnfixed(driver db.DB, r *models.ScanResult, _ bool) (nCVEs int, err error) {
	if driver == nil {
func (ms Microsoft) DetectUnfixed(r *models.ScanResult, _ bool) (nCVEs int, err error) {
	if ms.DBDriver.DB == nil {
		return 0, nil
	}
	cveIDs := []string{}
	for cveID := range r.ScannedCves {
		cveIDs = append(cveIDs, cveID)
	}
	for cveID, msCve := range driver.GetMicrosoftMulti(cveIDs) {
	for cveID, msCve := range ms.DBDriver.DB.GetMicrosoftMulti(cveIDs) {
		if _, ok := r.ScannedCves[cveID]; !ok {
			continue
		}
@@ -70,11 +69,10 @@ func (ms Microsoft) ConvertToModel(cve *gostmodels.MicrosoftCVE) (*models.CveCon

	option := map[string]string{}
	if 0 < len(cve.ExploitStatus) {
		// TODO: CVE-2020-0739
		// "exploit_status": "Publicly Disclosed:No;Exploited:No;Latest Software Release:Exploitation Less Likely;Older Software Release:Exploitation Less Likely;DOS:N/A",
		option["exploit"] = cve.ExploitStatus
	}
	if 0 < len(cve.Workaround) {
		option["workaround"] = cve.Workaround
	}
	kbids := []string{}
	for _, kbid := range cve.KBIDs {
		kbids = append(kbids, kbid.KBID)
@@ -86,13 +84,18 @@ func (ms Microsoft) ConvertToModel(cve *gostmodels.MicrosoftCVE) (*models.CveCon
	vendorURL := "https://msrc.microsoft.com/update-guide/vulnerability/" + cve.CveID
	mitigations := []models.Mitigation{}
	if cve.Mitigation != "" {
		mitigations = []models.Mitigation{
			{
				CveContentType: models.Microsoft,
				Mitigation:     cve.Mitigation,
				URL:            vendorURL,
			},
		}
		mitigations = append(mitigations, models.Mitigation{
			CveContentType: models.Microsoft,
			Mitigation:     cve.Mitigation,
			URL:            vendorURL,
		})
	}
	if cve.Workaround != "" {
		mitigations = append(mitigations, models.Mitigation{
			CveContentType: models.Microsoft,
			Mitigation:     cve.Workaround,
			URL:            vendorURL,
		})
	}

	return &models.CveContent{
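The ConvertToModel hunk above switches from overwriting the mitigations slice to appending, so a CVE with both a vendor mitigation and a workaround keeps both entries. A minimal stand-alone sketch of that accumulation pattern, using a stand-in struct rather than the real models.Mitigation:

```go
package main

import "fmt"

// mitigation is a simplified stand-in for models.Mitigation, only here to
// show the append pattern used in the Microsoft ConvertToModel hunk above.
type mitigation struct {
	CveContentType string
	Mitigation     string
	URL            string
}

func buildMitigations(vendorURL, mitigationText, workaround string) []mitigation {
	ms := []mitigation{}
	if mitigationText != "" {
		ms = append(ms, mitigation{CveContentType: "microsoft", Mitigation: mitigationText, URL: vendorURL})
	}
	if workaround != "" {
		// Appending (not assigning) keeps the vendor mitigation entry intact.
		ms = append(ms, mitigation{CveContentType: "microsoft", Mitigation: workaround, URL: vendorURL})
	}
	return ms
}

func main() {
	url := "https://msrc.microsoft.com/update-guide/vulnerability/CVE-2020-0739"
	fmt.Println(buildMitigations(url, "Apply the latest cumulative update", "Disable the affected component"))
}
```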
@@ -4,7 +4,6 @@ package gost

import (
	"github.com/future-architect/vuls/models"
	"github.com/knqyf263/gost/db"
)

// Pseudo is the Gost client for families other than the RedHat family and Debian
@@ -13,6 +12,6 @@ type Pseudo struct {
}

// DetectUnfixed fills CVE information held in Gost
func (pse Pseudo) DetectUnfixed(driver db.DB, r *models.ScanResult, _ bool) (int, error) {
func (pse Pseudo) DetectUnfixed(r *models.ScanResult, _ bool) (int, error) {
	return 0, nil
}
gost/redhat.go: 197 lines changed
@@ -10,7 +10,6 @@ import (
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/knqyf263/gost/db"
|
||||
gostmodels "github.com/knqyf263/gost/models"
|
||||
)
|
||||
|
||||
@@ -20,11 +19,43 @@ type RedHat struct {
|
||||
}
|
||||
|
||||
// DetectUnfixed fills cve information that has in Gost
|
||||
func (red RedHat) DetectUnfixed(driver db.DB, r *models.ScanResult, ignoreWillNotFix bool) (nCVEs int, err error) {
|
||||
return red.fillUnfixed(driver, r, ignoreWillNotFix)
|
||||
func (red RedHat) DetectUnfixed(r *models.ScanResult, ignoreWillNotFix bool) (nCVEs int, err error) {
|
||||
if red.DBDriver.Cnf.IsFetchViaHTTP() {
|
||||
prefix, _ := util.URLPathJoin(red.DBDriver.Cnf.GetURL(), "redhat", major(r.Release), "pkgs")
|
||||
responses, err := getAllUnfixedCvesViaHTTP(r, prefix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, res := range responses {
|
||||
// CVE-ID: RedhatCVE
|
||||
cves := map[string]gostmodels.RedhatCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &cves); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, cve := range cves {
|
||||
if newly := red.setUnfixedCveToScanResult(&cve, r); newly {
|
||||
nCVEs++
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if red.DBDriver.DB == nil {
|
||||
return 0, nil
|
||||
}
|
||||
for _, pack := range r.Packages {
|
||||
// CVE-ID: RedhatCVE
|
||||
cves := red.DBDriver.DB.GetUnfixedCvesRedhat(major(r.Release), pack.Name, ignoreWillNotFix)
|
||||
for _, cve := range cves {
|
||||
if newly := red.setUnfixedCveToScanResult(&cve, r); newly {
|
||||
nCVEs++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nCVEs, nil
|
||||
}
|
||||
|
||||
func (red RedHat) fillFixed(driver db.DB, r *models.ScanResult) error {
|
||||
func (red RedHat) fillCvesWithRedHatAPI(r *models.ScanResult) error {
|
||||
cveIDs := []string{}
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if _, ok := vuln.CveContents[models.RedHatAPI]; ok {
|
||||
@@ -33,9 +64,8 @@ func (red RedHat) fillFixed(driver db.DB, r *models.ScanResult) error {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
}
|
||||
|
||||
if config.Conf.Gost.IsFetchViaHTTP() {
|
||||
prefix, _ := util.URLPathJoin(config.Conf.Gost.URL,
|
||||
"redhat", "cves")
|
||||
if red.DBDriver.Cnf.IsFetchViaHTTP() {
|
||||
prefix, _ := util.URLPathJoin(config.Conf.Gost.URL, "redhat", "cves")
|
||||
responses, err := getCvesViaHTTP(cveIDs, prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -48,131 +78,68 @@ func (red RedHat) fillFixed(driver db.DB, r *models.ScanResult) error {
|
||||
if redCve.ID == 0 {
|
||||
continue
|
||||
}
|
||||
cveCont, mitigations := red.ConvertToModel(&redCve)
|
||||
v, ok := r.ScannedCves[res.request.cveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(*cveCont)
|
||||
} else {
|
||||
v.CveContents[models.RedHatAPI] = *cveCont
|
||||
}
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: cveCont.CveID,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Confidences: models.Confidences{models.RedHatAPIMatch},
|
||||
}
|
||||
}
|
||||
v.Mitigations = append(v.Mitigations, mitigations...)
|
||||
r.ScannedCves[res.request.cveID] = v
|
||||
red.setFixedCveToScanResult(&redCve, r)
|
||||
}
|
||||
} else {
|
||||
if driver == nil {
|
||||
if red.DBDriver.DB == nil {
|
||||
return nil
|
||||
}
|
||||
for cveID, redCve := range driver.GetRedhatMulti(cveIDs) {
|
||||
for _, redCve := range red.DBDriver.DB.GetRedhatMulti(cveIDs) {
|
||||
if len(redCve.Name) == 0 {
|
||||
continue
|
||||
}
|
||||
cveCont, mitigations := red.ConvertToModel(&redCve)
|
||||
v, ok := r.ScannedCves[cveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(*cveCont)
|
||||
} else {
|
||||
v.CveContents[models.RedHatAPI] = *cveCont
|
||||
}
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: cveCont.CveID,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Confidences: models.Confidences{models.RedHatAPIMatch},
|
||||
}
|
||||
}
|
||||
v.Mitigations = append(v.Mitigations, mitigations...)
|
||||
r.ScannedCves[cveID] = v
|
||||
red.setFixedCveToScanResult(&redCve, r)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (red RedHat) fillUnfixed(driver db.DB, r *models.ScanResult, ignoreWillNotFix bool) (nCVEs int, err error) {
|
||||
if config.Conf.Gost.IsFetchViaHTTP() {
|
||||
prefix, _ := util.URLPathJoin(config.Conf.Gost.URL,
|
||||
"redhat", major(r.Release), "pkgs")
|
||||
responses, err := getAllUnfixedCvesViaHTTP(r, prefix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
for _, res := range responses {
|
||||
// CVE-ID: RedhatCVE
|
||||
cves := map[string]gostmodels.RedhatCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &cves); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
for _, cve := range cves {
|
||||
cveCont, mitigations := red.ConvertToModel(&cve)
|
||||
v, ok := r.ScannedCves[cve.Name]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(*cveCont)
|
||||
} else {
|
||||
v.CveContents[models.RedHatAPI] = *cveCont
|
||||
}
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: cveCont.CveID,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Confidences: models.Confidences{models.RedHatAPIMatch},
|
||||
}
|
||||
nCVEs++
|
||||
}
|
||||
v.Mitigations = append(v.Mitigations, mitigations...)
|
||||
pkgStats := red.mergePackageStates(v,
|
||||
cve.PackageState, r.Packages, r.Release)
|
||||
if 0 < len(pkgStats) {
|
||||
v.AffectedPackages = pkgStats
|
||||
r.ScannedCves[cve.Name] = v
|
||||
}
|
||||
}
|
||||
func (red RedHat) setFixedCveToScanResult(cve *gostmodels.RedhatCVE, r *models.ScanResult) {
|
||||
cveCont, mitigations := red.ConvertToModel(cve)
|
||||
v, ok := r.ScannedCves[cveCont.CveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(*cveCont)
|
||||
} else {
|
||||
v.CveContents[models.RedHatAPI] = *cveCont
|
||||
}
|
||||
} else {
|
||||
if driver == nil {
|
||||
return 0, nil
|
||||
}
|
||||
for _, pack := range r.Packages {
|
||||
// CVE-ID: RedhatCVE
|
||||
cves := driver.GetUnfixedCvesRedhat(major(r.Release), pack.Name, ignoreWillNotFix)
|
||||
for _, cve := range cves {
|
||||
cveCont, mitigations := red.ConvertToModel(&cve)
|
||||
v, ok := r.ScannedCves[cve.Name]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(*cveCont)
|
||||
} else {
|
||||
v.CveContents[models.RedHatAPI] = *cveCont
|
||||
}
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: cveCont.CveID,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Confidences: models.Confidences{models.RedHatAPIMatch},
|
||||
}
|
||||
nCVEs++
|
||||
}
|
||||
v.Mitigations = append(v.Mitigations, mitigations...)
|
||||
pkgStats := red.mergePackageStates(v,
|
||||
cve.PackageState, r.Packages, r.Release)
|
||||
if 0 < len(pkgStats) {
|
||||
v.AffectedPackages = pkgStats
|
||||
r.ScannedCves[cve.Name] = v
|
||||
}
|
||||
}
|
||||
v = models.VulnInfo{
|
||||
CveID: cveCont.CveID,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Confidences: models.Confidences{models.RedHatAPIMatch},
|
||||
}
|
||||
}
|
||||
return nCVEs, nil
|
||||
v.Mitigations = append(v.Mitigations, mitigations...)
|
||||
r.ScannedCves[cveCont.CveID] = v
|
||||
}
|
||||
|
||||
func (red RedHat) setUnfixedCveToScanResult(cve *gostmodels.RedhatCVE, r *models.ScanResult) (newly bool) {
|
||||
cveCont, mitigations := red.ConvertToModel(cve)
|
||||
v, ok := r.ScannedCves[cve.Name]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(*cveCont)
|
||||
} else {
|
||||
v.CveContents[models.RedHatAPI] = *cveCont
|
||||
}
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: cveCont.CveID,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Confidences: models.Confidences{models.RedHatAPIMatch},
|
||||
}
|
||||
newly = true
|
||||
}
|
||||
v.Mitigations = append(v.Mitigations, mitigations...)
|
||||
pkgStats := red.mergePackageStates(v,
|
||||
cve.PackageState, r.Packages, r.Release)
|
||||
if 0 < len(pkgStats) {
|
||||
v.AffectedPackages = pkgStats
|
||||
r.ScannedCves[cve.Name] = v
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (red RedHat) mergePackageStates(v models.VulnInfo, ps []gostmodels.RedhatPackageState, installed models.Packages, release string) (pkgStats models.PackageFixStatuses) {
|
||||
|
||||
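The setFixedCveToScanResult and setUnfixedCveToScanResult helpers extracted in the gost/redhat.go hunks above share one merge rule: reuse the VulnInfo when the CVE is already in ScannedCves, otherwise create a new one tagged with the RedHatAPIMatch confidence and report it as newly found. A simplified, self-contained sketch of that rule with stand-in types (not the real models package):

```go
package main

import "fmt"

// Stand-ins for models.CveContent / models.VulnInfo, reduced to the fields
// the merge rule in the RedHat hunks above actually touches.
type cveContent struct{ CveID string }

type vulnInfo struct {
	CveID       string
	CveContents map[string]cveContent
	Confidences []string
}

// mergeCveContent mirrors the pattern above: update the existing entry if the
// CVE is already in scanned, otherwise add a new one and report it as new.
func mergeCveContent(scanned map[string]vulnInfo, cont cveContent) (newly bool) {
	v, ok := scanned[cont.CveID]
	if ok {
		if v.CveContents == nil {
			v.CveContents = map[string]cveContent{}
		}
		v.CveContents["redhat_api"] = cont
	} else {
		v = vulnInfo{
			CveID:       cont.CveID,
			CveContents: map[string]cveContent{"redhat_api": cont},
			Confidences: []string{"RedHatAPIMatch"},
		}
		newly = true
	}
	scanned[cont.CveID] = v
	return newly
}

func main() {
	scanned := map[string]vulnInfo{}
	fmt.Println(mergeCveContent(scanned, cveContent{CveID: "CVE-2021-0001"})) // true: newly added
	fmt.Println(mergeCveContent(scanned, cveContent{CveID: "CVE-2021-0001"})) // false: merged into the existing entry
}
```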
gost/util.go: 11 lines changed
@@ -6,6 +6,7 @@ import (
	"time"

	"github.com/cenkalti/backoff"
	"github.com/future-architect/vuls/logging"
	"github.com/future-architect/vuls/models"
	"github.com/future-architect/vuls/util"
	"github.com/parnurzeal/gorequest"
@@ -48,7 +49,7 @@ func getCvesViaHTTP(cveIDs []string, urlPrefix string) (
	if err != nil {
		errChan <- err
	} else {
		util.Log.Debugf("HTTP Request to %s", url)
		logging.Log.Debugf("HTTP Request to %s", url)
		httpGet(url, req, resChan, errChan)
	}
}
@@ -122,7 +123,7 @@ func getAllUnfixedCvesViaHTTP(r *models.ScanResult, urlPrefix string) (
	if err != nil {
		errChan <- err
	} else {
		util.Log.Debugf("HTTP Request to %s", url)
		logging.Log.Debugf("HTTP Request to %s", url)
		httpGet(url, req, resChan, errChan)
	}
}
@@ -154,18 +155,18 @@ func httpGet(url string, req request, resChan chan<- response, errChan chan<- er
	count, retryMax := 0, 3
	f := func() (err error) {
		// resp, body, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
		resp, body, errs = gorequest.New().Get(url).End()
		resp, body, errs = gorequest.New().Timeout(10 * time.Second).Get(url).End()
		if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
			count++
			if count == retryMax {
				return nil
			}
			return xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %w", url, resp, errs)
			return xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
		}
		return nil
	}
	notify := func(err error, t time.Duration) {
		util.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %s", t, err)
		logging.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %+v", t, err)
	}
	err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify)
	if err != nil {
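The httpGet hunk above adds a 10-second request timeout and keeps the bounded exponential-backoff retry. A stand-alone sketch of the same retry shape using the standard HTTP client and github.com/cenkalti/backoff; the bail-out after retryMax attempts mirrors the code above, while the function and variable names are illustrative:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/cenkalti/backoff"
)

// httpGetWithRetry mirrors the retry shape of httpGet above: a per-request
// timeout, at most retryMax attempts, and a notify hook printed on each retry.
func httpGetWithRetry(url string) error {
	client := &http.Client{Timeout: 10 * time.Second}
	count, retryMax := 0, 3
	var lastErr error
	f := func() error {
		resp, err := client.Get(url)
		if err == nil {
			defer resp.Body.Close()
			if resp.StatusCode == http.StatusOK {
				lastErr = nil
				return nil
			}
			err = fmt.Errorf("unexpected status: %s", resp.Status)
		}
		count++
		lastErr = fmt.Errorf("HTTP GET error, url: %s, err: %w", url, err)
		if count == retryMax {
			return nil // give up after retryMax attempts, as the code above does
		}
		return lastErr // a non-nil error makes backoff retry with a longer delay
	}
	notify := func(err error, t time.Duration) {
		fmt.Printf("Failed to HTTP GET. retrying in %s. err: %+v\n", t, err)
	}
	if err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify); err != nil {
		return err
	}
	return lastErr
}

func main() {
	if err := httpGetWithRetry("https://example.com"); err != nil {
		fmt.Println("request failed:", err)
	}
}
```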
integration/data/amazon_2.json: 6534 lines, executable file (diff suppressed because it is too large)
integration/data/centos_7.json: 5634 lines, executable file (diff suppressed because it is too large)
integration/data/debian_10.json: 5631 lines, executable file (diff suppressed because it is too large)
integration/data/rails.json: 131 lines, normal file
@@ -0,0 +1,131 @@
|
||||
{
|
||||
"jsonVersion": 4,
|
||||
"lang": "",
|
||||
"serverUUID": "",
|
||||
"serverName": "rails",
|
||||
"family": "pseudo",
|
||||
"release": "",
|
||||
"container": {
|
||||
"containerID": "",
|
||||
"name": "",
|
||||
"image": "",
|
||||
"type": "",
|
||||
"uuid": ""
|
||||
},
|
||||
"platform": {
|
||||
"name": "other",
|
||||
"instanceID": ""
|
||||
},
|
||||
"scannedAt": "2021-03-31T12:22:26.428630183+09:00",
|
||||
"scanMode": "fast mode",
|
||||
"scannedVersion": "v0.15.9",
|
||||
"scannedRevision": "build-20210331_121257_1a58c94",
|
||||
"scannedBy": "dev",
|
||||
"scannedVia": "pseudo",
|
||||
"scannedIpv4Addrs": [
|
||||
"172.19.0.1",
|
||||
"172.17.0.1",
|
||||
"172.27.0.1"
|
||||
],
|
||||
"reportedAt": "0001-01-01T00:00:00Z",
|
||||
"reportedVersion": "",
|
||||
"reportedRevision": "",
|
||||
"reportedBy": "",
|
||||
"errors": [],
|
||||
"warnings": [],
|
||||
"scannedCves": {},
|
||||
"runningKernel": {
|
||||
"release": "",
|
||||
"version": "",
|
||||
"rebootRequired": false
|
||||
},
|
||||
"packages": {},
|
||||
"config": {
|
||||
"scan": {
|
||||
"logDir": "/var/log/vuls",
|
||||
"resultsDir": "/home/ubuntu/go/src/github.com/future-architect/vuls/results",
|
||||
"default": {
|
||||
"port": "22",
|
||||
"scanMode": [
|
||||
"fast"
|
||||
]
|
||||
},
|
||||
"servers": {
|
||||
"rails": {
|
||||
"serverName": "rails",
|
||||
"cpeNames": [
|
||||
"cpe:/a:rubyonrails:ruby_on_rails:3.0.1"
|
||||
],
|
||||
"scanMode": [
|
||||
"fast"
|
||||
],
|
||||
"type": "pseudo",
|
||||
"wordpress": {}
|
||||
}
|
||||
},
|
||||
"cveDict": {
|
||||
"Name": "cveDict",
|
||||
"Type": "sqlite3",
|
||||
"SQLite3Path": "/home/ubuntu/go/src/github.com/kotakanbe/go-cve-dictionary/cve.sqlite3",
|
||||
"DebugSQL": false
|
||||
},
|
||||
"ovalDict": {
|
||||
"Name": "ovalDict",
|
||||
"Type": "sqlite3",
|
||||
"SQLite3Path": "/home/ubuntu/go/src/github.com/kotakanbe/goval-dictionary/oval.sqlite3",
|
||||
"DebugSQL": false
|
||||
},
|
||||
"gost": {
|
||||
"Name": "gost",
|
||||
"Type": "sqlite3",
|
||||
"SQLite3Path": "/home/ubuntu/go/src/github.com/future-architect/vuls/gost.sqlite3",
|
||||
"DebugSQL": false
|
||||
},
|
||||
"exploit": {
|
||||
"Name": "exploit",
|
||||
"Type": "sqlite3",
|
||||
"SQLite3Path": "/home/ubuntu/go/src/github.com/vulsio/go-exploitdb/go-exploitdb.sqlite3",
|
||||
"DebugSQL": false
|
||||
},
|
||||
"metasploit": {
|
||||
"Name": "metasploit",
|
||||
"Type": "sqlite3",
|
||||
"SQLite3Path": "/home/ubuntu/go/src/github.com/takuzoo3868/go-msfdb/go-msfdb.sqlite3",
|
||||
"DebugSQL": false
|
||||
}
|
||||
},
|
||||
"report": {
|
||||
"default": {},
|
||||
"cveDict": {
|
||||
"Name": "",
|
||||
"Type": "",
|
||||
"SQLite3Path": "",
|
||||
"DebugSQL": false
|
||||
},
|
||||
"ovalDict": {
|
||||
"Name": "",
|
||||
"Type": "",
|
||||
"SQLite3Path": "",
|
||||
"DebugSQL": false
|
||||
},
|
||||
"gost": {
|
||||
"Name": "",
|
||||
"Type": "",
|
||||
"SQLite3Path": "",
|
||||
"DebugSQL": false
|
||||
},
|
||||
"exploit": {
|
||||
"Name": "",
|
||||
"Type": "",
|
||||
"SQLite3Path": "",
|
||||
"DebugSQL": false
|
||||
},
|
||||
"metasploit": {
|
||||
"Name": "",
|
||||
"Type": "",
|
||||
"SQLite3Path": "",
|
||||
"DebugSQL": false
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
integration/data/rhel_71.json: 5158 lines, executable file (diff suppressed because it is too large)
integration/data/rhel_8.json: 6926 lines, executable file (diff suppressed because it is too large)
integration/data/ubuntu_1804.json: 8609 lines, executable file (diff suppressed because it is too large)
integration/data/ubuntu_2004.json: 8559 lines, executable file (diff suppressed because it is too large)
integration/int-config.toml: 27 lines, executable file
@@ -0,0 +1,27 @@
[cveDict]
Type = "sqlite3"
SQLite3Path = "/home/ubuntu/vulsctl/docker/cve.sqlite3"

[ovalDict]
Type = "sqlite3"
SQLite3Path = "/home/ubuntu/vulsctl/docker/oval.sqlite3"

[gost]
Type = "sqlite3"
SQLite3Path = "/home/ubuntu/vulsctl/docker/gost.sqlite3"

[exploit]
Type = "sqlite3"
SQLite3Path = "/home/ubuntu/vulsctl/docker/go-exploitdb.sqlite3"

[metasploit]
type = "sqlite3"
SQLite3Path = "/home/ubuntu/vulsctl/docker/go-msfdb.sqlite3"

[default]

[servers]

[servers.rails]
type = "pseudo"
cpeNames = [ "cpe:/a:rubyonrails:ruby_on_rails:3.0.1" ]
integration/int-redis-config.toml: 27 lines, executable file
@@ -0,0 +1,27 @@
[cveDict]
Type = "redis"
Url = "redis://127.0.0.1/3"

[ovalDict]
Type = "redis"
Url = "redis://127.0.0.1/1"

[gost]
Type = "redis"
Url = "redis://127.0.0.1/2"

[exploit]
Type = "redis"
Url = "redis://127.0.0.1/4"

[metasploit]
Type = "redis"
Url = "redis://127.0.0.1/5"

[default]

[servers]

[servers.rails]
type = "pseudo"
cpeNames = [ "cpe:/a:rubyonrails:ruby_on_rails:3.0.1" ]
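These two integration configs differ only per backend: sqlite3 sections carry SQLite3Path, while redis sections carry Url. A hedged sketch of reading one section with github.com/BurntSushi/toml and picking the connection target the way newGostDB does earlier in this diff; the dbSection and intConfig structs are illustrative stand-ins, not the real config types:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// dbSection is an illustrative stand-in for one of the per-dictionary
// sections ([cveDict], [ovalDict], [gost], ...) in the configs above.
type dbSection struct {
	Type        string
	SQLite3Path string
	URL         string `toml:"Url"`
}

type intConfig struct {
	Gost dbSection `toml:"gost"`
}

func main() {
	var conf intConfig
	if _, err := toml.DecodeFile("integration/int-redis-config.toml", &conf); err != nil {
		log.Fatalf("failed to load config: %+v", err)
	}
	// Pick the connection target the same way newGostDB does above:
	// sqlite3 uses the file path, anything else (e.g. redis) uses the URL.
	target := conf.Gost.URL
	if conf.Gost.Type == "sqlite3" {
		target = conf.Gost.SQLite3Path
	}
	fmt.Printf("gost backend: %s (%s)\n", conf.Gost.Type, target)
}
```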
logging/logutil.go: 119 lines, normal file
@@ -0,0 +1,119 @@
|
||||
package logging
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
|
||||
"github.com/k0kubun/pp"
|
||||
"github.com/rifflock/lfshook"
|
||||
"github.com/sirupsen/logrus"
|
||||
|
||||
formatter "github.com/kotakanbe/logrus-prefixed-formatter"
|
||||
)
|
||||
|
||||
//LogOpts has options for logging
|
||||
type LogOpts struct {
|
||||
Debug bool `json:"debug,omitempty"`
|
||||
DebugSQL bool `json:"debugSQL,omitempty"`
|
||||
LogToFile bool `json:"logToFile,omitempty"`
|
||||
LogDir string `json:"logDir,omitempty"`
|
||||
Quiet bool `json:"quiet,omitempty"`
|
||||
}
|
||||
|
||||
// Log for localhost
|
||||
var Log Logger
|
||||
|
||||
// Logger has logrus entry
|
||||
type Logger struct {
|
||||
logrus.Entry
|
||||
}
|
||||
|
||||
func init() {
|
||||
log := logrus.New()
|
||||
log.Out = ioutil.Discard
|
||||
fields := logrus.Fields{"prefix": ""}
|
||||
Log = Logger{Entry: *log.WithFields(fields)}
|
||||
}
|
||||
|
||||
// NewNormalLogger creates normal logger
|
||||
func NewNormalLogger() Logger {
|
||||
return Logger{Entry: logrus.Entry{Logger: logrus.New()}}
|
||||
}
|
||||
|
||||
// NewCustomLogger creates logrus
|
||||
func NewCustomLogger(debug, quiet, logToFile bool, logDir, logMsgAnsiColor, serverName string) Logger {
|
||||
log := logrus.New()
|
||||
log.Formatter = &formatter.TextFormatter{MsgAnsiColor: logMsgAnsiColor}
|
||||
log.Level = logrus.InfoLevel
|
||||
if debug {
|
||||
log.Level = logrus.DebugLevel
|
||||
pp.ColoringEnabled = false
|
||||
}
|
||||
|
||||
if flag.Lookup("test.v") != nil {
|
||||
return Logger{Entry: *logrus.NewEntry(log)}
|
||||
}
|
||||
|
||||
whereami := "localhost"
|
||||
if serverName != "" {
|
||||
whereami = serverName
|
||||
}
|
||||
|
||||
if logToFile {
|
||||
dir := GetDefaultLogDir()
|
||||
if logDir != "" {
|
||||
dir = logDir
|
||||
}
|
||||
|
||||
if _, err := os.Stat(dir); os.IsNotExist(err) {
|
||||
if err := os.Mkdir(dir, 0700); err != nil {
|
||||
log.Errorf("Failed to create log directory. path: %s, err: %+v", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
logFile := dir + "/vuls.log"
|
||||
if file, err := os.OpenFile(logFile, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644); err == nil {
|
||||
log.Out = io.MultiWriter(os.Stderr, file)
|
||||
} else {
|
||||
log.Out = os.Stderr
|
||||
log.Errorf("Failed to create log file. path: %s, err: %+v", logFile, err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(dir); err == nil {
|
||||
path := filepath.Join(dir, fmt.Sprintf("%s.log", whereami))
|
||||
if _, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0644); err == nil {
|
||||
log.Hooks.Add(lfshook.NewHook(lfshook.PathMap{
|
||||
logrus.DebugLevel: path,
|
||||
logrus.InfoLevel: path,
|
||||
logrus.WarnLevel: path,
|
||||
logrus.ErrorLevel: path,
|
||||
logrus.FatalLevel: path,
|
||||
logrus.PanicLevel: path,
|
||||
}, nil))
|
||||
} else {
|
||||
log.Errorf("Failed to create log file. path: %s, err: %+v", path, err)
|
||||
}
|
||||
}
|
||||
} else if quiet {
|
||||
log.Out = io.Discard
|
||||
} else {
|
||||
log.Out = os.Stderr
|
||||
}
|
||||
|
||||
entry := log.WithFields(logrus.Fields{"prefix": whereami})
|
||||
return Logger{Entry: *entry}
|
||||
}
|
||||
|
||||
// GetDefaultLogDir returns default log directory
|
||||
func GetDefaultLogDir() string {
|
||||
defaultLogDir := "/var/log/vuls"
|
||||
if runtime.GOOS == "windows" {
|
||||
defaultLogDir = filepath.Join(os.Getenv("APPDATA"), "vuls")
|
||||
}
|
||||
return defaultLogDir
|
||||
}
|
||||
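A hedged sketch of how a command might wire up the new logging package once logging/logutil.go above is in place. It relies only on the package-level Log variable and the NewCustomLogger signature shown above; the argument values and log messages are illustrative.

```go
package main

import "github.com/future-architect/vuls/logging"

func main() {
	// debug=false, quiet=false, logToFile=true, default log dir, no ANSI
	// color, and serverName == "" so the prefix falls back to "localhost".
	logging.Log = logging.NewCustomLogger(false, false, true, "", "", "")
	logging.Log.Infof("Start scanning: %s", "localhost")
	logging.Log.Debugf("HTTP Request to %s", "http://127.0.0.1:1325") // suppressed unless debug=true
}
```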
@@ -1,6 +1,7 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aquasecurity/trivy-db/pkg/vulnsrc/vulnerability"
|
||||
@@ -58,7 +59,7 @@ func (v CveContents) PrimarySrcURLs(lang, myFamily, cveID string) (values []CveC
|
||||
}
|
||||
}
|
||||
|
||||
order := CveContentTypes{Nvd, NewCveContentType(myFamily)}
|
||||
order := CveContentTypes{Nvd, NewCveContentType(myFamily), GitHub}
|
||||
for _, ctype := range order {
|
||||
if cont, found := v[ctype]; found {
|
||||
if cont.SourceLink == "" {
|
||||
@@ -74,7 +75,7 @@ func (v CveContents) PrimarySrcURLs(lang, myFamily, cveID string) (values []CveC
|
||||
}
|
||||
}
|
||||
|
||||
if len(values) == 0 {
|
||||
if len(values) == 0 && strings.HasPrefix(cveID, "CVE") {
|
||||
return []CveContentStr{{
|
||||
Type: Nvd,
|
||||
Value: "https://nvd.nist.gov/vuln/detail/" + cveID,
|
||||
@@ -247,11 +248,13 @@ func NewCveContentType(name string) CveContentType {
|
||||
case "microsoft":
|
||||
return Microsoft
|
||||
case "wordpress":
|
||||
return WPVulnDB
|
||||
return WpScan
|
||||
case "amazon":
|
||||
return Amazon
|
||||
case "trivy":
|
||||
return Trivy
|
||||
case "GitHub":
|
||||
return Trivy
|
||||
default:
|
||||
return Unknown
|
||||
}
|
||||
@@ -291,12 +294,15 @@ const (
|
||||
// Microsoft is Microsoft
|
||||
Microsoft CveContentType = "microsoft"
|
||||
|
||||
// WPVulnDB is WordPress
|
||||
WPVulnDB CveContentType = "wpvulndb"
|
||||
// WpScan is WordPress
|
||||
WpScan CveContentType = "wpscan"
|
||||
|
||||
// Trivy is Trivy
|
||||
Trivy CveContentType = "trivy"
|
||||
|
||||
// GitHub is GitHub Security Alerts
|
||||
GitHub CveContentType = "github"
|
||||
|
||||
// Unknown is Unknown
|
||||
Unknown CveContentType = "unknown"
|
||||
)
|
||||
@@ -315,8 +321,9 @@ var AllCveContetTypes = CveContentTypes{
|
||||
Amazon,
|
||||
SUSE,
|
||||
DebianSecurityTracker,
|
||||
WPVulnDB,
|
||||
WpScan,
|
||||
Trivy,
|
||||
GitHub,
|
||||
}
|
||||
|
||||
// Except returns CveContentTypes except for given args
|
||||
|
||||
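The PrimarySrcURLs hunks above add GitHub to the lookup order and restrict the NVD fallback link to IDs that actually start with "CVE" (GitHub Security Advisories use GHSA IDs instead). A small self-contained sketch of that selection logic, using a plain map rather than the real CveContents type:

```go
package main

import (
	"fmt"
	"strings"
)

// primarySrcURL sketches the fallback added above: look up sources in
// priority order (NVD, the distro family, GitHub); if none has a link and
// the ID is a real CVE, fall back to the NVD detail page.
func primarySrcURL(links map[string]string, family, cveID string) string {
	for _, ctype := range []string{"nvd", family, "github"} {
		if u, ok := links[ctype]; ok && u != "" {
			return u
		}
	}
	if strings.HasPrefix(cveID, "CVE") {
		return "https://nvd.nist.gov/vuln/detail/" + cveID
	}
	return "" // e.g. GHSA-... identifiers get no NVD fallback
}

func main() {
	fmt.Println(primarySrcURL(map[string]string{}, "redhat", "CVE-2021-0001"))
	fmt.Println(primarySrcURL(map[string]string{}, "redhat", "GHSA-xxxx-xxxx-xxxx"))
}
```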
@@ -6,9 +6,9 @@ import (
|
||||
"github.com/aquasecurity/trivy-db/pkg/db"
|
||||
trivyDBTypes "github.com/aquasecurity/trivy-db/pkg/types"
|
||||
"github.com/aquasecurity/trivy/pkg/detector/library"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/types"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"golang.org/x/xerrors"
|
||||
// "github.com/aquasecurity/go-dep-parser/pkg/types"
|
||||
)
|
||||
@@ -30,6 +30,14 @@ func (lss LibraryScanners) Find(path, name string) map[string]types.Library {
|
||||
return filtered
|
||||
}
|
||||
|
||||
// Total returns total count of pkgs
|
||||
func (lss LibraryScanners) Total() (total int) {
|
||||
for _, lib := range lss {
|
||||
total += len(lib.Libs)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// LibraryScanner has libraries information
|
||||
type LibraryScanner struct {
|
||||
Path string
|
||||
@@ -63,7 +71,7 @@ func (s LibraryScanner) convertFanalToVuln(tvulns []types.DetectedVulnerability)
|
||||
for _, tvuln := range tvulns {
|
||||
vinfo, err := s.getVulnDetail(tvuln)
|
||||
if err != nil {
|
||||
util.Log.Debugf("failed to getVulnDetail. err: %s, tvuln: %#v", err, tvuln)
|
||||
logging.Log.Debugf("failed to getVulnDetail. err: %+v, tvuln: %#v", err, tvuln)
|
||||
continue
|
||||
}
|
||||
vulns = append(vulns, vinfo)
|
||||
|
||||
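The new Total helper above just sums library counts across all scanners so the report header can print "N libs". A tiny stand-in showing the same aggregation; the types and sample data are illustrative:

```go
package main

import "fmt"

// libScanner stands in for models.LibraryScanner: one entry per lockfile,
// each with its own list of libraries, summed the same way Total() does above.
type libScanner struct {
	Path string
	Libs []string
}

func total(lss []libScanner) (n int) {
	for _, ls := range lss {
		n += len(ls.Libs)
	}
	return
}

func main() {
	lss := []libScanner{
		{Path: "Gemfile.lock", Libs: []string{"rails", "rack"}},
		{Path: "package-lock.json", Libs: []string{"lodash"}},
	}
	fmt.Printf("%d libs\n", total(lss)) // feeds the "%d libs" summary used later in the report header
}
```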
@@ -63,13 +63,13 @@ func (ps Packages) FindOne(f func(Package) bool) (string, Package, bool) {
|
||||
}
|
||||
|
||||
// FindByFQPN search a package by Fully-Qualified-Package-Name
|
||||
func (ps Packages) FindByFQPN(nameVerRelArc string) (*Package, error) {
|
||||
func (ps Packages) FindByFQPN(nameVerRel string) (*Package, error) {
|
||||
for _, p := range ps {
|
||||
if nameVerRelArc == p.FQPN() {
|
||||
if nameVerRel == p.FQPN() {
|
||||
return &p, nil
|
||||
}
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to find the package: %s", nameVerRelArc)
|
||||
return nil, xerrors.Errorf("Failed to find the package: %s", nameVerRel)
|
||||
}
|
||||
|
||||
// Package has installed binary packages.
|
||||
@@ -81,7 +81,7 @@ type Package struct {
|
||||
NewRelease string `json:"newRelease"`
|
||||
Arch string `json:"arch"`
|
||||
Repository string `json:"repository"`
|
||||
Changelog Changelog `json:"changelog"`
|
||||
Changelog *Changelog `json:"changelog,omitempty"`
|
||||
AffectedProcs []AffectedProcess `json:",omitempty"`
|
||||
NeedRestartProcs []NeedRestartProcess `json:",omitempty"`
|
||||
}
|
||||
@@ -96,9 +96,6 @@ func (p Package) FQPN() string {
|
||||
if p.Release != "" {
|
||||
fqpn += fmt.Sprintf("-%s", p.Release)
|
||||
}
|
||||
if p.Arch != "" {
|
||||
fqpn += fmt.Sprintf(".%s", p.Arch)
|
||||
}
|
||||
return fqpn
|
||||
}
|
||||
|
||||
|
||||
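The packages.go hunks above rename FindByFQPN's parameter and drop the ".arch" suffix from FQPN, so lookups now key on name-version-release only. A sketch of the trimmed FQPN with a stand-in struct; the leading name-version part is assumed, since the hunk only shows the release and arch handling:

```go
package main

import "fmt"

// pkg stands in for models.Package, reduced to the fields the trimmed FQPN
// needs after the change above.
type pkg struct {
	Name    string
	Version string
	Release string
}

// fqpn builds name-version[-release]; the ".arch" suffix removed by this
// change is no longer appended, so x86_64 and aarch64 builds share one FQPN.
func (p pkg) fqpn() string {
	fqpn := fmt.Sprintf("%s-%s", p.Name, p.Version)
	if p.Release != "" {
		fqpn += fmt.Sprintf("-%s", p.Release)
	}
	return fqpn
}

func main() {
	p := pkg{Name: "openssl", Version: "1.1.1g", Release: "15.el8"}
	fmt.Println(p.fqpn()) // openssl-1.1.1g-15.el8
}
```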
@@ -287,7 +287,7 @@ func TestPackage_FormatVersionFromTo(t *testing.T) {
|
||||
NewRelease: tt.fields.NewRelease,
|
||||
Arch: tt.fields.Arch,
|
||||
Repository: tt.fields.Repository,
|
||||
Changelog: tt.fields.Changelog,
|
||||
Changelog: &tt.fields.Changelog,
|
||||
AffectedProcs: tt.fields.AffectedProcs,
|
||||
NeedRestartProcs: tt.fields.NeedRestartProcs,
|
||||
}
|
||||
|
||||
@@ -4,13 +4,14 @@ import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/cwe"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
)
|
||||
|
||||
// ScanResults is a slide of ScanResult
|
||||
@@ -18,37 +19,38 @@ type ScanResults []ScanResult
|
||||
|
||||
// ScanResult has the result of scanned CVE information.
|
||||
type ScanResult struct {
|
||||
JSONVersion int `json:"jsonVersion"`
|
||||
Lang string `json:"lang"`
|
||||
ServerUUID string `json:"serverUUID"`
|
||||
ServerName string `json:"serverName"` // TOML Section key
|
||||
Family string `json:"family"`
|
||||
Release string `json:"release"`
|
||||
Container Container `json:"container"`
|
||||
Platform Platform `json:"platform"`
|
||||
IPv4Addrs []string `json:"ipv4Addrs,omitempty"` // only global unicast address (https://golang.org/pkg/net/#IP.IsGlobalUnicast)
|
||||
IPv6Addrs []string `json:"ipv6Addrs,omitempty"` // only global unicast address (https://golang.org/pkg/net/#IP.IsGlobalUnicast)
|
||||
IPSIdentifiers map[config.IPS]string `json:"ipsIdentifiers,omitempty"`
|
||||
ScannedAt time.Time `json:"scannedAt"`
|
||||
ScanMode string `json:"scanMode"`
|
||||
ScannedVersion string `json:"scannedVersion"`
|
||||
ScannedRevision string `json:"scannedRevision"`
|
||||
ScannedBy string `json:"scannedBy"`
|
||||
ScannedVia string `json:"scannedVia"`
|
||||
ScannedIPv4Addrs []string `json:"scannedIpv4Addrs,omitempty"`
|
||||
ScannedIPv6Addrs []string `json:"scannedIpv6Addrs,omitempty"`
|
||||
ReportedAt time.Time `json:"reportedAt"`
|
||||
ReportedVersion string `json:"reportedVersion"`
|
||||
ReportedRevision string `json:"reportedRevision"`
|
||||
ReportedBy string `json:"reportedBy"`
|
||||
Errors []string `json:"errors"`
|
||||
Warnings []string `json:"warnings"`
|
||||
JSONVersion int `json:"jsonVersion"`
|
||||
Lang string `json:"lang"`
|
||||
ServerUUID string `json:"serverUUID"`
|
||||
ServerName string `json:"serverName"` // TOML Section key
|
||||
Family string `json:"family"`
|
||||
Release string `json:"release"`
|
||||
Container Container `json:"container"`
|
||||
Platform Platform `json:"platform"`
|
||||
IPv4Addrs []string `json:"ipv4Addrs,omitempty"` // only global unicast address (https://golang.org/pkg/net/#IP.IsGlobalUnicast)
|
||||
IPv6Addrs []string `json:"ipv6Addrs,omitempty"` // only global unicast address (https://golang.org/pkg/net/#IP.IsGlobalUnicast)
|
||||
IPSIdentifiers map[string]string `json:"ipsIdentifiers,omitempty"`
|
||||
ScannedAt time.Time `json:"scannedAt"`
|
||||
ScanMode string `json:"scanMode"`
|
||||
ScannedVersion string `json:"scannedVersion"`
|
||||
ScannedRevision string `json:"scannedRevision"`
|
||||
ScannedBy string `json:"scannedBy"`
|
||||
ScannedVia string `json:"scannedVia"`
|
||||
ScannedIPv4Addrs []string `json:"scannedIpv4Addrs,omitempty"`
|
||||
ScannedIPv6Addrs []string `json:"scannedIpv6Addrs,omitempty"`
|
||||
ReportedAt time.Time `json:"reportedAt"`
|
||||
ReportedVersion string `json:"reportedVersion"`
|
||||
ReportedRevision string `json:"reportedRevision"`
|
||||
ReportedBy string `json:"reportedBy"`
|
||||
Errors []string `json:"errors"`
|
||||
Warnings []string `json:"warnings"`
|
||||
|
||||
ScannedCves VulnInfos `json:"scannedCves"`
|
||||
RunningKernel Kernel `json:"runningKernel"`
|
||||
Packages Packages `json:"packages"`
|
||||
SrcPackages SrcPackages `json:",omitempty"`
|
||||
WordPressPackages *WordPressPackages `json:",omitempty"`
|
||||
EnabledDnfModules []string `json:"enabledDnfModules,omitempty"` // for dnf modules
|
||||
WordPressPackages WordPressPackages `json:",omitempty"`
|
||||
LibraryScanners LibraryScanners `json:"libraries,omitempty"`
|
||||
CweDict CweDict `json:"cweDict,omitempty"`
|
||||
Optional map[string]interface{} `json:",omitempty"`
|
||||
@@ -58,13 +60,371 @@ type ScanResult struct {
|
||||
} `json:"config"`
|
||||
}
|
||||
|
||||
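The ScanResult struct above is what the scanner serializes per server; the integration/data JSON files added in this diff are instances of it. A hedged sketch of producing such a report skeleton, setting only fields visible in the struct above (the field values are illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
	"time"

	"github.com/future-architect/vuls/models"
)

func main() {
	r := models.ScanResult{
		JSONVersion: 4,
		ServerName:  "rails",
		Family:      "pseudo",
		ScannedAt:   time.Now(),
		ScanMode:    "fast mode",
		ScannedCves: models.VulnInfos{},
		Packages:    models.Packages{},
	}
	b, err := json.MarshalIndent(r, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// Fields tagged omitempty (ipv4Addrs, libraries, ...) are dropped when empty.
	fmt.Println(string(b))
}
```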
// Container has Container information
|
||||
type Container struct {
|
||||
ContainerID string `json:"containerID"`
|
||||
Name string `json:"name"`
|
||||
Image string `json:"image"`
|
||||
Type string `json:"type"`
|
||||
UUID string `json:"uuid"`
|
||||
}
|
||||
|
||||
// Platform has platform information
|
||||
type Platform struct {
|
||||
Name string `json:"name"` // aws or azure or gcp or other...
|
||||
InstanceID string `json:"instanceID"`
|
||||
}
|
||||
|
||||
// Kernel has the Release, version and whether need restart
|
||||
type Kernel struct {
|
||||
Release string `json:"release"`
|
||||
Version string `json:"version"`
|
||||
RebootRequired bool `json:"rebootRequired"`
|
||||
}
|
||||
|
||||
// FilterInactiveWordPressLibs is filter function.
|
||||
func (r *ScanResult) FilterInactiveWordPressLibs(detectInactive bool) {
|
||||
if detectInactive {
|
||||
return
|
||||
}
|
||||
|
||||
filtered := r.ScannedCves.Find(func(v VulnInfo) bool {
|
||||
if len(v.WpPackageFixStats) == 0 {
|
||||
return true
|
||||
}
|
||||
// Ignore if all libs in this vulnInfo inactive
|
||||
for _, wp := range v.WpPackageFixStats {
|
||||
if p, ok := r.WordPressPackages.Find(wp.Name); ok {
|
||||
if p.Status != Inactive {
|
||||
return true
|
||||
}
|
||||
} else {
|
||||
logging.Log.Warnf("Failed to find the WordPress pkg: %+s", wp.Name)
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
r.ScannedCves = filtered
|
||||
return
|
||||
}
|
||||
|
||||
// ReportFileName returns the filename on localhost without extension
|
||||
func (r ScanResult) ReportFileName() (name string) {
|
||||
if r.Container.ContainerID == "" {
|
||||
return fmt.Sprintf("%s", r.ServerName)
|
||||
}
|
||||
return fmt.Sprintf("%s@%s", r.Container.Name, r.ServerName)
|
||||
}
|
||||
|
||||
// ReportKeyName returns the name of key on S3, Azure-Blob without extension
|
||||
func (r ScanResult) ReportKeyName() (name string) {
|
||||
timestr := r.ScannedAt.Format(time.RFC3339)
|
||||
if r.Container.ContainerID == "" {
|
||||
return fmt.Sprintf("%s/%s", timestr, r.ServerName)
|
||||
}
|
||||
return fmt.Sprintf("%s/%s@%s", timestr, r.Container.Name, r.ServerName)
|
||||
}
|
||||
|
||||
// ServerInfo returns server name one line
|
||||
func (r ScanResult) ServerInfo() string {
|
||||
if r.Container.ContainerID == "" {
|
||||
return fmt.Sprintf("%s (%s%s)",
|
||||
r.FormatServerName(), r.Family, r.Release)
|
||||
}
|
||||
return fmt.Sprintf(
|
||||
"%s (%s%s) on %s",
|
||||
r.FormatServerName(),
|
||||
r.Family,
|
||||
r.Release,
|
||||
r.ServerName,
|
||||
)
|
||||
}
|
||||
|
||||
// ServerInfoTui returns server information for TUI sidebar
|
||||
func (r ScanResult) ServerInfoTui() string {
|
||||
if r.Container.ContainerID == "" {
|
||||
line := fmt.Sprintf("%s (%s%s)",
|
||||
r.ServerName, r.Family, r.Release)
|
||||
if len(r.Warnings) != 0 {
|
||||
line = "[Warn] " + line
|
||||
}
|
||||
if r.RunningKernel.RebootRequired {
|
||||
return "[Reboot] " + line
|
||||
}
|
||||
return line
|
||||
}
|
||||
|
||||
fmtstr := "|-- %s (%s%s)"
|
||||
if r.RunningKernel.RebootRequired {
|
||||
fmtstr = "|-- [Reboot] %s (%s%s)"
|
||||
}
|
||||
return fmt.Sprintf(fmtstr, r.Container.Name, r.Family, r.Release)
|
||||
}
|
||||
|
||||
// FormatServerName returns server and container name
|
||||
func (r ScanResult) FormatServerName() (name string) {
|
||||
if r.Container.ContainerID == "" {
|
||||
name = r.ServerName
|
||||
} else {
|
||||
name = fmt.Sprintf("%s@%s",
|
||||
r.Container.Name, r.ServerName)
|
||||
}
|
||||
if r.RunningKernel.RebootRequired {
|
||||
name = "[Reboot Required] " + name
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// FormatTextReportHeader returns header of text report
|
||||
func (r ScanResult) FormatTextReportHeader() string {
|
||||
var buf bytes.Buffer
|
||||
for i := 0; i < len(r.ServerInfo()); i++ {
|
||||
buf.WriteString("=")
|
||||
}
|
||||
|
||||
pkgs := r.FormatUpdatablePkgsSummary()
|
||||
if 0 < len(r.WordPressPackages) {
|
||||
pkgs = fmt.Sprintf("%s, %d WordPress pkgs", pkgs, len(r.WordPressPackages))
|
||||
}
|
||||
if 0 < len(r.LibraryScanners) {
|
||||
pkgs = fmt.Sprintf("%s, %d libs", pkgs, r.LibraryScanners.Total())
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s\n%s\n%s\n%s, %s, %s, %s\n%s\n",
|
||||
r.ServerInfo(),
|
||||
buf.String(),
|
||||
r.ScannedCves.FormatCveSummary(),
|
||||
r.ScannedCves.FormatFixedStatus(r.Packages),
|
||||
r.FormatExploitCveSummary(),
|
||||
r.FormatMetasploitCveSummary(),
|
||||
r.FormatAlertSummary(),
|
||||
pkgs)
|
||||
}
|
||||
|
||||
// FormatUpdatablePkgsSummary returns a summary of updatable packages
|
||||
func (r ScanResult) FormatUpdatablePkgsSummary() string {
|
||||
mode := r.Config.Scan.Servers[r.ServerName].Mode
|
||||
if !r.isDisplayUpdatableNum(mode) {
|
||||
return fmt.Sprintf("%d installed", len(r.Packages))
|
||||
}
|
||||
|
||||
nUpdatable := 0
|
||||
for _, p := range r.Packages {
|
||||
if p.NewVersion == "" {
|
||||
continue
|
||||
}
|
||||
if p.Version != p.NewVersion || p.Release != p.NewRelease {
|
||||
nUpdatable++
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%d installed, %d updatable",
|
||||
len(r.Packages),
|
||||
nUpdatable)
|
||||
}
|
||||
|
||||
// FormatExploitCveSummary returns a summary of exploit cve
|
||||
func (r ScanResult) FormatExploitCveSummary() string {
|
||||
nExploitCve := 0
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if 0 < len(vuln.Exploits) {
|
||||
nExploitCve++
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%d poc", nExploitCve)
|
||||
}
|
||||
|
||||
// FormatMetasploitCveSummary returns a summary of exploit cve
|
||||
func (r ScanResult) FormatMetasploitCveSummary() string {
|
||||
nMetasploitCve := 0
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if 0 < len(vuln.Metasploits) {
|
||||
nMetasploitCve++
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%d exploits", nMetasploitCve)
|
||||
}
|
||||
|
||||
// FormatAlertSummary returns a summary of CERT alerts
|
||||
func (r ScanResult) FormatAlertSummary() string {
|
||||
jaCnt := 0
|
||||
enCnt := 0
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if len(vuln.AlertDict.En) > 0 {
|
||||
enCnt += len(vuln.AlertDict.En)
|
||||
}
|
||||
if len(vuln.AlertDict.Ja) > 0 {
|
||||
jaCnt += len(vuln.AlertDict.Ja)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("en: %d, ja: %d alerts", enCnt, jaCnt)
|
||||
}
|
||||
|
||||
func (r ScanResult) isDisplayUpdatableNum(mode config.ScanMode) bool {
|
||||
if r.Family == constant.FreeBSD {
|
||||
return false
|
||||
}
|
||||
|
||||
if mode.IsOffline() {
|
||||
return false
|
||||
}
|
||||
if mode.IsFastRoot() || mode.IsDeep() {
|
||||
return true
|
||||
}
|
||||
if mode.IsFast() {
|
||||
switch r.Family {
|
||||
case constant.RedHat,
|
||||
constant.Oracle,
|
||||
constant.Debian,
|
||||
constant.Ubuntu,
|
||||
constant.Raspbian:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsContainer returns whether this ServerInfo is about container
|
||||
func (r ScanResult) IsContainer() bool {
|
||||
return 0 < len(r.Container.ContainerID)
|
||||
}
|
||||
|
||||
// RemoveRaspbianPackFromResult is for Raspberry Pi and removes the Raspberry Pi dedicated package from ScanResult.
|
||||
func (r ScanResult) RemoveRaspbianPackFromResult() ScanResult {
|
||||
if r.Family != constant.Raspbian {
|
||||
return r
|
||||
}
|
||||
|
||||
result := r
|
||||
packs := make(Packages)
|
||||
for _, pack := range r.Packages {
|
||||
if !IsRaspbianPackage(pack.Name, pack.Version) {
|
||||
packs[pack.Name] = pack
|
||||
}
|
||||
}
|
||||
srcPacks := make(SrcPackages)
|
||||
for _, pack := range r.SrcPackages {
|
||||
if !IsRaspbianPackage(pack.Name, pack.Version) {
|
||||
srcPacks[pack.Name] = pack
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
result.Packages = packs
|
||||
result.SrcPackages = srcPacks
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// ClearFields clears a given fields of ScanResult
|
||||
func (r ScanResult) ClearFields(targetTagNames []string) ScanResult {
|
||||
if len(targetTagNames) == 0 {
|
||||
return r
|
||||
}
|
||||
target := map[string]bool{}
|
||||
for _, n := range targetTagNames {
|
||||
target[strings.ToLower(n)] = true
|
||||
}
|
||||
t := reflect.ValueOf(r).Type()
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
jsonValue := strings.Split(f.Tag.Get("json"), ",")[0]
|
||||
if ok := target[strings.ToLower(jsonValue)]; ok {
|
||||
vv := reflect.New(f.Type).Elem().Interface()
|
||||
reflect.ValueOf(&r).Elem().FieldByName(f.Name).Set(reflect.ValueOf(vv))
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
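ClearFields above zeroes whole sections of a report by matching the lower-cased json tag via reflection. A hedged usage sketch; the chosen field and sample data are illustrative:

```go
package main

import (
	"fmt"

	"github.com/future-architect/vuls/models"
)

func main() {
	r := models.ScanResult{
		ServerName: "web01",
		Packages:   models.Packages{"openssl": models.Package{Name: "openssl"}},
	}
	// Tag names are matched against the lower-cased json tag, so "packages"
	// zeroes the Packages map while everything else is left untouched.
	stripped := r.ClearFields([]string{"packages"})
	fmt.Println(len(stripped.Packages), stripped.ServerName) // 0 web01
}
```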
// CheckEOL checks the EndOfLife of the OS
|
||||
func (r *ScanResult) CheckEOL() {
|
||||
switch r.Family {
|
||||
case constant.ServerTypePseudo, constant.Raspbian:
|
||||
return
|
||||
}
|
||||
|
||||
eol, found := config.GetEOL(r.Family, r.Release)
|
||||
if !found {
|
||||
r.Warnings = append(r.Warnings,
|
||||
fmt.Sprintf("Failed to check EOL. Register the issue to https://github.com/future-architect/vuls/issues with the information in `Family: %s Release: %s`",
|
||||
r.Family, r.Release))
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
if eol.IsStandardSupportEnded(now) {
|
||||
r.Warnings = append(r.Warnings, "Standard OS support is EOL(End-of-Life). Purchase extended support if available or Upgrading your OS is strongly recommended.")
|
||||
if eol.ExtendedSupportUntil.IsZero() {
|
||||
return
|
||||
}
|
||||
if !eol.IsExtendedSuppportEnded(now) {
|
||||
r.Warnings = append(r.Warnings,
|
||||
fmt.Sprintf("Extended support available until %s. Check the vendor site.",
|
||||
eol.ExtendedSupportUntil.Format("2006-01-02")))
|
||||
} else {
|
||||
r.Warnings = append(r.Warnings,
|
||||
"Extended support is also EOL. There are many Vulnerabilities that are not detected, Upgrading your OS strongly recommended.")
|
||||
}
|
||||
} else if !eol.StandardSupportUntil.IsZero() &&
|
||||
now.AddDate(0, 3, 0).After(eol.StandardSupportUntil) {
|
||||
r.Warnings = append(r.Warnings,
|
||||
fmt.Sprintf("Standard OS support will be end in 3 months. EOL date: %s",
|
||||
eol.StandardSupportUntil.Format("2006-01-02")))
|
||||
}
|
||||
}
|
||||
|
||||
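CheckEOL above never fails a scan; it only appends human-readable warnings when config.GetEOL knows the OS is near or past end of life. A hedged usage sketch with illustrative family and release values:

```go
package main

import (
	"fmt"

	"github.com/future-architect/vuls/models"
)

func main() {
	r := models.ScanResult{Family: "centos", Release: "6.10"}
	// CheckEOL only appends to r.Warnings; it never returns an error.
	r.CheckEOL()
	for _, w := range r.Warnings {
		fmt.Println("WARN:", w)
	}
}
```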
// SortForJSONOutput sort list elements in the ScanResult to diff in integration-test
|
||||
func (r *ScanResult) SortForJSONOutput() {
|
||||
for k, v := range r.Packages {
|
||||
sort.SliceStable(v.AffectedProcs, func(i, j int) bool {
|
||||
return v.AffectedProcs[i].PID < v.AffectedProcs[j].PID
|
||||
})
|
||||
sort.SliceStable(v.NeedRestartProcs, func(i, j int) bool {
|
||||
return v.NeedRestartProcs[i].PID < v.NeedRestartProcs[j].PID
|
||||
})
|
||||
r.Packages[k] = v
|
||||
}
|
||||
|
||||
for k, v := range r.ScannedCves {
|
||||
sort.SliceStable(v.AffectedPackages, func(i, j int) bool {
|
||||
return v.AffectedPackages[i].Name < v.AffectedPackages[j].Name
|
||||
})
|
||||
sort.SliceStable(v.DistroAdvisories, func(i, j int) bool {
|
||||
return v.DistroAdvisories[i].AdvisoryID < v.DistroAdvisories[j].AdvisoryID
|
||||
})
|
||||
sort.SliceStable(v.Exploits, func(i, j int) bool {
|
||||
return v.Exploits[i].ID < v.Exploits[j].ID
|
||||
})
|
||||
sort.SliceStable(v.Metasploits, func(i, j int) bool {
|
||||
return v.Metasploits[i].Name < v.Metasploits[j].Name
|
||||
})
|
||||
for kk, vv := range v.CveContents {
|
||||
sort.SliceStable(vv.References, func(i, j int) bool {
|
||||
return vv.References[i].Link < vv.References[j].Link
|
||||
})
|
||||
v.CveContents[kk] = vv
|
||||
}
|
||||
|
||||
sort.SliceStable(v.AlertDict.En, func(i, j int) bool {
|
||||
return v.AlertDict.En[i].Title < v.AlertDict.En[j].Title
|
||||
})
|
||||
sort.SliceStable(v.AlertDict.Ja, func(i, j int) bool {
|
||||
return v.AlertDict.Ja[i].Title < v.AlertDict.Ja[j].Title
|
||||
})
|
||||
|
||||
r.ScannedCves[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
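SortForJSONOutput above makes slice ordering inside the result deterministic so the integration tests can diff two reports byte for byte. A hedged sketch of calling it right before the report is written; writeStableReport and the file path are illustrative:

```go
package main

import (
	"encoding/json"
	"os"

	"github.com/future-architect/vuls/models"
)

// writeStableReport sorts slices inside the result before writing it, so two
// runs over the same host produce identical JSON that is easy to diff.
func writeStableReport(r *models.ScanResult, path string) error {
	r.SortForJSONOutput()
	b, err := json.MarshalIndent(r, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(path, b, 0644)
}

func main() {
	r := models.ScanResult{ServerName: "web01", ScannedCves: models.VulnInfos{}}
	if err := writeStableReport(&r, "web01.json"); err != nil {
		panic(err)
	}
}
```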
// CweDict is a dictionary for CWE
|
||||
type CweDict map[string]CweDictEntry
|
||||
|
||||
// Get the name, url, top10URL for the specified cweID, lang
|
||||
func (c CweDict) Get(cweID, lang string) (name, url, top10Rank, top10URL, cweTop25Rank, cweTop25URL, sansTop25Rank, sansTop25URL string) {
|
||||
cweNum := strings.TrimPrefix(cweID, "CWE-")
|
||||
switch config.Conf.Lang {
|
||||
switch lang {
|
||||
case "ja":
|
||||
if dict, ok := c[cweNum]; ok && dict.OwaspTopTen2017 != "" {
|
||||
top10Rank = dict.OwaspTopTen2017
|
||||
@@ -116,412 +476,3 @@ type CweDictEntry struct {
|
||||
CweTopTwentyfive2019 string `json:"cweTopTwentyfive2019"`
|
||||
SansTopTwentyfive string `json:"sansTopTwentyfive"`
|
||||
}
|
||||
|
||||
// Kernel has the Release, version and whether need restart
|
||||
type Kernel struct {
|
||||
Release string `json:"release"`
|
||||
Version string `json:"version"`
|
||||
RebootRequired bool `json:"rebootRequired"`
|
||||
}
|
||||
|
||||
// FilterByCvssOver is filter function.
|
||||
func (r ScanResult) FilterByCvssOver(over float64) ScanResult {
|
||||
filtered := r.ScannedCves.Find(func(v VulnInfo) bool {
|
||||
v2Max := v.MaxCvss2Score()
|
||||
v3Max := v.MaxCvss3Score()
|
||||
max := v2Max.Value.Score
|
||||
if max < v3Max.Value.Score {
|
||||
max = v3Max.Value.Score
|
||||
}
|
||||
if over <= max {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
r.ScannedCves = filtered
|
||||
return r
|
||||
}
|
||||
|
||||
// FilterIgnoreCves is filter function.
|
||||
func (r ScanResult) FilterIgnoreCves() ScanResult {
|
||||
|
||||
ignoreCves := []string{}
|
||||
if len(r.Container.Name) == 0 {
|
||||
ignoreCves = config.Conf.Servers[r.ServerName].IgnoreCves
|
||||
} else {
|
||||
if s, ok := config.Conf.Servers[r.ServerName]; ok {
|
||||
if con, ok := s.Containers[r.Container.Name]; ok {
|
||||
ignoreCves = con.IgnoreCves
|
||||
} else {
|
||||
return r
|
||||
}
|
||||
} else {
|
||||
util.Log.Errorf("%s is not found in config.toml",
|
||||
r.ServerName)
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
||||
filtered := r.ScannedCves.Find(func(v VulnInfo) bool {
|
||||
for _, c := range ignoreCves {
|
||||
if v.CveID == c {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
r.ScannedCves = filtered
|
||||
return r
|
||||
}
|
||||
|
||||
// FilterUnfixed is filter function.
|
||||
func (r ScanResult) FilterUnfixed() ScanResult {
|
||||
if !config.Conf.IgnoreUnfixed {
|
||||
return r
|
||||
}
|
||||
filtered := r.ScannedCves.Find(func(v VulnInfo) bool {
|
||||
// Report cves detected by CPE because Vuls can't know 'fixed' or 'unfixed'
|
||||
if len(v.CpeURIs) != 0 {
|
||||
return true
|
||||
}
|
||||
NotFixedAll := true
|
||||
for _, p := range v.AffectedPackages {
|
||||
NotFixedAll = NotFixedAll && p.NotFixedYet
|
||||
}
|
||||
return !NotFixedAll
|
||||
})
|
||||
r.ScannedCves = filtered
|
||||
return r
|
||||
}
|
||||
|
||||
// FilterIgnorePkgs is filter function.
|
||||
func (r ScanResult) FilterIgnorePkgs() ScanResult {
|
||||
var ignorePkgsRegexps []string
|
||||
if len(r.Container.Name) == 0 {
|
||||
ignorePkgsRegexps = config.Conf.Servers[r.ServerName].IgnorePkgsRegexp
|
||||
} else {
|
||||
if s, ok := config.Conf.Servers[r.ServerName]; ok {
|
||||
if con, ok := s.Containers[r.Container.Name]; ok {
|
||||
ignorePkgsRegexps = con.IgnorePkgsRegexp
|
||||
} else {
|
||||
return r
|
||||
}
|
||||
} else {
|
||||
util.Log.Errorf("%s is not found in config.toml",
|
||||
r.ServerName)
|
||||
return r
|
||||
}
|
||||
}
|
||||
|
||||
regexps := []*regexp.Regexp{}
|
||||
for _, pkgRegexp := range ignorePkgsRegexps {
|
||||
re, err := regexp.Compile(pkgRegexp)
|
||||
if err != nil {
|
||||
util.Log.Errorf("Failed to parse %s. err: %+v", pkgRegexp, err)
|
||||
continue
|
||||
} else {
|
||||
regexps = append(regexps, re)
|
||||
}
|
||||
}
|
||||
if len(regexps) == 0 {
|
||||
return r
|
||||
}
|
||||
|
||||
filtered := r.ScannedCves.Find(func(v VulnInfo) bool {
|
||||
if len(v.AffectedPackages) == 0 {
|
||||
return true
|
||||
}
|
||||
for _, p := range v.AffectedPackages {
|
||||
match := false
|
||||
for _, re := range regexps {
|
||||
if re.MatchString(p.Name) {
|
||||
match = true
|
||||
}
|
||||
}
|
||||
if !match {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
|
||||
r.ScannedCves = filtered
|
||||
return r
|
||||
}
|
||||
|
||||
// FilterInactiveWordPressLibs is filter function.
|
||||
func (r ScanResult) FilterInactiveWordPressLibs() ScanResult {
|
||||
if !config.Conf.Servers[r.ServerName].WordPress.IgnoreInactive {
|
||||
return r
|
||||
}
|
||||
|
||||
filtered := r.ScannedCves.Find(func(v VulnInfo) bool {
|
||||
if len(v.WpPackageFixStats) == 0 {
|
||||
return true
|
||||
}
|
||||
// Ignore if all libs in this vulnInfo inactive
|
||||
for _, wp := range v.WpPackageFixStats {
|
||||
if p, ok := r.WordPressPackages.Find(wp.Name); ok {
|
||||
if p.Status != Inactive {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
})
|
||||
r.ScannedCves = filtered
|
||||
return r
|
||||
}
|
||||
|
||||
// ReportFileName returns the filename on localhost without extension
|
||||
func (r ScanResult) ReportFileName() (name string) {
|
||||
if len(r.Container.ContainerID) == 0 {
|
||||
return fmt.Sprintf("%s", r.ServerName)
|
||||
}
|
||||
return fmt.Sprintf("%s@%s", r.Container.Name, r.ServerName)
|
||||
}
|
||||
|
||||
// ReportKeyName returns the name of key on S3, Azure-Blob without extension
|
||||
func (r ScanResult) ReportKeyName() (name string) {
|
||||
timestr := r.ScannedAt.Format(time.RFC3339)
|
||||
if len(r.Container.ContainerID) == 0 {
|
||||
return fmt.Sprintf("%s/%s", timestr, r.ServerName)
|
||||
}
|
||||
return fmt.Sprintf("%s/%s@%s", timestr, r.Container.Name, r.ServerName)
|
||||
}
|
||||
|
||||
// ServerInfo returns server name one line
|
||||
func (r ScanResult) ServerInfo() string {
|
||||
if len(r.Container.ContainerID) == 0 {
|
||||
return fmt.Sprintf("%s (%s%s)",
|
||||
r.FormatServerName(), r.Family, r.Release)
|
||||
}
|
||||
return fmt.Sprintf(
|
||||
"%s (%s%s) on %s",
|
||||
r.FormatServerName(),
|
||||
r.Family,
|
||||
r.Release,
|
||||
r.ServerName,
|
||||
)
|
||||
}
|
||||
|
||||
// ServerInfoTui returns server information for TUI sidebar
|
||||
func (r ScanResult) ServerInfoTui() string {
|
||||
if len(r.Container.ContainerID) == 0 {
|
||||
line := fmt.Sprintf("%s (%s%s)",
|
||||
r.ServerName, r.Family, r.Release)
|
||||
if len(r.Warnings) != 0 {
|
||||
line = "[Warn] " + line
|
||||
}
|
||||
if r.RunningKernel.RebootRequired {
|
||||
return "[Reboot] " + line
|
||||
}
|
||||
return line
|
||||
}
|
||||
|
||||
fmtstr := "|-- %s (%s%s)"
|
||||
if r.RunningKernel.RebootRequired {
|
||||
fmtstr = "|-- [Reboot] %s (%s%s)"
|
||||
}
|
||||
return fmt.Sprintf(fmtstr, r.Container.Name, r.Family, r.Release)
|
||||
}
|
||||
|
||||
// FormatServerName returns server and container name
|
||||
func (r ScanResult) FormatServerName() (name string) {
|
||||
if len(r.Container.ContainerID) == 0 {
|
||||
name = r.ServerName
|
||||
} else {
|
||||
name = fmt.Sprintf("%s@%s",
|
||||
r.Container.Name, r.ServerName)
|
||||
}
|
||||
if r.RunningKernel.RebootRequired {
|
||||
name = "[Reboot Required] " + name
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// FormatTextReportHeader returns header of text report
|
||||
func (r ScanResult) FormatTextReportHeader() string {
|
||||
var buf bytes.Buffer
|
||||
for i := 0; i < len(r.ServerInfo()); i++ {
|
||||
buf.WriteString("=")
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s\n%s\n%s, %s, %s, %s, %s, %s\n",
|
||||
r.ServerInfo(),
|
||||
buf.String(),
|
||||
r.ScannedCves.FormatCveSummary(),
|
||||
r.ScannedCves.FormatFixedStatus(r.Packages),
|
||||
r.FormatUpdatablePacksSummary(),
|
||||
r.FormatExploitCveSummary(),
|
||||
r.FormatMetasploitCveSummary(),
|
||||
r.FormatAlertSummary(),
|
||||
)
|
||||
}
|
||||
|
||||
// FormatUpdatablePacksSummary returns a summary of updatable packages
|
||||
func (r ScanResult) FormatUpdatablePacksSummary() string {
|
||||
if !r.isDisplayUpdatableNum() {
|
||||
return fmt.Sprintf("%d installed", len(r.Packages))
|
||||
}
|
||||
|
||||
nUpdatable := 0
|
||||
for _, p := range r.Packages {
|
||||
if p.NewVersion == "" {
|
||||
continue
|
||||
}
|
||||
if p.Version != p.NewVersion || p.Release != p.NewRelease {
|
||||
nUpdatable++
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%d installed, %d updatable",
|
||||
len(r.Packages),
|
||||
nUpdatable)
|
||||
}
|
||||
|
||||
// FormatExploitCveSummary returns a summary of exploit cve
|
||||
func (r ScanResult) FormatExploitCveSummary() string {
|
||||
nExploitCve := 0
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if 0 < len(vuln.Exploits) {
|
||||
nExploitCve++
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%d exploits", nExploitCve)
|
||||
}
|
||||
|
||||
// FormatMetasploitCveSummary returns a summary of exploit cve
|
||||
func (r ScanResult) FormatMetasploitCveSummary() string {
|
||||
nMetasploitCve := 0
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if 0 < len(vuln.Metasploits) {
|
||||
nMetasploitCve++
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("%d modules", nMetasploitCve)
|
||||
}
|
||||
|
||||
// FormatAlertSummary returns a summary of CERT alerts
|
||||
func (r ScanResult) FormatAlertSummary() string {
|
||||
jaCnt := 0
|
||||
enCnt := 0
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if len(vuln.AlertDict.En) > 0 {
|
||||
enCnt += len(vuln.AlertDict.En)
|
||||
}
|
||||
if len(vuln.AlertDict.Ja) > 0 {
|
||||
jaCnt += len(vuln.AlertDict.Ja)
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("en: %d, ja: %d alerts", enCnt, jaCnt)
|
||||
}
|
||||
|
||||
func (r ScanResult) isDisplayUpdatableNum() bool {
|
||||
if r.Family == config.FreeBSD {
|
||||
return false
|
||||
}
|
||||
|
||||
var mode config.ScanMode
|
||||
s, _ := config.Conf.Servers[r.ServerName]
|
||||
mode = s.Mode
|
||||
|
||||
if mode.IsOffline() {
|
||||
return false
|
||||
}
|
||||
if mode.IsFastRoot() || mode.IsDeep() {
|
||||
return true
|
||||
}
|
||||
if mode.IsFast() {
|
||||
switch r.Family {
|
||||
case config.RedHat,
|
||||
config.Oracle,
|
||||
config.Debian,
|
||||
config.Ubuntu,
|
||||
config.Raspbian:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// IsContainer returns whether this ServerInfo is about container
|
||||
func (r ScanResult) IsContainer() bool {
|
||||
return 0 < len(r.Container.ContainerID)
|
||||
}
|
||||
|
||||
// IsDeepScanMode checks if the scan mode is deep scan mode.
|
||||
func (r ScanResult) IsDeepScanMode() bool {
|
||||
for _, s := range r.Config.Scan.Servers {
|
||||
for _, m := range s.ScanMode {
|
||||
if m == "deep" {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Container has Container information
|
||||
type Container struct {
|
||||
ContainerID string `json:"containerID"`
|
||||
Name string `json:"name"`
|
||||
Image string `json:"image"`
|
||||
Type string `json:"type"`
|
||||
UUID string `json:"uuid"`
|
||||
}
|
||||
|
||||
// Platform has platform information
|
||||
type Platform struct {
|
||||
Name string `json:"name"` // aws or azure or gcp or other...
|
||||
InstanceID string `json:"instanceID"`
|
||||
}
|
||||
|
||||
// RemoveRaspbianPackFromResult is for Raspberry Pi and removes the Raspberry Pi dedicated package from ScanResult.
|
||||
func (r ScanResult) RemoveRaspbianPackFromResult() ScanResult {
|
||||
if r.Family != config.Raspbian {
|
||||
return r
|
||||
}
|
||||
|
||||
result := r
|
||||
packs := make(Packages)
|
||||
for _, pack := range r.Packages {
|
||||
if !IsRaspbianPackage(pack.Name, pack.Version) {
|
||||
packs[pack.Name] = pack
|
||||
}
|
||||
}
|
||||
srcPacks := make(SrcPackages)
|
||||
for _, pack := range r.SrcPackages {
|
||||
if !IsRaspbianPackage(pack.Name, pack.Version) {
|
||||
srcPacks[pack.Name] = pack
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
result.Packages = packs
|
||||
result.SrcPackages = srcPacks
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// ClearFields clears a given fields of ScanResult
|
||||
func (r ScanResult) ClearFields(targetTagNames []string) ScanResult {
|
||||
if len(targetTagNames) == 0 {
|
||||
return r
|
||||
}
|
||||
target := map[string]bool{}
|
||||
for _, n := range targetTagNames {
|
||||
target[strings.ToLower(n)] = true
|
||||
}
|
||||
t := reflect.ValueOf(r).Type()
|
||||
for i := 0; i < t.NumField(); i++ {
|
||||
f := t.Field(i)
|
||||
jsonValue := strings.Split(f.Tag.Get("json"), ",")[0]
|
||||
if ok := target[strings.ToLower(jsonValue)]; ok {
|
||||
vv := reflect.New(f.Type).Elem().Interface()
|
||||
reflect.ValueOf(&r).Elem().FieldByName(f.Name).Set(reflect.ValueOf(vv))
|
||||
}
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
@@ -3,635 +3,11 @@ package models
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/k0kubun/pp"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
)
|
||||
|
||||
func TestFilterByCvssOver(t *testing.T) {
|
||||
type in struct {
|
||||
over float64
|
||||
rs ScanResult
|
||||
}
|
||||
var tests = []struct {
|
||||
in in
|
||||
out ScanResult
|
||||
}{
|
||||
{
|
||||
in: in{
|
||||
over: 7.0,
|
||||
rs: ScanResult{
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0001",
|
||||
Cvss2Score: 7.1,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0002",
|
||||
Cvss2Score: 6.9,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Score: 6.9,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
CveContent{
|
||||
Type: Jvn,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Score: 7.2,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0001",
|
||||
Cvss2Score: 7.1,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Score: 6.9,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
CveContent{
|
||||
Type: Jvn,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Score: 7.2,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// OVAL Severity
|
||||
{
|
||||
in: in{
|
||||
over: 7.0,
|
||||
rs: ScanResult{
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Ubuntu,
|
||||
CveID: "CVE-2017-0001",
|
||||
Cvss2Severity: "HIGH",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: RedHat,
|
||||
CveID: "CVE-2017-0002",
|
||||
Cvss2Severity: "CRITICAL",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Oracle,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Severity: "IMPORTANT",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Ubuntu,
|
||||
CveID: "CVE-2017-0001",
|
||||
Cvss2Severity: "HIGH",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: RedHat,
|
||||
CveID: "CVE-2017-0002",
|
||||
Cvss2Severity: "CRITICAL",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Oracle,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Severity: "IMPORTANT",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
actual := tt.in.rs.FilterByCvssOver(tt.in.over)
|
||||
for k := range tt.out.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
func TestFilterIgnoreCveIDs(t *testing.T) {
|
||||
type in struct {
|
||||
cves []string
|
||||
rs ScanResult
|
||||
}
|
||||
var tests = []struct {
|
||||
in in
|
||||
out ScanResult
|
||||
}{
|
||||
{
|
||||
in: in{
|
||||
cves: []string{"CVE-2017-0002"},
|
||||
rs: ScanResult{
|
||||
ServerName: "name",
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ServerName: "name",
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
config.Conf.Servers = map[string]config.ServerInfo{
|
||||
"name": {IgnoreCves: tt.in.cves},
|
||||
}
|
||||
actual := tt.in.rs.FilterIgnoreCves()
|
||||
for k := range tt.out.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
for k := range actual.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterIgnoreCveIDsContainer(t *testing.T) {
|
||||
type in struct {
|
||||
cves []string
|
||||
rs ScanResult
|
||||
}
|
||||
var tests = []struct {
|
||||
in in
|
||||
out ScanResult
|
||||
}{
|
||||
{
|
||||
in: in{
|
||||
cves: []string{"CVE-2017-0002"},
|
||||
rs: ScanResult{
|
||||
ServerName: "name",
|
||||
Container: Container{Name: "dockerA"},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ServerName: "name",
|
||||
Container: Container{Name: "dockerA"},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
config.Conf.Servers = map[string]config.ServerInfo{
|
||||
"name": {
|
||||
Containers: map[string]config.ContainerSetting{
|
||||
"dockerA": {
|
||||
IgnoreCves: tt.in.cves,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
actual := tt.in.rs.FilterIgnoreCves()
|
||||
for k := range tt.out.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
for k := range actual.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterUnfixed(t *testing.T) {
|
||||
var tests = []struct {
|
||||
in ScanResult
|
||||
out ScanResult
|
||||
}{
|
||||
{
|
||||
in: ScanResult{
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "a",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "b",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "c",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
{
|
||||
Name: "d",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "b",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "c",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
{
|
||||
Name: "d",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
config.Conf.IgnoreUnfixed = true
|
||||
actual := tt.in.FilterUnfixed()
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves, actual.ScannedCves) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves)
|
||||
a := pp.Sprintf("%v", actual.ScannedCves)
|
||||
t.Errorf("[%d] expected: %v\n actual: %v\n", i, o, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterIgnorePkgs(t *testing.T) {
|
||||
type in struct {
|
||||
ignorePkgsRegexp []string
|
||||
rs ScanResult
|
||||
}
|
||||
var tests = []struct {
|
||||
in in
|
||||
out ScanResult
|
||||
}{
|
||||
{
|
||||
in: in{
|
||||
ignorePkgsRegexp: []string{"^kernel"},
|
||||
rs: ScanResult{
|
||||
ServerName: "name",
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ServerName: "name",
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: in{
|
||||
ignorePkgsRegexp: []string{"^kernel"},
|
||||
rs: ScanResult{
|
||||
ServerName: "name",
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ServerName: "name",
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: in{
|
||||
ignorePkgsRegexp: []string{"^kernel", "^vim", "^bind"},
|
||||
rs: ScanResult{
|
||||
ServerName: "name",
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ServerName: "name",
|
||||
ScannedCves: VulnInfos{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
config.Conf.Servers = map[string]config.ServerInfo{
|
||||
"name": {IgnorePkgsRegexp: tt.in.ignorePkgsRegexp},
|
||||
}
|
||||
actual := tt.in.rs.FilterIgnorePkgs()
|
||||
for k := range tt.out.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
for k := range actual.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFilterIgnorePkgsContainer(t *testing.T) {
|
||||
type in struct {
|
||||
ignorePkgsRegexp []string
|
||||
rs ScanResult
|
||||
}
|
||||
var tests = []struct {
|
||||
in in
|
||||
out ScanResult
|
||||
}{
|
||||
{
|
||||
in: in{
|
||||
ignorePkgsRegexp: []string{"^kernel"},
|
||||
rs: ScanResult{
|
||||
ServerName: "name",
|
||||
Container: Container{Name: "dockerA"},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ServerName: "name",
|
||||
Container: Container{Name: "dockerA"},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: in{
|
||||
ignorePkgsRegexp: []string{"^kernel"},
|
||||
rs: ScanResult{
|
||||
ServerName: "name",
|
||||
Container: Container{Name: "dockerA"},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ServerName: "name",
|
||||
Container: Container{Name: "dockerA"},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: in{
|
||||
ignorePkgsRegexp: []string{"^kernel", "^vim", "^bind"},
|
||||
rs: ScanResult{
|
||||
ServerName: "name",
|
||||
Container: Container{Name: "dockerA"},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: ScanResult{
|
||||
ServerName: "name",
|
||||
Container: Container{Name: "dockerA"},
|
||||
ScannedCves: VulnInfos{},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
config.Conf.Servers = map[string]config.ServerInfo{
|
||||
"name": {
|
||||
Containers: map[string]config.ContainerSetting{
|
||||
"dockerA": {
|
||||
IgnorePkgsRegexp: tt.in.ignorePkgsRegexp,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
actual := tt.in.rs.FilterIgnorePkgs()
|
||||
for k := range tt.out.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
for k := range actual.ScannedCves {
|
||||
if !reflect.DeepEqual(tt.out.ScannedCves[k], actual.ScannedCves[k]) {
|
||||
o := pp.Sprintf("%v", tt.out.ScannedCves[k])
|
||||
a := pp.Sprintf("%v", actual.ScannedCves[k])
|
||||
t.Errorf("[%s] expected: %v\n actual: %v\n", k, o, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsDisplayUpdatableNum(t *testing.T) {
|
||||
var tests = []struct {
|
||||
mode []byte
|
||||
@@ -652,52 +28,52 @@ func TestIsDisplayUpdatableNum(t *testing.T) {
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.RedHat,
|
||||
family: constant.RedHat,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.Oracle,
|
||||
family: constant.Oracle,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.Debian,
|
||||
family: constant.Debian,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.Ubuntu,
|
||||
family: constant.Ubuntu,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.Raspbian,
|
||||
family: constant.Raspbian,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.CentOS,
|
||||
family: constant.CentOS,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.Amazon,
|
||||
family: constant.Amazon,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.FreeBSD,
|
||||
family: constant.FreeBSD,
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.OpenSUSE,
|
||||
family: constant.OpenSUSE,
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
mode: []byte{config.Fast},
|
||||
family: config.Alpine,
|
||||
family: constant.Alpine,
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
@@ -707,16 +83,284 @@ func TestIsDisplayUpdatableNum(t *testing.T) {
|
||||
for _, m := range tt.mode {
|
||||
mode.Set(m)
|
||||
}
|
||||
config.Conf.Servers = map[string]config.ServerInfo{
|
||||
"name": {Mode: mode},
|
||||
}
|
||||
r := ScanResult{
|
||||
ServerName: "name",
|
||||
Family: tt.family,
|
||||
}
|
||||
act := r.isDisplayUpdatableNum()
|
||||
act := r.isDisplayUpdatableNum(mode)
|
||||
if tt.expected != act {
|
||||
t.Errorf("[%d] expected %#v, actual %#v", i, tt.expected, act)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanResult_Sort(t *testing.T) {
|
||||
type fields struct {
|
||||
Packages Packages
|
||||
ScannedCves VulnInfos
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
expected fields
|
||||
}{
|
||||
{
|
||||
name: "already asc",
|
||||
fields: fields{
|
||||
Packages: map[string]Package{
|
||||
"pkgA": {
|
||||
Name: "pkgA",
|
||||
AffectedProcs: []AffectedProcess{
|
||||
{PID: "1", Name: "procB"},
|
||||
{PID: "2", Name: "procA"},
|
||||
},
|
||||
NeedRestartProcs: []NeedRestartProcess{
|
||||
{PID: "1"},
|
||||
{PID: "2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2014-3591": VulnInfo{
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
PackageFixStatus{Name: "pkgA"},
|
||||
PackageFixStatus{Name: "pkgB"},
|
||||
},
|
||||
DistroAdvisories: []DistroAdvisory{
|
||||
{AdvisoryID: "adv-1"},
|
||||
{AdvisoryID: "adv-2"},
|
||||
},
|
||||
Exploits: []Exploit{
|
||||
{ID: "a"},
|
||||
{ID: "b"},
|
||||
},
|
||||
Metasploits: []Metasploit{
|
||||
{Name: "a"},
|
||||
{Name: "b"},
|
||||
},
|
||||
CveContents: CveContents{
|
||||
"nvd": CveContent{
|
||||
References: References{
|
||||
Reference{Link: "a"},
|
||||
Reference{Link: "b"},
|
||||
},
|
||||
},
|
||||
"jvn": CveContent{
|
||||
References: References{
|
||||
Reference{Link: "a"},
|
||||
Reference{Link: "b"},
|
||||
},
|
||||
},
|
||||
},
|
||||
AlertDict: AlertDict{
|
||||
En: []Alert{
|
||||
{Title: "a"},
|
||||
{Title: "b"},
|
||||
},
|
||||
Ja: []Alert{
|
||||
{Title: "a"},
|
||||
{Title: "b"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: fields{
|
||||
Packages: map[string]Package{
|
||||
"pkgA": {
|
||||
Name: "pkgA",
|
||||
AffectedProcs: []AffectedProcess{
|
||||
{PID: "1", Name: "procB"},
|
||||
{PID: "2", Name: "procA"},
|
||||
},
|
||||
NeedRestartProcs: []NeedRestartProcess{
|
||||
{PID: "1"},
|
||||
{PID: "2"},
|
||||
},
|
||||
},
|
||||
},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2014-3591": VulnInfo{
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
PackageFixStatus{Name: "pkgA"},
|
||||
PackageFixStatus{Name: "pkgB"},
|
||||
},
|
||||
DistroAdvisories: []DistroAdvisory{
|
||||
{AdvisoryID: "adv-1"},
|
||||
{AdvisoryID: "adv-2"},
|
||||
},
|
||||
Exploits: []Exploit{
|
||||
{ID: "a"},
|
||||
{ID: "b"},
|
||||
},
|
||||
Metasploits: []Metasploit{
|
||||
{Name: "a"},
|
||||
{Name: "b"},
|
||||
},
|
||||
CveContents: CveContents{
|
||||
"nvd": CveContent{
|
||||
References: References{
|
||||
Reference{Link: "a"},
|
||||
Reference{Link: "b"},
|
||||
},
|
||||
},
|
||||
"jvn": CveContent{
|
||||
References: References{
|
||||
Reference{Link: "a"},
|
||||
Reference{Link: "b"},
|
||||
},
|
||||
},
|
||||
},
|
||||
AlertDict: AlertDict{
|
||||
En: []Alert{
|
||||
{Title: "a"},
|
||||
{Title: "b"},
|
||||
},
|
||||
Ja: []Alert{
|
||||
{Title: "a"},
|
||||
{Title: "b"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sort",
|
||||
fields: fields{
|
||||
Packages: map[string]Package{
|
||||
"pkgA": {
|
||||
Name: "pkgA",
|
||||
AffectedProcs: []AffectedProcess{
|
||||
{PID: "2", Name: "procA"},
|
||||
{PID: "1", Name: "procB"},
|
||||
},
|
||||
NeedRestartProcs: []NeedRestartProcess{
|
||||
{PID: "91"},
|
||||
{PID: "90"},
|
||||
},
|
||||
},
|
||||
},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2014-3591": VulnInfo{
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
PackageFixStatus{Name: "pkgB"},
|
||||
PackageFixStatus{Name: "pkgA"},
|
||||
},
|
||||
DistroAdvisories: []DistroAdvisory{
|
||||
{AdvisoryID: "adv-2"},
|
||||
{AdvisoryID: "adv-1"},
|
||||
},
|
||||
Exploits: []Exploit{
|
||||
{ID: "b"},
|
||||
{ID: "a"},
|
||||
},
|
||||
Metasploits: []Metasploit{
|
||||
{Name: "b"},
|
||||
{Name: "a"},
|
||||
},
|
||||
CveContents: CveContents{
|
||||
"nvd": CveContent{
|
||||
References: References{
|
||||
Reference{Link: "b"},
|
||||
Reference{Link: "a"},
|
||||
},
|
||||
},
|
||||
"jvn": CveContent{
|
||||
References: References{
|
||||
Reference{Link: "b"},
|
||||
Reference{Link: "a"},
|
||||
},
|
||||
},
|
||||
},
|
||||
AlertDict: AlertDict{
|
||||
En: []Alert{
|
||||
{Title: "b"},
|
||||
{Title: "a"},
|
||||
},
|
||||
Ja: []Alert{
|
||||
{Title: "b"},
|
||||
{Title: "a"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: fields{
|
||||
Packages: map[string]Package{
|
||||
"pkgA": {
|
||||
Name: "pkgA",
|
||||
AffectedProcs: []AffectedProcess{
|
||||
{PID: "1", Name: "procB"},
|
||||
{PID: "2", Name: "procA"},
|
||||
},
|
||||
NeedRestartProcs: []NeedRestartProcess{
|
||||
{PID: "90"},
|
||||
{PID: "91"},
|
||||
},
|
||||
},
|
||||
},
|
||||
ScannedCves: VulnInfos{
|
||||
"CVE-2014-3591": VulnInfo{
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
PackageFixStatus{Name: "pkgA"},
|
||||
PackageFixStatus{Name: "pkgB"},
|
||||
},
|
||||
DistroAdvisories: []DistroAdvisory{
|
||||
{AdvisoryID: "adv-1"},
|
||||
{AdvisoryID: "adv-2"},
|
||||
},
|
||||
Exploits: []Exploit{
|
||||
{ID: "a"},
|
||||
{ID: "b"},
|
||||
},
|
||||
Metasploits: []Metasploit{
|
||||
{Name: "a"},
|
||||
{Name: "b"},
|
||||
},
|
||||
CveContents: CveContents{
|
||||
"nvd": CveContent{
|
||||
References: References{
|
||||
Reference{Link: "a"},
|
||||
Reference{Link: "b"},
|
||||
},
|
||||
},
|
||||
"jvn": CveContent{
|
||||
References: References{
|
||||
Reference{Link: "a"},
|
||||
Reference{Link: "b"},
|
||||
},
|
||||
},
|
||||
},
|
||||
AlertDict: AlertDict{
|
||||
En: []Alert{
|
||||
{Title: "a"},
|
||||
{Title: "b"},
|
||||
},
|
||||
Ja: []Alert{
|
||||
{Title: "a"},
|
||||
{Title: "b"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
r := &ScanResult{
|
||||
Packages: tt.fields.Packages,
|
||||
ScannedCves: tt.fields.ScannedCves,
|
||||
}
|
||||
r.SortForJSONOutput()
|
||||
if !reflect.DeepEqual(r.Packages, tt.expected.Packages) {
|
||||
t.Errorf("act %+v, want %+v", r.Packages, tt.expected.Packages)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(r.ScannedCves, tt.expected.ScannedCves) {
|
||||
t.Errorf("act %+v, want %+v", r.ScannedCves, tt.expected.ScannedCves)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -65,10 +65,14 @@ func ConvertNvdJSONToModel(cveID string, nvd *cvedict.NvdJSON) (*CveContent, []E
	exploits := []Exploit{}
	mitigations := []Mitigation{}
	for _, r := range nvd.References {
		var tags []string
		if 0 < len(r.Tags) {
			tags = strings.Split(r.Tags, ",")
		}
		refs = append(refs, Reference{
			Link:   r.Link,
			Source: r.Source,
			Tags:   strings.Split(r.Tags, ","),
			Tags:   tags,
		})
		if strings.Contains(r.Tags, "Exploit") {
			exploits = append(exploits, Exploit{

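One note on the guard introduced above (a plain Go fact, not part of the diff): strings.Split on an empty string returns a one-element slice containing "", not an empty slice, so splitting unconditionally would attach a bogus empty tag. A minimal illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	fmt.Println(len(strings.Split("", ","))) // 1: a single empty-string element

	// The pattern used above: only split when there is something to split.
	raw := ""
	var tags []string
	if 0 < len(raw) {
		tags = strings.Split(raw, ",")
	}
	fmt.Println(tags == nil) // true: no bogus empty tag
}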
@@ -3,12 +3,13 @@ package models
import (
	"bytes"
	"fmt"
	"regexp"
	"sort"
	"strings"
	"time"

	"github.com/future-architect/vuls/config"
	exploitmodels "github.com/mozqnet/go-exploitdb/models"
	"github.com/future-architect/vuls/logging"
	exploitmodels "github.com/vulsio/go-exploitdb/models"
)

// VulnInfos has a map of VulnInfo
@@ -26,6 +27,81 @@ func (v VulnInfos) Find(f func(VulnInfo) bool) VulnInfos {
	return filtered
}

// FilterByCvssOver returns vulnerabilities whose max CVSS score is at or above the given threshold
func (v VulnInfos) FilterByCvssOver(over float64) VulnInfos {
	return v.Find(func(v VulnInfo) bool {
		if over <= v.MaxCvssScore().Value.Score {
			return true
		}
		return false
	})
}

// FilterIgnoreCves filters out the given CVE-IDs
func (v VulnInfos) FilterIgnoreCves(ignoreCveIDs []string) VulnInfos {
	return v.Find(func(v VulnInfo) bool {
		for _, c := range ignoreCveIDs {
			if v.CveID == c {
				return false
			}
		}
		return true
	})
}

// FilterUnfixed filters out unfixed CVE-IDs when ignoreUnfixed is set
func (v VulnInfos) FilterUnfixed(ignoreUnfixed bool) VulnInfos {
	if !ignoreUnfixed {
		return v
	}
	return v.Find(func(v VulnInfo) bool {
		// Report cves detected by CPE because Vuls can't know 'fixed' or 'unfixed'
		if len(v.CpeURIs) != 0 {
			return true
		}
		NotFixedAll := true
		for _, p := range v.AffectedPackages {
			NotFixedAll = NotFixedAll && p.NotFixedYet
		}
		return !NotFixedAll
	})
}

// FilterIgnorePkgs filters out vulnerabilities whose affected packages all match the given regexps
func (v VulnInfos) FilterIgnorePkgs(ignorePkgsRegexps []string) VulnInfos {
	regexps := []*regexp.Regexp{}
	for _, pkgRegexp := range ignorePkgsRegexps {
		re, err := regexp.Compile(pkgRegexp)
		if err != nil {
			logging.Log.Warnf("Failed to parse %s. err: %+v", pkgRegexp, err)
			continue
		} else {
			regexps = append(regexps, re)
		}
	}
	if len(regexps) == 0 {
		return v
	}

	return v.Find(func(v VulnInfo) bool {
		if len(v.AffectedPackages) == 0 {
			return true
		}
		for _, p := range v.AffectedPackages {
			match := false
			for _, re := range regexps {
				if re.MatchString(p.Name) {
					match = true
				}
			}
			if !match {
				return true
			}
		}
		return false
	})
}

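A minimal sketch of how these refactored filters compose for a hypothetical caller, assuming the github.com/future-architect/vuls/models import path; the CVE data is illustrative:

package main

import (
	"fmt"

	"github.com/future-architect/vuls/models" // assumed import path
)

func main() {
	vulns := models.VulnInfos{
		"CVE-2017-0001": {CveID: "CVE-2017-0001"},
		"CVE-2017-0002": {CveID: "CVE-2017-0002"},
	}
	// Each filter returns a new VulnInfos, so the filters chain naturally.
	filtered := vulns.
		FilterIgnoreCves([]string{"CVE-2017-0002"}).
		FilterIgnorePkgs([]string{"^kernel"}).
		FilterUnfixed(false). // false: keep CVEs that are not fixed yet
		FilterByCvssOver(0)   // 0: keep everything regardless of score
	fmt.Println(len(filtered)) // 1: only CVE-2017-0001 remains
}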
// FindScoredVulns return scored vulnerabilities
func (v VulnInfos) FindScoredVulns() VulnInfos {
	return v.Find(func(vv VulnInfo) bool {
@@ -57,11 +133,13 @@ func (v VulnInfos) ToSortedSlice() (sorted []VulnInfo) {
func (v VulnInfos) CountGroupBySeverity() map[string]int {
	m := map[string]int{}
	for _, vInfo := range v {
		score := vInfo.MaxCvss2Score().Value.Score
		score := vInfo.MaxCvss3Score().Value.Score
		if score < 0.1 {
			score = vInfo.MaxCvss3Score().Value.Score
			score = vInfo.MaxCvss2Score().Value.Score
		}
		switch {
		case 9 <= score:
			m["Critical"]++
		case 7.0 <= score:
			m["High"]++
		case 4.0 <= score:
@@ -78,14 +156,15 @@ func (v VulnInfos) CountGroupBySeverity() map[string]int {
// FormatCveSummary summarizes the number of CVEs grouped by CVSS severity
func (v VulnInfos) FormatCveSummary() string {
	m := v.CountGroupBySeverity()

	if config.Conf.IgnoreUnscoredCves {
		return fmt.Sprintf("Total: %d (High:%d Medium:%d Low:%d)",
			m["High"]+m["Medium"]+m["Low"], m["High"], m["Medium"], m["Low"])
	}
	return fmt.Sprintf("Total: %d (High:%d Medium:%d Low:%d ?:%d)",
	line := fmt.Sprintf("Total: %d (Critical:%d High:%d Medium:%d Low:%d ?:%d)",
		m["High"]+m["Medium"]+m["Low"]+m["Unknown"],
		m["High"], m["Medium"], m["Low"], m["Unknown"])
		m["Critical"], m["High"], m["Medium"], m["Low"], m["Unknown"])

	nPlus, nMinus := v.CountDiff()
	if 0 < nPlus || 0 < nMinus {
		line = fmt.Sprintf("%s +%d -%d", line, nPlus, nMinus)
	}
	return line
}

// FormatFixedStatus summarizes how many of the detected CVEs are fixed.
@@ -103,6 +182,18 @@ func (v VulnInfos) FormatFixedStatus(packs Packages) string {
	return fmt.Sprintf("%d/%d Fixed", fixed, total)
}

// CountDiff counts the number of added/removed CVE-IDs
func (v VulnInfos) CountDiff() (nPlus int, nMinus int) {
	for _, vInfo := range v {
		if vInfo.DiffStatus == DiffPlus {
			nPlus++
		} else if vInfo.DiffStatus == DiffMinus {
			nMinus++
		}
	}
	return
}

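For orientation, a hypothetical sketch of how the severity buckets and the diff counters surface in the summary line. It assumes the github.com/future-architect/vuls/models import path; the scores are illustrative and the expected buckets follow the tests shown earlier in this diff.

package main

import (
	"fmt"

	"github.com/future-architect/vuls/models" // assumed import path
)

func main() {
	vulns := models.VulnInfos{
		"CVE-2021-0001": {
			CveID:      "CVE-2021-0001",
			DiffStatus: models.DiffPlus, // newly detected since the previous scan
			CveContents: models.NewCveContents(models.CveContent{
				Type: models.Nvd, CveID: "CVE-2021-0001", Cvss3Score: 9.8,
			}),
		},
		"CVE-2021-0002": {CveID: "CVE-2021-0002"}, // no score -> "Unknown" bucket
	}
	fmt.Println(vulns.CountGroupBySeverity()) // expected: Critical:1, Unknown:1
	nPlus, nMinus := vulns.CountDiff()
	fmt.Println(nPlus, nMinus)            // 1 0
	fmt.Println(vulns.FormatCveSummary()) // e.g. "Total: ... (Critical:1 ... ?:1) +1 -0"
}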
// PackageFixStatuses is a list of PackageStatus
type PackageFixStatuses []PackageFixStatus

@@ -157,8 +248,8 @@ type VulnInfo struct {
	GitHubSecurityAlerts GitHubSecurityAlerts `json:"gitHubSecurityAlerts,omitempty"`
	WpPackageFixStats    WpPackageFixStats    `json:"wpPackageFixStats,omitempty"`
	LibraryFixedIns      LibraryFixedIns      `json:"libraryFixedIns,omitempty"`

	VulnType string `json:"vulnType,omitempty"`
	VulnType   string     `json:"vulnType,omitempty"`
	DiffStatus DiffStatus `json:"diffStatus,omitempty"`
}

// Alert has CERT alert information
@@ -234,24 +325,47 @@ func (g WpPackages) Add(pkg WpPackage) WpPackages {
|
||||
return append(g, pkg)
|
||||
}
|
||||
|
||||
// DiffStatus keeps a comparison with the previous detection results for this CVE
|
||||
type DiffStatus string
|
||||
|
||||
const (
|
||||
// DiffPlus is newly detected CVE
|
||||
DiffPlus = DiffStatus("+")
|
||||
|
||||
// DiffMinus is resolved CVE
|
||||
DiffMinus = DiffStatus("-")
|
||||
)
|
||||
|
||||
// CveIDDiffFormat format CVE-ID for diff mode
|
||||
func (v VulnInfo) CveIDDiffFormat() string {
|
||||
if v.DiffStatus != "" {
|
||||
return fmt.Sprintf("%s %s", v.DiffStatus, v.CveID)
|
||||
}
|
||||
return fmt.Sprintf("%s", v.CveID)
|
||||
}
|
||||
|
||||
// Titles returns title (TUI)
|
||||
func (v VulnInfo) Titles(lang, myFamily string) (values []CveContentStr) {
|
||||
if lang == "ja" {
|
||||
if cont, found := v.CveContents[Jvn]; found && 0 < len(cont.Title) {
|
||||
if cont, found := v.CveContents[Jvn]; found && cont.Title != "" {
|
||||
values = append(values, CveContentStr{Jvn, cont.Title})
|
||||
}
|
||||
}
|
||||
|
||||
// RedHat API has one line title.
|
||||
if cont, found := v.CveContents[RedHatAPI]; found && 0 < len(cont.Title) {
|
||||
if cont, found := v.CveContents[RedHatAPI]; found && cont.Title != "" {
|
||||
values = append(values, CveContentStr{RedHatAPI, cont.Title})
|
||||
}
|
||||
|
||||
// GitHub security alerts has a title.
|
||||
if cont, found := v.CveContents[GitHub]; found && cont.Title != "" {
|
||||
values = append(values, CveContentStr{GitHub, cont.Title})
|
||||
}
|
||||
|
||||
order := CveContentTypes{Trivy, Nvd, NewCveContentType(myFamily)}
|
||||
order = append(order, AllCveContetTypes.Except(append(order, Jvn)...)...)
|
||||
for _, ctype := range order {
|
||||
// Only JVN has meaningful title. so return first 100 char of summary
|
||||
if cont, found := v.CveContents[ctype]; found && 0 < len(cont.Summary) {
|
||||
if cont, found := v.CveContents[ctype]; found && cont.Summary != "" {
|
||||
summary := strings.Replace(cont.Summary, "\n", " ", -1)
|
||||
values = append(values, CveContentStr{
|
||||
Type: ctype,
|
||||
@@ -279,7 +393,7 @@ func (v VulnInfo) Titles(lang, myFamily string) (values []CveContentStr) {
|
||||
// Summaries returns summaries
|
||||
func (v VulnInfo) Summaries(lang, myFamily string) (values []CveContentStr) {
|
||||
if lang == "ja" {
|
||||
if cont, found := v.CveContents[Jvn]; found && 0 < len(cont.Summary) {
|
||||
if cont, found := v.CveContents[Jvn]; found && cont.Summary != "" {
|
||||
summary := cont.Title
|
||||
summary += "\n" + strings.Replace(
|
||||
strings.Replace(cont.Summary, "\n", " ", -1), "\r", " ", -1)
|
||||
@@ -287,10 +401,10 @@ func (v VulnInfo) Summaries(lang, myFamily string) (values []CveContentStr) {
|
||||
}
|
||||
}
|
||||
|
||||
order := CveContentTypes{Trivy, NewCveContentType(myFamily), Nvd}
|
||||
order := CveContentTypes{Trivy, NewCveContentType(myFamily), Nvd, GitHub}
|
||||
order = append(order, AllCveContetTypes.Except(append(order, Jvn)...)...)
|
||||
for _, ctype := range order {
|
||||
if cont, found := v.CveContents[ctype]; found && 0 < len(cont.Summary) {
|
||||
if cont, found := v.CveContents[ctype]; found && cont.Summary != "" {
|
||||
summary := strings.Replace(cont.Summary, "\n", " ", -1)
|
||||
values = append(values, CveContentStr{
|
||||
Type: ctype,
|
||||
@@ -306,9 +420,9 @@ func (v VulnInfo) Summaries(lang, myFamily string) (values []CveContentStr) {
|
||||
})
|
||||
}
|
||||
|
||||
if v, ok := v.CveContents[WPVulnDB]; ok {
|
||||
if v, ok := v.CveContents[WpScan]; ok {
|
||||
values = append(values, CveContentStr{
|
||||
Type: "WPVDB",
|
||||
Type: WpScan,
|
||||
Value: v.Title,
|
||||
})
|
||||
}
|
||||
@@ -324,14 +438,11 @@ func (v VulnInfo) Summaries(lang, myFamily string) (values []CveContentStr) {
|
||||
}
|
||||
|
||||
// Cvss2Scores returns CVSS V2 Scores
|
||||
func (v VulnInfo) Cvss2Scores(myFamily string) (values []CveContentCvss) {
|
||||
order := []CveContentType{Nvd, RedHatAPI, RedHat, Jvn}
|
||||
if myFamily != config.RedHat && myFamily != config.CentOS {
|
||||
order = append(order, NewCveContentType(myFamily))
|
||||
}
|
||||
func (v VulnInfo) Cvss2Scores() (values []CveContentCvss) {
|
||||
order := []CveContentType{RedHatAPI, RedHat, Nvd, Jvn}
|
||||
for _, ctype := range order {
|
||||
if cont, found := v.CveContents[ctype]; found {
|
||||
if cont.Cvss2Score == 0 || cont.Cvss2Severity == "" {
|
||||
if cont.Cvss2Score == 0 && cont.Cvss2Severity == "" {
|
||||
continue
|
||||
}
|
||||
// https://nvd.nist.gov/vuln-metrics/cvss
|
||||
@@ -346,52 +457,17 @@ func (v VulnInfo) Cvss2Scores(myFamily string) (values []CveContentCvss) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
for _, adv := range v.DistroAdvisories {
|
||||
if adv.Severity != "" {
|
||||
values = append(values, CveContentCvss{
|
||||
Type: "Advisory",
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: severityToV2ScoreRoughly(adv.Severity),
|
||||
CalculatedBySeverity: true,
|
||||
Vector: "-",
|
||||
Severity: strings.ToUpper(adv.Severity),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// An OVAL entry in Ubuntu and Debian has only severity (CVSS score isn't included).
|
||||
// Show severity and dummy score calculated roughly.
|
||||
order = append(order, AllCveContetTypes.Except(order...)...)
|
||||
for _, ctype := range order {
|
||||
if cont, found := v.CveContents[ctype]; found &&
|
||||
cont.Cvss2Score == 0 &&
|
||||
cont.Cvss3Score == 0 &&
|
||||
cont.Cvss2Severity != "" {
|
||||
|
||||
values = append(values, CveContentCvss{
|
||||
Type: cont.Type,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: severityToV2ScoreRoughly(cont.Cvss2Severity),
|
||||
CalculatedBySeverity: true,
|
||||
Vector: "-",
|
||||
Severity: strings.ToUpper(cont.Cvss2Severity),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// Cvss3Scores returns CVSS V3 Score
|
||||
func (v VulnInfo) Cvss3Scores() (values []CveContentCvss) {
|
||||
order := []CveContentType{Nvd, RedHatAPI, RedHat, Jvn}
|
||||
order := []CveContentType{RedHatAPI, RedHat, Nvd, Jvn}
|
||||
for _, ctype := range order {
|
||||
if cont, found := v.CveContents[ctype]; found {
|
||||
if cont.Cvss3Score == 0 && cont.Cvss3Severity == "" {
|
||||
continue
|
||||
}
|
||||
// https://nvd.nist.gov/vuln-metrics/cvss
|
||||
values = append(values, CveContentCvss{
|
||||
Type: ctype,
|
||||
@@ -405,131 +481,74 @@ func (v VulnInfo) Cvss3Scores() (values []CveContentCvss) {
|
||||
}
|
||||
}
|
||||
|
||||
if cont, found := v.CveContents[Trivy]; found && cont.Cvss3Severity != "" {
|
||||
values = append(values, CveContentCvss{
|
||||
Type: Trivy,
|
||||
Value: Cvss{
|
||||
Type: CVSS3,
|
||||
Score: severityToV2ScoreRoughly(cont.Cvss3Severity),
|
||||
Severity: strings.ToUpper(cont.Cvss3Severity),
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// MaxCvss3Score returns Max CVSS V3 Score
|
||||
func (v VulnInfo) MaxCvss3Score() CveContentCvss {
|
||||
order := []CveContentType{Nvd, RedHat, RedHatAPI, Jvn}
|
||||
max := 0.0
|
||||
value := CveContentCvss{
|
||||
Type: Unknown,
|
||||
Value: Cvss{Type: CVSS3},
|
||||
}
|
||||
for _, ctype := range order {
|
||||
if cont, found := v.CveContents[ctype]; found && max < cont.Cvss3Score {
|
||||
// https://nvd.nist.gov/vuln-metrics/cvss
|
||||
value = CveContentCvss{
|
||||
for _, ctype := range []CveContentType{Debian, DebianSecurityTracker, Ubuntu, Amazon, Trivy, GitHub, WpScan} {
|
||||
if cont, found := v.CveContents[ctype]; found && cont.Cvss3Severity != "" {
|
||||
values = append(values, CveContentCvss{
|
||||
Type: ctype,
|
||||
Value: Cvss{
|
||||
Type: CVSS3,
|
||||
Score: cont.Cvss3Score,
|
||||
Vector: cont.Cvss3Vector,
|
||||
Severity: strings.ToUpper(cont.Cvss3Severity),
|
||||
Type: CVSS3,
|
||||
Score: severityToCvssScoreRoughly(cont.Cvss3Severity),
|
||||
CalculatedBySeverity: true,
|
||||
Severity: strings.ToUpper(cont.Cvss3Severity),
|
||||
},
|
||||
}
|
||||
max = cont.Cvss3Score
|
||||
})
|
||||
}
|
||||
}
|
||||
return value
|
||||
|
||||
// Memo: Only RedHat, Oracle and Amazon has severity data in advisory.
|
||||
for _, adv := range v.DistroAdvisories {
|
||||
if adv.Severity != "" {
|
||||
score := severityToCvssScoreRoughly(adv.Severity)
|
||||
values = append(values, CveContentCvss{
|
||||
Type: "Vendor",
|
||||
Value: Cvss{
|
||||
Type: CVSS3,
|
||||
Score: score,
|
||||
CalculatedBySeverity: true,
|
||||
Severity: strings.ToUpper(adv.Severity),
|
||||
},
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// MaxCvssScore returns max CVSS Score
|
||||
// If there is no CVSS Score, return Severity as a numerical value.
|
||||
func (v VulnInfo) MaxCvssScore() CveContentCvss {
|
||||
v3Max := v.MaxCvss3Score()
|
||||
v2Max := v.MaxCvss2Score()
|
||||
max := v3Max
|
||||
if max.Type == Unknown {
|
||||
return v2Max
|
||||
if v3Max.Type != Unknown {
|
||||
return v3Max
|
||||
}
|
||||
return v.MaxCvss2Score()
|
||||
}
|
||||
|
||||
if max.Value.Score < v2Max.Value.Score && !v2Max.Value.CalculatedBySeverity {
|
||||
max = v2Max
|
||||
// MaxCvss3Score returns Max CVSS V3 Score
|
||||
func (v VulnInfo) MaxCvss3Score() CveContentCvss {
|
||||
max := CveContentCvss{
|
||||
Type: Unknown,
|
||||
Value: Cvss{Type: CVSS3},
|
||||
}
|
||||
for _, cvss := range v.Cvss3Scores() {
|
||||
if max.Value.Score < cvss.Value.Score {
|
||||
max = cvss
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
|
||||
|
||||
// MaxCvss2Score returns Max CVSS V2 Score
|
||||
func (v VulnInfo) MaxCvss2Score() CveContentCvss {
|
||||
order := []CveContentType{Nvd, RedHat, RedHatAPI, Jvn}
|
||||
max := 0.0
|
||||
value := CveContentCvss{
|
||||
max := CveContentCvss{
|
||||
Type: Unknown,
|
||||
Value: Cvss{Type: CVSS2},
|
||||
}
|
||||
for _, ctype := range order {
|
||||
if cont, found := v.CveContents[ctype]; found && max < cont.Cvss2Score {
|
||||
// https://nvd.nist.gov/vuln-metrics/cvss
|
||||
value = CveContentCvss{
|
||||
Type: ctype,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: cont.Cvss2Score,
|
||||
Vector: cont.Cvss2Vector,
|
||||
Severity: strings.ToUpper(cont.Cvss2Severity),
|
||||
},
|
||||
}
|
||||
max = cont.Cvss2Score
|
||||
for _, cvss := range v.Cvss2Scores() {
|
||||
if max.Value.Score < cvss.Value.Score {
|
||||
max = cvss
|
||||
}
|
||||
}
|
||||
if 0 < max {
|
||||
return value
|
||||
}
|
||||
|
||||
// If CVSS score isn't on NVD, RedHat and JVN, use OVAL and advisory Severity.
|
||||
// Convert severity to cvss score roughly, then returns max severity.
|
||||
// Only Ubuntu, RedHat and Oracle have severity data in OVAL.
|
||||
order = []CveContentType{Ubuntu, RedHat, Oracle}
|
||||
for _, ctype := range order {
|
||||
if cont, found := v.CveContents[ctype]; found && 0 < len(cont.Cvss2Severity) {
|
||||
score := severityToV2ScoreRoughly(cont.Cvss2Severity)
|
||||
if max < score {
|
||||
value = CveContentCvss{
|
||||
Type: ctype,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: score,
|
||||
CalculatedBySeverity: true,
|
||||
Vector: cont.Cvss2Vector,
|
||||
Severity: strings.ToUpper(cont.Cvss2Severity),
|
||||
},
|
||||
}
|
||||
}
|
||||
max = score
|
||||
}
|
||||
}
|
||||
|
||||
// Only RedHat, Oracle and Amazon has severity data in advisory.
|
||||
for _, adv := range v.DistroAdvisories {
|
||||
if adv.Severity != "" {
|
||||
score := severityToV2ScoreRoughly(adv.Severity)
|
||||
if max < score {
|
||||
value = CveContentCvss{
|
||||
Type: "Vendor",
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: score,
|
||||
CalculatedBySeverity: true,
|
||||
Vector: "-",
|
||||
Severity: adv.Severity,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return value
|
||||
return max
|
||||
}
|
||||
|
||||
// AttackVector returns attack vector string
|
||||
@@ -613,16 +632,29 @@ type Cvss struct {

// Format CVSS Score and Vector
func (c Cvss) Format() string {
	if c.Score == 0 || c.Vector == "" {
		return c.Severity
	if c.Vector == "" {
		return fmt.Sprintf("%s %s", c.SeverityToCvssScoreRange(), c.Severity)
	}
	switch c.Type {
	case CVSS2:
		return fmt.Sprintf("%3.1f/%s %s", c.Score, c.Vector, c.Severity)
	case CVSS3:
		return fmt.Sprintf("%3.1f/%s %s", c.Score, c.Vector, c.Severity)
	return fmt.Sprintf("%3.1f/%s %s", c.Score, c.Vector, c.Severity)
}

// SeverityToCvssScoreRange returns CVSS score range
func (c Cvss) SeverityToCvssScoreRange() string {
	return severityToCvssScoreRange(c.Severity)
}

func severityToCvssScoreRange(severity string) string {
	switch strings.ToUpper(severity) {
	case "CRITICAL":
		return "9.0-10.0"
	case "IMPORTANT", "HIGH":
		return "7.0-8.9"
	case "MODERATE", "MEDIUM":
		return "4.0-6.9"
	case "LOW":
		return "0.1-3.9"
	}
	return ""
	return "None"
}

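A small hypothetical sketch of the two Format paths introduced above (the models import path and the vector string are assumptions): with a vector the numeric score is printed, without one the severity is mapped to its rough score range.

package main

import (
	"fmt"

	"github.com/future-architect/vuls/models" // assumed import path
)

func main() {
	withVector := models.Cvss{
		Type:     models.CVSS3,
		Score:    8.1,
		Vector:   "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:H/I:H/A:H", // illustrative vector
		Severity: "HIGH",
	}
	fmt.Println(withVector.Format()) // "8.1/CVSS:3.0/... HIGH"

	severityOnly := models.Cvss{Type: models.CVSS3, Severity: "HIGH"}
	fmt.Println(severityOnly.Format()) // "7.0-8.9 HIGH"
}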
// Amazon Linux Security Advisory
@@ -637,7 +669,7 @@ func (c Cvss) Format() string {
// Critical, High, Medium, Low
// https://wiki.ubuntu.com/Bugs/Importance
// https://people.canonical.com/~ubuntu-security/cve/priority.html
func severityToV2ScoreRoughly(severity string) float64 {
func severityToCvssScoreRoughly(severity string) float64 {
	switch strings.ToUpper(severity) {
	case "CRITICAL":
		return 10.0
@@ -731,14 +763,10 @@ type AlertDict struct {

// FormatSource returns which source has this alert
func (a AlertDict) FormatSource() string {
	s := []string{}
	if len(a.En) != 0 {
		s = append(s, "USCERT")
	if len(a.En) != 0 || len(a.Ja) != 0 {
		return "CERT"
	}
	if len(a.Ja) != 0 {
		s = append(s, "JPCERT")
	}
	return strings.Join(s, "/")
	return ""
}

// Confidences is a list of Confidence
@@ -810,8 +838,8 @@ const (
	// GitHubMatchStr is a String representation of GitHubMatch
	GitHubMatchStr = "GitHubMatch"

	// WPVulnDBMatchStr is a String representation of WordPress VulnDB scanning
	WPVulnDBMatchStr = "WPVulnDBMatch"
	// WpScanMatchStr is a String representation of WordPress VulnDB scanning
	WpScanMatchStr = "WpScanMatch"

	// FailedToGetChangelog is a String representation of FailedToGetChangelog
	FailedToGetChangelog = "FailedToGetChangelog"
@@ -851,6 +879,6 @@ var (
	// GitHubMatch is a ranking how confident the CVE-ID was detected correctly
	GitHubMatch = Confidence{97, GitHubMatchStr, 2}

	// WPVulnDBMatch is a ranking how confident the CVE-ID was detected correctly
	WPVulnDBMatch = Confidence{100, WPVulnDBMatchStr, 0}
	// WpScanMatch is a ranking how confident the CVE-ID was detected correctly
	WpScanMatch = Confidence{100, WpScanMatchStr, 0}
)

@@ -3,6 +3,7 @@ package models
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestTitles(t *testing.T) {
|
||||
@@ -98,10 +99,10 @@ func TestTitles(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
for i, tt := range tests {
|
||||
actual := tt.in.cont.Titles(tt.in.lang, "redhat")
|
||||
if !reflect.DeepEqual(tt.out, actual) {
|
||||
t.Errorf("\nexpected: %v\n actual: %v\n", tt.out, actual)
|
||||
t.Errorf("[%d]\nexpected: %v\n actual: %v\n", i, tt.out, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -221,11 +222,65 @@ func TestCountGroupBySeverity(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
Nvd: {
|
||||
Type: Nvd,
|
||||
Cvss2Score: 6.0,
|
||||
Cvss3Score: 6.0,
|
||||
},
|
||||
RedHat: {
|
||||
Type: RedHat,
|
||||
Cvss2Score: 7.0,
|
||||
Cvss3Score: 7.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: CveContents{
|
||||
Nvd: {
|
||||
Type: Nvd,
|
||||
Cvss3Score: 2.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0004": {
|
||||
CveID: "CVE-2017-0004",
|
||||
CveContents: CveContents{
|
||||
Nvd: {
|
||||
Type: Nvd,
|
||||
Cvss3Score: 5.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0005": {
|
||||
CveID: "CVE-2017-0005",
|
||||
},
|
||||
"CVE-2017-0006": {
|
||||
CveID: "CVE-2017-0005",
|
||||
CveContents: CveContents{
|
||||
Nvd: {
|
||||
Type: Nvd,
|
||||
Cvss3Score: 10.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: map[string]int{
|
||||
"Critical": 1,
|
||||
"High": 1,
|
||||
"Medium": 1,
|
||||
"Low": 1,
|
||||
"Unknown": 1,
|
||||
},
|
||||
},
|
||||
{
|
||||
in: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: CveContents{
|
||||
Nvd: {
|
||||
Type: Nvd,
|
||||
Cvss2Score: 1.0,
|
||||
},
|
||||
RedHat: {
|
||||
Type: RedHat,
|
||||
Cvss3Score: 7.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -250,21 +305,31 @@ func TestCountGroupBySeverity(t *testing.T) {
|
||||
"CVE-2017-0005": {
|
||||
CveID: "CVE-2017-0005",
|
||||
},
|
||||
"CVE-2017-0006": {
|
||||
CveID: "CVE-2017-0005",
|
||||
CveContents: CveContents{
|
||||
Nvd: {
|
||||
Type: Nvd,
|
||||
Cvss2Score: 10.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
out: map[string]int{
|
||||
"High": 1,
|
||||
"Medium": 1,
|
||||
"Low": 1,
|
||||
"Unknown": 1,
|
||||
"Critical": 1,
|
||||
"High": 1,
|
||||
"Medium": 1,
|
||||
"Low": 1,
|
||||
"Unknown": 1,
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
for i, tt := range tests {
|
||||
actual := tt.in.CountGroupBySeverity()
|
||||
for k := range tt.out {
|
||||
if tt.out[k] != actual[k] {
|
||||
t.Errorf("\nexpected %s: %d\n actual %d\n",
|
||||
k, tt.out[k], actual[k])
|
||||
t.Errorf("[%d]\nexpected %s: %d\n actual %d\n",
|
||||
i, k, tt.out[k], actual[k])
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -275,6 +340,7 @@ func TestToSortedSlice(t *testing.T) {
|
||||
in VulnInfos
|
||||
out []VulnInfo
|
||||
}{
|
||||
//0
|
||||
{
|
||||
in: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
@@ -333,7 +399,7 @@ func TestToSortedSlice(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
// When max scores are the same, sort by CVE-ID
|
||||
//[1] When max scores are the same, sort by CVE-ID
|
||||
{
|
||||
in: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
@@ -354,7 +420,7 @@ func TestToSortedSlice(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
RedHat: {
|
||||
Type: RedHat,
|
||||
Cvss2Score: 7.0,
|
||||
Cvss3Score: 7.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -365,7 +431,7 @@ func TestToSortedSlice(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
RedHat: {
|
||||
Type: RedHat,
|
||||
Cvss2Score: 7.0,
|
||||
Cvss3Score: 7.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -384,7 +450,7 @@ func TestToSortedSlice(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
// When there are no cvss scores, sort by severity
|
||||
//[2] When there are no cvss scores, sort by severity
|
||||
{
|
||||
in: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
@@ -392,7 +458,7 @@ func TestToSortedSlice(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss2Severity: "High",
|
||||
Cvss3Severity: "High",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -401,7 +467,7 @@ func TestToSortedSlice(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss2Severity: "Low",
|
||||
Cvss3Severity: "Low",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -412,7 +478,7 @@ func TestToSortedSlice(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss2Severity: "High",
|
||||
Cvss3Severity: "High",
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -421,17 +487,17 @@ func TestToSortedSlice(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss2Severity: "Low",
|
||||
Cvss3Severity: "Low",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
for i, tt := range tests {
|
||||
actual := tt.in.ToSortedSlice()
|
||||
if !reflect.DeepEqual(tt.out, actual) {
|
||||
t.Errorf("\nexpected: %v\n actual: %v\n", tt.out, actual)
|
||||
t.Errorf("[%d]\nexpected: %v\n actual: %v\n", i, tt.out, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -462,18 +528,16 @@ func TestCvss2Scores(t *testing.T) {
|
||||
Cvss2Vector: "AV:N/AC:L/Au:N/C:N/I:N/A:P",
|
||||
Cvss2Severity: "HIGH",
|
||||
},
|
||||
//v3
|
||||
RedHatAPI: {
|
||||
Type: RedHatAPI,
|
||||
Cvss3Score: 8.1,
|
||||
Cvss3Vector: "AV:N/AC:L/Au:N/C:N/I:N/A:P",
|
||||
Cvss3Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
},
|
||||
out: []CveContentCvss{
|
||||
{
|
||||
Type: Nvd,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: 8.1,
|
||||
Vector: "AV:N/AC:L/Au:N/C:N/I:N/A:P",
|
||||
Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: RedHat,
|
||||
Value: Cvss{
|
||||
@@ -483,6 +547,15 @@ func TestCvss2Scores(t *testing.T) {
|
||||
Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: Nvd,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: 8.1,
|
||||
Vector: "AV:N/AC:L/Au:N/C:N/I:N/A:P",
|
||||
Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: Jvn,
|
||||
Value: Cvss{
|
||||
@@ -501,7 +574,7 @@ func TestCvss2Scores(t *testing.T) {
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
actual := tt.in.Cvss2Scores("redhat")
|
||||
actual := tt.in.Cvss2Scores()
|
||||
if !reflect.DeepEqual(tt.out, actual) {
|
||||
t.Errorf("[%d]\nexpected: %v\n actual: %v\n", i, tt.out, actual)
|
||||
}
|
||||
@@ -513,6 +586,7 @@ func TestMaxCvss2Scores(t *testing.T) {
|
||||
in VulnInfo
|
||||
out CveContentCvss
|
||||
}{
|
||||
// 0
|
||||
{
|
||||
in: VulnInfo{
|
||||
CveContents: CveContents{
|
||||
@@ -546,26 +620,6 @@ func TestMaxCvss2Scores(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
// Severity in OVAL
|
||||
{
|
||||
in: VulnInfo{
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss2Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
},
|
||||
out: CveContentCvss{
|
||||
Type: Ubuntu,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: 8.9,
|
||||
CalculatedBySeverity: true,
|
||||
Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
},
|
||||
// Empty
|
||||
{
|
||||
in: VulnInfo{},
|
||||
@@ -611,13 +665,6 @@ func TestCvss3Scores(t *testing.T) {
|
||||
},
|
||||
},
|
||||
out: []CveContentCvss{
|
||||
{
|
||||
Type: Nvd,
|
||||
Value: Cvss{
|
||||
Type: CVSS3,
|
||||
Score: 0.0,
|
||||
},
|
||||
},
|
||||
{
|
||||
Type: RedHat,
|
||||
Value: Cvss{
|
||||
@@ -629,16 +676,38 @@ func TestCvss3Scores(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
// [1] Severity in OVAL
|
||||
{
|
||||
in: VulnInfo{
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss3Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
},
|
||||
out: []CveContentCvss{
|
||||
{
|
||||
Type: Ubuntu,
|
||||
Value: Cvss{
|
||||
Type: CVSS3,
|
||||
Score: 8.9,
|
||||
CalculatedBySeverity: true,
|
||||
Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
// Empty
|
||||
{
|
||||
in: VulnInfo{},
|
||||
out: nil,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
for i, tt := range tests {
|
||||
actual := tt.in.Cvss3Scores()
|
||||
if !reflect.DeepEqual(tt.out, actual) {
|
||||
t.Errorf("\nexpected: %v\n actual: %v\n", tt.out, actual)
|
||||
t.Errorf("[%d]\nexpected: %v\n actual: %v\n", i, tt.out, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -710,10 +779,10 @@ func TestMaxCvssScores(t *testing.T) {
|
||||
},
|
||||
},
|
||||
out: CveContentCvss{
|
||||
Type: RedHat,
|
||||
Type: Nvd,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: 8.0,
|
||||
Type: CVSS3,
|
||||
Score: 7.0,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -740,14 +809,14 @@ func TestMaxCvssScores(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss2Severity: "HIGH",
|
||||
Cvss3Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
},
|
||||
out: CveContentCvss{
|
||||
Type: Ubuntu,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Type: CVSS3,
|
||||
Score: 8.9,
|
||||
CalculatedBySeverity: true,
|
||||
Severity: "HIGH",
|
||||
@@ -760,7 +829,7 @@ func TestMaxCvssScores(t *testing.T) {
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss2Severity: "MEDIUM",
|
||||
Cvss3Severity: "MEDIUM",
|
||||
},
|
||||
Nvd: {
|
||||
Type: Nvd,
|
||||
@@ -770,11 +839,12 @@ func TestMaxCvssScores(t *testing.T) {
|
||||
},
|
||||
},
|
||||
out: CveContentCvss{
|
||||
Type: Nvd,
|
||||
Type: Ubuntu,
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: 7.0,
|
||||
Severity: "HIGH",
|
||||
Type: CVSS3,
|
||||
Score: 6.9,
|
||||
Severity: "MEDIUM",
|
||||
CalculatedBySeverity: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -790,20 +860,20 @@ func TestMaxCvssScores(t *testing.T) {
|
||||
out: CveContentCvss{
|
||||
Type: "Vendor",
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Type: CVSS3,
|
||||
Score: 8.9,
|
||||
CalculatedBySeverity: true,
|
||||
Vector: "-",
|
||||
Severity: "HIGH",
|
||||
},
|
||||
},
|
||||
},
|
||||
//5
|
||||
{
|
||||
in: VulnInfo{
|
||||
CveContents: CveContents{
|
||||
Ubuntu: {
|
||||
Type: Ubuntu,
|
||||
Cvss2Severity: "MEDIUM",
|
||||
Cvss3Severity: "MEDIUM",
|
||||
},
|
||||
Nvd: {
|
||||
Type: Nvd,
|
||||
@@ -818,11 +888,12 @@ func TestMaxCvssScores(t *testing.T) {
|
||||
},
|
||||
},
|
||||
out: CveContentCvss{
|
||||
Type: Nvd,
|
||||
Type: "Vendor",
|
||||
Value: Cvss{
|
||||
Type: CVSS2,
|
||||
Score: 4,
|
||||
Severity: "MEDIUM",
|
||||
Type: CVSS3,
|
||||
Score: 8.9,
|
||||
Severity: "HIGH",
|
||||
CalculatedBySeverity: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -861,7 +932,7 @@ func TestFormatMaxCvssScore(t *testing.T) {
|
||||
},
|
||||
RedHat: {
|
||||
Type: RedHat,
|
||||
Cvss2Severity: "HIGH",
|
||||
Cvss3Severity: "HIGH",
|
||||
Cvss3Score: 8.0,
|
||||
},
|
||||
Nvd: {
|
||||
@@ -871,7 +942,7 @@ func TestFormatMaxCvssScore(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
out: "8.3 HIGH (jvn)",
|
||||
out: "8.0 HIGH (redhat)",
|
||||
},
|
||||
{
|
||||
in: VulnInfo{
|
||||
@@ -897,10 +968,10 @@ func TestFormatMaxCvssScore(t *testing.T) {
|
||||
out: "9.9 HIGH (redhat)",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
for i, tt := range tests {
|
||||
actual := tt.in.FormatMaxCvssScore()
|
||||
if !reflect.DeepEqual(tt.out, actual) {
|
||||
t.Errorf("\nexpected: %v\n actual: %v\n", tt.out, actual)
|
||||
t.Errorf("[%d]\nexpected: %v\n actual: %v\n", i, tt.out, actual)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1170,3 +1241,372 @@ func TestVulnInfo_AttackVector(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVulnInfos_FilterByCvssOver(t *testing.T) {
|
||||
type args struct {
|
||||
over float64
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
v VulnInfos
|
||||
args args
|
||||
want VulnInfos
|
||||
}{
|
||||
{
|
||||
name: "over 7.0",
|
||||
args: args{over: 7.0},
|
||||
v: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0001",
|
||||
Cvss2Score: 7.1,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0002",
|
||||
Cvss2Score: 6.9,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Score: 6.9,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
CveContent{
|
||||
Type: Jvn,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Score: 7.2,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
want: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0001",
|
||||
Cvss2Score: 7.1,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Nvd,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Score: 6.9,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
CveContent{
|
||||
Type: Jvn,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss2Score: 7.2,
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "over high",
|
||||
args: args{over: 7.0},
|
||||
v: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Ubuntu,
|
||||
CveID: "CVE-2017-0001",
|
||||
Cvss3Severity: "HIGH",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Debian,
|
||||
CveID: "CVE-2017-0002",
|
||||
Cvss3Severity: "CRITICAL",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: GitHub,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss3Severity: "IMPORTANT",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
want: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Ubuntu,
|
||||
CveID: "CVE-2017-0001",
|
||||
Cvss3Severity: "HIGH",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: Debian,
|
||||
CveID: "CVE-2017-0002",
|
||||
Cvss3Severity: "CRITICAL",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: NewCveContents(
|
||||
CveContent{
|
||||
Type: GitHub,
|
||||
CveID: "CVE-2017-0003",
|
||||
Cvss3Severity: "IMPORTANT",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := tt.v.FilterByCvssOver(tt.args.over); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("VulnInfos.FindByCvssOver() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
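A rough sketch of the selection rule these cases imply: a CVE survives FilterByCvssOver when its best score across all sources reaches the threshold, which is why CVE-2017-0003 is kept on the strength of JVN's 7.2 even though NVD reports 6.9. The types below are simplified stand-ins, and the severity-to-score conversion exercised by the "over high" case is deliberately omitted.

package main

import "fmt"

type cveContent struct {
	Type       string
	Cvss2Score float64
}

type vulnInfo struct {
	CveID       string
	CveContents []cveContent
}

type vulnInfos map[string]vulnInfo

// filterByCvssOver keeps entries whose maximum score over all sources is at or above `over`.
func (v vulnInfos) filterByCvssOver(over float64) vulnInfos {
	filtered := vulnInfos{}
	for id, vi := range v {
		max := 0.0
		for _, c := range vi.CveContents {
			if c.Cvss2Score > max {
				max = c.Cvss2Score
			}
		}
		if over <= max {
			filtered[id] = vi
		}
	}
	return filtered
}

func main() {
	vs := vulnInfos{
		"CVE-2017-0002": {CveID: "CVE-2017-0002", CveContents: []cveContent{{Type: "nvd", Cvss2Score: 6.9}}},
		"CVE-2017-0003": {CveID: "CVE-2017-0003", CveContents: []cveContent{
			{Type: "nvd", Cvss2Score: 6.9}, {Type: "jvn", Cvss2Score: 7.2}}},
	}
	for id := range vs.filterByCvssOver(7.0) {
		fmt.Println(id) // CVE-2017-0003
	}
}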
|
||||
|
||||
func TestVulnInfos_FilterIgnoreCves(t *testing.T) {
|
||||
type args struct {
|
||||
ignoreCveIDs []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
v VulnInfos
|
||||
args args
|
||||
want VulnInfos
|
||||
}{
|
||||
{
|
||||
name: "filter ignored",
|
||||
args: args{ignoreCveIDs: []string{"CVE-2017-0002"}},
|
||||
v: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
},
|
||||
},
|
||||
want: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := tt.v.FilterIgnoreCves(tt.args.ignoreCveIDs); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("VulnInfos.FindIgnoreCves() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVulnInfos_FilterUnfixed(t *testing.T) {
|
||||
type args struct {
|
||||
ignoreUnfixed bool
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
v VulnInfos
|
||||
args args
|
||||
want VulnInfos
|
||||
}{
|
||||
{
|
||||
name: "filter ok",
|
||||
args: args{ignoreUnfixed: true},
|
||||
v: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "a",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "b",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "c",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
{
|
||||
Name: "d",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "b",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "c",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
{
|
||||
Name: "d",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := tt.v.FilterUnfixed(tt.args.ignoreUnfixed); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("VulnInfos.FilterUnfixed() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
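These cases suggest FilterUnfixed(true) drops a CVE only when none of its affected packages has a fix yet: CVE-2017-0003 stays because package d is already fixable. A compact sketch of that rule over simplified stand-in types:

package main

import "fmt"

type pkgFixStatus struct {
	Name        string
	NotFixedYet bool
}

type vulnInfo struct {
	CveID            string
	AffectedPackages []pkgFixStatus
}

// filterUnfixed keeps a CVE when at least one affected package already has a fix available.
func filterUnfixed(v map[string]vulnInfo, ignoreUnfixed bool) map[string]vulnInfo {
	if !ignoreUnfixed {
		return v
	}
	filtered := map[string]vulnInfo{}
	for id, vi := range v {
		for _, p := range vi.AffectedPackages {
			if !p.NotFixedYet {
				filtered[id] = vi
				break
			}
		}
	}
	return filtered
}

func main() {
	in := map[string]vulnInfo{
		"CVE-2017-0001": {CveID: "CVE-2017-0001", AffectedPackages: []pkgFixStatus{{Name: "a", NotFixedYet: true}}},
		"CVE-2017-0003": {CveID: "CVE-2017-0003", AffectedPackages: []pkgFixStatus{{Name: "c", NotFixedYet: true}, {Name: "d"}}},
	}
	fmt.Println(len(filterUnfixed(in, true))) // 1: only CVE-2017-0003 survives
}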
|
||||
|
||||
func TestVulnInfos_FilterIgnorePkgs(t *testing.T) {
|
||||
type args struct {
|
||||
ignorePkgsRegexps []string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
v VulnInfos
|
||||
args args
|
||||
want VulnInfos
|
||||
}{
|
||||
{
|
||||
name: "filter pkgs 1",
|
||||
args: args{ignorePkgsRegexps: []string{"^kernel"}},
|
||||
v: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
},
|
||||
},
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
},
|
||||
want: VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "filter pkgs 2",
|
||||
args: args{ignorePkgsRegexps: []string{"^kernel"}},
|
||||
v: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "filter pkgs 3",
|
||||
args: args{ignorePkgsRegexps: []string{"^kernel", "^vim", "^bind"}},
|
||||
v: VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{Name: "kernel"},
|
||||
{Name: "vim"},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: VulnInfos{},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := tt.v.FilterIgnorePkgs(tt.args.ignorePkgsRegexps); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("VulnInfos.FilterIgnorePkgs() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
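The three cases above imply FilterIgnorePkgs removes a CVE only when every affected package matches one of the ignore regexps; a partial match (kernel ignored, vim not) leaves the entry, packages and all, untouched. A sketch under those assumptions, again with simplified types:

package main

import (
	"fmt"
	"regexp"
)

type vulnInfo struct {
	CveID            string
	AffectedPackages []string
}

// filterIgnorePkgs drops a CVE only when all of its affected packages
// match at least one of the ignore patterns.
func filterIgnorePkgs(v map[string]vulnInfo, ignoreRegexps []string) map[string]vulnInfo {
	res := []*regexp.Regexp{}
	for _, p := range ignoreRegexps {
		res = append(res, regexp.MustCompile(p))
	}
	filtered := map[string]vulnInfo{}
	for id, vi := range v {
		allIgnored := len(vi.AffectedPackages) > 0
		for _, name := range vi.AffectedPackages {
			matched := false
			for _, re := range res {
				if re.MatchString(name) {
					matched = true
					break
				}
			}
			if !matched {
				allIgnored = false
				break
			}
		}
		if !allIgnored {
			filtered[id] = vi
		}
	}
	return filtered
}

func main() {
	in := map[string]vulnInfo{
		"CVE-2017-0001": {CveID: "CVE-2017-0001", AffectedPackages: []string{"kernel", "vim"}},
	}
	fmt.Println(len(filterIgnorePkgs(in, []string{"^kernel"})))          // 1: vim does not match, entry kept
	fmt.Println(len(filterIgnorePkgs(in, []string{"^kernel", "^vim"}))) // 0: every package ignored
}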
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
package msf
|
||||
75
msf/msf.go
@@ -1,75 +0,0 @@
|
||||
// +build !scanner
|
||||
|
||||
package msf
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
|
||||
cnf "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"github.com/takuzoo3868/go-msfdb/db"
|
||||
metasploitmodels "github.com/takuzoo3868/go-msfdb/models"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// FillWithMetasploit fills Metasploit module information for each detected CVE
|
||||
func FillWithMetasploit(driver db.DB, r *models.ScanResult) (nMetasploitCve int, err error) {
|
||||
if driver == nil {
|
||||
return 0, nil
|
||||
}
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if cveID == "" {
|
||||
continue
|
||||
}
|
||||
ms := driver.GetModuleByCveID(cveID)
|
||||
if len(ms) == 0 {
|
||||
continue
|
||||
}
|
||||
modules := ConvertToModels(ms)
|
||||
vuln.Metasploits = modules
|
||||
r.ScannedCves[cveID] = vuln
|
||||
nMetasploitCve++
|
||||
}
|
||||
|
||||
return nMetasploitCve, nil
|
||||
}
|
||||
|
||||
// ConvertToModels converts go-msfdb models to vuls models
|
||||
func ConvertToModels(ms []*metasploitmodels.Metasploit) (modules []models.Metasploit) {
|
||||
for _, m := range ms {
|
||||
var links []string
|
||||
if 0 < len(m.References) {
|
||||
for _, u := range m.References {
|
||||
links = append(links, u.Link)
|
||||
}
|
||||
}
|
||||
module := models.Metasploit{
|
||||
Name: m.Name,
|
||||
Title: m.Title,
|
||||
Description: m.Description,
|
||||
URLs: links,
|
||||
}
|
||||
modules = append(modules, module)
|
||||
}
|
||||
return modules
|
||||
}
|
||||
|
||||
// CheckHTTPHealth does a health check
|
||||
func CheckHTTPHealth() error {
|
||||
if !cnf.Conf.Metasploit.IsFetchViaHTTP() {
|
||||
return nil
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/health", cnf.Conf.Metasploit.URL)
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
resp, _, errs = gorequest.New().Get(url).End()
|
||||
// resp, _, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
|
||||
// resp, _, errs = gorequest.New().Proxy(api.httpProxy).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return xerrors.Errorf("Failed to connect to metasploit server. url: %s, errs: %w", url, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -4,9 +4,9 @@ package oval
|
||||
|
||||
import (
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/kotakanbe/goval-dictionary/db"
|
||||
)
|
||||
|
||||
// Alpine is the struct of Alpine Linux
|
||||
@@ -15,22 +15,33 @@ type Alpine struct {
|
||||
}
|
||||
|
||||
// NewAlpine creates OVAL client for Alpine Linux
|
||||
func NewAlpine() Alpine {
|
||||
func NewAlpine(cnf config.VulnDictInterface) Alpine {
|
||||
return Alpine{
|
||||
Base{
|
||||
family: config.Alpine,
|
||||
family: constant.Alpine,
|
||||
Cnf: cnf,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o Alpine) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err error) {
|
||||
func (o Alpine) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
var relatedDefs ovalResult
|
||||
if config.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r); err != nil {
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
driver, err := newOvalDB(o.Cnf, r.Family)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -46,7 +57,7 @@ func (o Alpine) update(r *models.ScanResult, defPacks defPacks) {
|
||||
cveID := defPacks.def.Advisory.Cves[0].CveID
|
||||
vinfo, ok := r.ScannedCves[cveID]
|
||||
if !ok {
|
||||
util.Log.Debugf("%s is newly detected by OVAL", cveID)
|
||||
logging.Log.Debugf("%s is newly detected by OVAL", cveID)
|
||||
vinfo = models.VulnInfo{
|
||||
CveID: cveID,
|
||||
Confidences: []models.Confidence{models.OvalMatch},
|
||||
|
||||
@@ -7,9 +7,10 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/kotakanbe/goval-dictionary/db"
|
||||
ovalmodels "github.com/kotakanbe/goval-dictionary/models"
|
||||
)
|
||||
|
||||
@@ -23,7 +24,7 @@ func (o DebianBase) update(r *models.ScanResult, defPacks defPacks) {
|
||||
ovalContent.Type = models.NewCveContentType(o.family)
|
||||
vinfo, ok := r.ScannedCves[defPacks.def.Debian.CveID]
|
||||
if !ok {
|
||||
util.Log.Debugf("%s is newly detected by OVAL", defPacks.def.Debian.CveID)
|
||||
logging.Log.Debugf("%s is newly detected by OVAL", defPacks.def.Debian.CveID)
|
||||
vinfo = models.VulnInfo{
|
||||
CveID: defPacks.def.Debian.CveID,
|
||||
Confidences: []models.Confidence{models.OvalMatch},
|
||||
@@ -33,14 +34,14 @@ func (o DebianBase) update(r *models.ScanResult, defPacks defPacks) {
|
||||
cveContents := vinfo.CveContents
|
||||
ctype := models.NewCveContentType(o.family)
|
||||
if _, ok := vinfo.CveContents[ctype]; ok {
|
||||
util.Log.Debugf("%s OVAL will be overwritten",
|
||||
logging.Log.Debugf("%s OVAL will be overwritten",
|
||||
defPacks.def.Debian.CveID)
|
||||
} else {
|
||||
util.Log.Debugf("%s is also detected by OVAL",
|
||||
logging.Log.Debugf("%s is also detected by OVAL",
|
||||
defPacks.def.Debian.CveID)
|
||||
cveContents = models.CveContents{}
|
||||
}
|
||||
if r.Family != config.Raspbian {
|
||||
if r.Family != constant.Raspbian {
|
||||
vinfo.Confidences.AppendIfMissing(models.OvalMatch)
|
||||
} else {
|
||||
if len(vinfo.Confidences) == 0 {
|
||||
@@ -109,18 +110,19 @@ type Debian struct {
|
||||
}
|
||||
|
||||
// NewDebian creates OVAL client for Debian
|
||||
func NewDebian() Debian {
|
||||
func NewDebian(cnf config.VulnDictInterface) Debian {
|
||||
return Debian{
|
||||
DebianBase{
|
||||
Base{
|
||||
family: config.Debian,
|
||||
family: constant.Debian,
|
||||
Cnf: cnf,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o Debian) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err error) {
|
||||
func (o Debian) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
|
||||
// Debian's uname reports both the kernel release (uname -r) and the kernel-image version
|
||||
linuxImage := "linux-image-" + r.RunningKernel.Release
|
||||
@@ -139,20 +141,30 @@ func (o Debian) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err
|
||||
}
|
||||
|
||||
var relatedDefs ovalResult
|
||||
if config.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
if r.Family != config.Raspbian {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r); err != nil {
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if r.Family != constant.Raspbian {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
// OVAL does not support Package for Raspbian, so skip it.
|
||||
result := r.RemoveRaspbianPackFromResult()
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(&result); err != nil {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(&result, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if r.Family != config.Raspbian {
|
||||
driver, err := newOvalDB(o.Cnf, r.Family)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if r.Family != constant.Raspbian {
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -199,19 +211,20 @@ type Ubuntu struct {
|
||||
}
|
||||
|
||||
// NewUbuntu creates OVAL client for Ubuntu
|
||||
func NewUbuntu() Ubuntu {
|
||||
func NewUbuntu(cnf config.VulnDictInterface) Ubuntu {
|
||||
return Ubuntu{
|
||||
DebianBase{
|
||||
Base{
|
||||
family: config.Ubuntu,
|
||||
family: constant.Ubuntu,
|
||||
Cnf: cnf,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o Ubuntu) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err error) {
|
||||
switch major(r.Release) {
|
||||
func (o Ubuntu) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
switch util.Major(r.Release) {
|
||||
case "14":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
@@ -226,7 +239,7 @@ func (o Ubuntu) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err
|
||||
"linux-signed-lts-xenial",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(driver, r, kernelNamesInOval)
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "16":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
@@ -261,7 +274,7 @@ func (o Ubuntu) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err
|
||||
"linux-snapdragon",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(driver, r, kernelNamesInOval)
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "18":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
@@ -316,7 +329,7 @@ func (o Ubuntu) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err
|
||||
"linux-snapdragon",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(driver, r, kernelNamesInOval)
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "20":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
@@ -344,12 +357,12 @@ func (o Ubuntu) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err
|
||||
"linux-signed-oracle",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(driver, r, kernelNamesInOval)
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
}
|
||||
return 0, fmt.Errorf("Ubuntu %s is not support for now", r.Release)
|
||||
}
|
||||
|
||||
func (o Ubuntu) fillWithOval(driver db.DB, r *models.ScanResult, kernelNamesInOval []string) (nCVEs int, err error) {
|
||||
func (o Ubuntu) fillWithOval(r *models.ScanResult, kernelNamesInOval []string) (nCVEs int, err error) {
|
||||
linuxImage := "linux-image-" + r.RunningKernel.Release
|
||||
runningKernelVersion := ""
|
||||
kernelPkgInOVAL := ""
|
||||
@@ -361,7 +374,7 @@ func (o Ubuntu) fillWithOval(driver db.DB, r *models.ScanResult, kernelNamesInOv
|
||||
if v, ok := r.Packages[linuxImage]; ok {
|
||||
runningKernelVersion = v.Version
|
||||
} else {
|
||||
util.Log.Warnf("Unable to detect vulns of running kernel because the version of the running kernel is unknown. server: %s",
|
||||
logging.Log.Warnf("Unable to detect vulns of running kernel because the version of the running kernel is unknown. server: %s",
|
||||
r.ServerName)
|
||||
}
|
||||
|
||||
@@ -400,7 +413,7 @@ func (o Ubuntu) fillWithOval(driver db.DB, r *models.ScanResult, kernelNamesInOv
|
||||
}
|
||||
|
||||
if kernelPkgInOVAL == "" {
|
||||
util.Log.Warnf("The OVAL name of the running kernel image %+v is not found. So vulns of `linux` wll be detected. server: %s",
|
||||
logging.Log.Warnf("The OVAL name of the running kernel image %+v is not found. So vulns of `linux` wll be detected. server: %s",
|
||||
r.RunningKernel, r.ServerName)
|
||||
kernelPkgInOVAL = "linux"
|
||||
isOVALKernelPkgAdded = true
|
||||
@@ -415,11 +428,21 @@ func (o Ubuntu) fillWithOval(driver db.DB, r *models.ScanResult, kernelNamesInOv
|
||||
}
|
||||
|
||||
var relatedDefs ovalResult
|
||||
if config.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r); err != nil {
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
driver, err := newOvalDB(o.Cnf, r.Family)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
@@ -6,9 +6,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
ovalmodels "github.com/kotakanbe/goval-dictionary/models"
|
||||
)
|
||||
|
||||
@@ -56,7 +54,7 @@ func TestPackNamesOfUpdateDebian(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
util.Log = util.NewCustomLogger(config.ServerInfo{})
|
||||
// util.Log = util.NewCustomLogger()
|
||||
for i, tt := range tests {
|
||||
Debian{}.update(&tt.in, tt.defPacks)
|
||||
e := tt.out.ScannedCves["CVE-2000-1000"].AffectedPackages
|
||||
|
||||
120
oval/oval.go
@@ -4,11 +4,10 @@ package oval
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
cnf "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/kotakanbe/goval-dictionary/db"
|
||||
@@ -18,70 +17,78 @@ import (
|
||||
|
||||
// Client is the interface of OVAL client.
|
||||
type Client interface {
|
||||
CheckHTTPHealth() error
|
||||
FillWithOval(db.DB, *models.ScanResult) (int, error)
|
||||
|
||||
// CheckIfOvalFetched checks if oval entries are in DB by family, release.
|
||||
CheckIfOvalFetched(db.DB, string, string) (bool, error)
|
||||
CheckIfOvalFresh(db.DB, string, string) (bool, error)
|
||||
FillWithOval(*models.ScanResult) (int, error)
|
||||
CheckIfOvalFetched(string, string) (bool, error)
|
||||
CheckIfOvalFresh(string, string) (bool, error)
|
||||
}
|
||||
|
||||
// Base is a base struct
|
||||
type Base struct {
|
||||
family string
|
||||
}
|
||||
|
||||
// CheckHTTPHealth do health check
|
||||
func (b Base) CheckHTTPHealth() error {
|
||||
if !cnf.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
return nil
|
||||
}
|
||||
|
||||
url := fmt.Sprintf("%s/health", cnf.Conf.OvalDict.URL)
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
resp, _, errs = gorequest.New().Get(url).End()
|
||||
// resp, _, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
|
||||
// resp, _, errs = gorequest.New().Proxy(api.httpProxy).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return xerrors.Errorf("Failed to request to OVAL server. url: %s, errs: %w",
|
||||
url, errs)
|
||||
}
|
||||
return nil
|
||||
Cnf config.VulnDictInterface
|
||||
}
|
||||
|
||||
// CheckIfOvalFetched checks if oval entries are in DB by family, release.
|
||||
func (b Base) CheckIfOvalFetched(driver db.DB, osFamily, release string) (fetched bool, err error) {
|
||||
if !cnf.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
count, err := driver.CountDefs(osFamily, release)
|
||||
func (b Base) CheckIfOvalFetched(osFamily, release string) (fetched bool, err error) {
|
||||
ovalFamily, err := GetFamilyInOval(osFamily)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if !b.Cnf.IsFetchViaHTTP() {
|
||||
driver, err := newOvalDB(b.Cnf, ovalFamily)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to count OVAL defs: %s, %s, %w", osFamily, release, err)
|
||||
return false, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
count, err := driver.CountDefs(ovalFamily, release)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to count OVAL defs: %s, %s, %w", ovalFamily, release, err)
|
||||
}
|
||||
logging.Log.Infof("OVAL %s %s found. defs: %d", osFamily, release, count)
|
||||
return 0 < count, nil
|
||||
}
|
||||
|
||||
url, _ := util.URLPathJoin(cnf.Conf.OvalDict.URL, "count", osFamily, release)
|
||||
resp, body, errs := gorequest.New().Get(url).End()
|
||||
url, _ := util.URLPathJoin(config.Conf.OvalDict.URL, "count", ovalFamily, release)
|
||||
resp, body, errs := gorequest.New().Timeout(10 * time.Second).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return false, xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %w", url, resp, errs)
|
||||
return false, xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
|
||||
}
|
||||
count := 0
|
||||
if err := json.Unmarshal([]byte(body), &count); err != nil {
|
||||
return false, xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
|
||||
}
|
||||
logging.Log.Infof("OVAL %s %s is fresh. defs: %d", osFamily, release, count)
|
||||
return 0 < count, nil
|
||||
}
|
||||
|
||||
// CheckIfOvalFresh checks if oval entries are fresh enough
|
||||
func (b Base) CheckIfOvalFresh(driver db.DB, osFamily, release string) (ok bool, err error) {
|
||||
func (b Base) CheckIfOvalFresh(osFamily, release string) (ok bool, err error) {
|
||||
ovalFamily, err := GetFamilyInOval(osFamily)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
var lastModified time.Time
|
||||
if !cnf.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
lastModified = driver.GetLastModified(osFamily, release)
|
||||
if !b.Cnf.IsFetchViaHTTP() {
|
||||
driver, err := newOvalDB(b.Cnf, ovalFamily)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
lastModified = driver.GetLastModified(ovalFamily, release)
|
||||
} else {
|
||||
url, _ := util.URLPathJoin(cnf.Conf.OvalDict.URL, "lastmodified", osFamily, release)
|
||||
resp, body, errs := gorequest.New().Get(url).End()
|
||||
url, _ := util.URLPathJoin(config.Conf.OvalDict.URL, "lastmodified", ovalFamily, release)
|
||||
resp, body, errs := gorequest.New().Timeout(10 * time.Second).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return false, xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %w", url, resp, errs)
|
||||
return false, xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal([]byte(body), &lastModified); err != nil {
|
||||
@@ -92,10 +99,37 @@ func (b Base) CheckIfOvalFresh(driver db.DB, osFamily, release string) (ok bool,
|
||||
since := time.Now()
|
||||
since = since.AddDate(0, 0, -3)
|
||||
if lastModified.Before(since) {
|
||||
util.Log.Warnf("OVAL for %s %s is old, last modified is %s. It's recommended to update OVAL to improve scanning accuracy. How to update OVAL database, see https://github.com/kotakanbe/goval-dictionary#usage",
|
||||
logging.Log.Warnf("OVAL for %s %s is old, last modified is %s. It's recommended to update OVAL to improve scanning accuracy. How to update OVAL database, see https://github.com/kotakanbe/goval-dictionary#usage",
|
||||
osFamily, release, lastModified)
|
||||
return false, nil
|
||||
}
|
||||
util.Log.Infof("OVAL is fresh: %s %s ", osFamily, release)
|
||||
logging.Log.Infof("OVAL %s %s is fresh. lastModified: %s", osFamily, release, lastModified.Format(time.RFC3339))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// newOvalDB returns an OVAL dictionary DB client
|
||||
func newOvalDB(cnf config.VulnDictInterface, familyInScanResult string) (driver db.DB, err error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
|
||||
ovalFamily, err := GetFamilyInOval(familyInScanResult)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
driver, locked, err := db.NewDB(ovalFamily, cnf.GetType(), path, cnf.GetDebugSQL())
|
||||
if err != nil {
|
||||
if locked {
|
||||
err = xerrors.Errorf("SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
err = xerrors.Errorf("Failed to new OVAL DB. err: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
@@ -8,9 +8,9 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/kotakanbe/goval-dictionary/db"
|
||||
ovalmodels "github.com/kotakanbe/goval-dictionary/models"
|
||||
)
|
||||
|
||||
@@ -20,18 +20,29 @@ type RedHatBase struct {
|
||||
}
|
||||
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o RedHatBase) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err error) {
|
||||
func (o RedHatBase) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
var relatedDefs ovalResult
|
||||
if config.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r); err != nil {
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
driver, err := newOvalDB(o.Cnf, r.Family)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v")
|
||||
}
|
||||
}()
|
||||
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
relatedDefs.Sort()
|
||||
for _, defPacks := range relatedDefs.entries {
|
||||
nCVEs += o.update(r, defPacks)
|
||||
}
|
||||
@@ -91,7 +102,7 @@ func (o RedHatBase) update(r *models.ScanResult, defPacks defPacks) (nCVEs int)
|
||||
ovalContent := *o.convertToModel(cve.CveID, &defPacks.def)
|
||||
vinfo, ok := r.ScannedCves[cve.CveID]
|
||||
if !ok {
|
||||
util.Log.Debugf("%s is newly detected by OVAL", cve.CveID)
|
||||
logging.Log.Debugf("%s is newly detected by OVAL: DefID: %s", cve.CveID, defPacks.def.DefinitionID)
|
||||
vinfo = models.VulnInfo{
|
||||
CveID: cve.CveID,
|
||||
Confidences: models.Confidences{models.OvalMatch},
|
||||
@@ -102,13 +113,12 @@ func (o RedHatBase) update(r *models.ScanResult, defPacks defPacks) (nCVEs int)
|
||||
cveContents := vinfo.CveContents
|
||||
if v, ok := vinfo.CveContents[ctype]; ok {
|
||||
if v.LastModified.After(ovalContent.LastModified) {
|
||||
util.Log.Debugf("%s, OvalID: %d ignored: ",
|
||||
cve.CveID, defPacks.def.ID)
|
||||
logging.Log.Debugf("%s ignored. DefID: %s ", cve.CveID, defPacks.def.DefinitionID)
|
||||
} else {
|
||||
util.Log.Debugf("%s OVAL will be overwritten", cve.CveID)
|
||||
logging.Log.Debugf("%s OVAL will be overwritten. DefID: %s", cve.CveID, defPacks.def.DefinitionID)
|
||||
}
|
||||
} else {
|
||||
util.Log.Debugf("%s also detected by OVAL", cve.CveID)
|
||||
logging.Log.Debugf("%s also detected by OVAL. DefID: %s", cve.CveID, defPacks.def.DefinitionID)
|
||||
cveContents = models.CveContents{}
|
||||
}
|
||||
|
||||
@@ -143,7 +153,7 @@ func (o RedHatBase) update(r *models.ScanResult, defPacks defPacks) (nCVEs int)
|
||||
|
||||
func (o RedHatBase) convertToDistroAdvisory(def *ovalmodels.Definition) *models.DistroAdvisory {
|
||||
advisoryID := def.Title
|
||||
if (o.family == config.RedHat || o.family == config.CentOS) && len(advisoryID) > 0 {
|
||||
if (o.family == constant.RedHat || o.family == constant.CentOS) && len(advisoryID) > 0 {
|
||||
ss := strings.Fields(def.Title)
|
||||
advisoryID = strings.TrimSuffix(ss[0], ":")
|
||||
}
|
||||
@@ -173,17 +183,15 @@ func (o RedHatBase) convertToModel(cveID string, def *ovalmodels.Definition) *mo
|
||||
score2, vec2 := o.parseCvss2(cve.Cvss2)
|
||||
score3, vec3 := o.parseCvss3(cve.Cvss3)
|
||||
|
||||
severity := def.Advisory.Severity
|
||||
sev2, sev3, severity := "", "", def.Advisory.Severity
|
||||
if cve.Impact != "" {
|
||||
severity = cve.Impact
|
||||
}
|
||||
|
||||
sev2, sev3 := "", ""
|
||||
if score2 == 0 {
|
||||
sev2 = severity
|
||||
}
|
||||
if score3 == 0 {
|
||||
if severity != "None" {
|
||||
sev3 = severity
|
||||
if score2 != 0 {
|
||||
sev2 = severity
|
||||
}
|
||||
}
|
||||
|
||||
// CWE-ID in RedHat OVAL may have multiple cweIDs separated by space
|
||||
@@ -248,11 +256,12 @@ type RedHat struct {
|
||||
}
|
||||
|
||||
// NewRedhat creates OVAL client for Redhat
|
||||
func NewRedhat() RedHat {
|
||||
func NewRedhat(cnf config.VulnDictInterface) RedHat {
|
||||
return RedHat{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: config.RedHat,
|
||||
family: constant.RedHat,
|
||||
Cnf: cnf,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -264,11 +273,12 @@ type CentOS struct {
|
||||
}
|
||||
|
||||
// NewCentOS creates OVAL client for CentOS
|
||||
func NewCentOS() CentOS {
|
||||
func NewCentOS(cnf config.VulnDictInterface) CentOS {
|
||||
return CentOS{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: config.CentOS,
|
||||
family: constant.CentOS,
|
||||
Cnf: cnf,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -280,11 +290,12 @@ type Oracle struct {
|
||||
}
|
||||
|
||||
// NewOracle creates OVAL client for Oracle
|
||||
func NewOracle() Oracle {
|
||||
func NewOracle(cnf config.VulnDictInterface) Oracle {
|
||||
return Oracle{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: config.Oracle,
|
||||
family: constant.Oracle,
|
||||
Cnf: cnf,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -297,11 +308,12 @@ type Amazon struct {
|
||||
}
|
||||
|
||||
// NewAmazon creates OVAL client for Amazon Linux
|
||||
func NewAmazon() Amazon {
|
||||
func NewAmazon(cnf config.VulnDictInterface) Amazon {
|
||||
return Amazon{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: config.Amazon,
|
||||
family: constant.Amazon,
|
||||
Cnf: cnf,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -6,9 +6,7 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
ovalmodels "github.com/kotakanbe/goval-dictionary/models"
|
||||
)
|
||||
|
||||
@@ -132,7 +130,7 @@ func TestPackNamesOfUpdate(t *testing.T) {
|
||||
},
|
||||
}
|
||||
|
||||
util.Log = util.NewCustomLogger(config.ServerInfo{})
|
||||
// util.Log = util.Logger{}.NewCustomLogger()
|
||||
for i, tt := range tests {
|
||||
RedHat{}.update(&tt.in, tt.defPacks)
|
||||
e := tt.out.ScannedCves["CVE-2000-1000"].AffectedPackages
|
||||
|
||||
31
oval/suse.go
@@ -4,9 +4,9 @@ package oval
|
||||
|
||||
import (
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/kotakanbe/goval-dictionary/db"
|
||||
ovalmodels "github.com/kotakanbe/goval-dictionary/models"
|
||||
)
|
||||
|
||||
@@ -16,23 +16,34 @@ type SUSE struct {
|
||||
}
|
||||
|
||||
// NewSUSE creates OVAL client for SUSE
|
||||
func NewSUSE() SUSE {
|
||||
func NewSUSE(cnf config.VulnDictInterface) SUSE {
|
||||
// TODO implement other family
|
||||
return SUSE{
|
||||
Base{
|
||||
family: config.SUSEEnterpriseServer,
|
||||
family: constant.SUSEEnterpriseServer,
|
||||
Cnf: cnf,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o SUSE) FillWithOval(driver db.DB, r *models.ScanResult) (nCVEs int, err error) {
|
||||
func (o SUSE) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
var relatedDefs ovalResult
|
||||
if config.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r); err != nil {
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
driver, err := newOvalDB(o.Cnf, r.Family)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v")
|
||||
}
|
||||
}()
|
||||
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -55,7 +66,7 @@ func (o SUSE) update(r *models.ScanResult, defPacks defPacks) {
|
||||
ovalContent.Type = models.NewCveContentType(o.family)
|
||||
vinfo, ok := r.ScannedCves[defPacks.def.Title]
|
||||
if !ok {
|
||||
util.Log.Debugf("%s is newly detected by OVAL", defPacks.def.Title)
|
||||
logging.Log.Debugf("%s is newly detected by OVAL", defPacks.def.Title)
|
||||
vinfo = models.VulnInfo{
|
||||
CveID: defPacks.def.Title,
|
||||
Confidences: models.Confidences{models.OvalMatch},
|
||||
@@ -65,9 +76,9 @@ func (o SUSE) update(r *models.ScanResult, defPacks defPacks) {
|
||||
cveContents := vinfo.CveContents
|
||||
ctype := models.NewCveContentType(o.family)
|
||||
if _, ok := vinfo.CveContents[ctype]; ok {
|
||||
util.Log.Debugf("%s OVAL will be overwritten", defPacks.def.Title)
|
||||
logging.Log.Debugf("%s OVAL will be overwritten", defPacks.def.Title)
|
||||
} else {
|
||||
util.Log.Debugf("%s is also detected by OVAL", defPacks.def.Title)
|
||||
logging.Log.Debugf("%s is also detected by OVAL", defPacks.def.Title)
|
||||
cveContents = models.CveContents{}
|
||||
}
|
||||
vinfo.Confidences.AppendIfMissing(models.OvalMatch)
|
||||
|
||||
189
oval/util.go
@@ -6,11 +6,13 @@ import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"regexp"
|
||||
"strings"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
apkver "github.com/knqyf263/go-apk-version"
|
||||
@@ -71,6 +73,12 @@ func (e *ovalResult) upsert(def ovalmodels.Definition, packName string, fstat fi
|
||||
return false
|
||||
}
|
||||
|
||||
func (e *ovalResult) Sort() {
	sort.SliceStable(e.entries, func(i, j int) bool {
		return e.entries[i].def.DefinitionID < e.entries[j].def.DefinitionID
	})
}
|
||||
|
||||
type request struct {
|
||||
packName string
|
||||
versionRelease string
|
||||
@@ -78,6 +86,7 @@ type request struct {
|
||||
arch string
|
||||
binaryPackNames []string
|
||||
isSrcPack bool
|
||||
modularityLabel string // RHEL 8 or later only
|
||||
}
|
||||
|
||||
type response struct {
|
||||
@@ -86,8 +95,7 @@ type response struct {
|
||||
}
|
||||
|
||||
// getDefsByPackNameViaHTTP fetches OVAL information via HTTP
|
||||
func getDefsByPackNameViaHTTP(r *models.ScanResult) (
|
||||
relatedDefs ovalResult, err error) {
|
||||
func getDefsByPackNameViaHTTP(r *models.ScanResult, url string) (relatedDefs ovalResult, err error) {
|
||||
|
||||
nReq := len(r.Packages) + len(r.SrcPackages)
|
||||
reqChan := make(chan request, nReq)
|
||||
@@ -125,7 +133,7 @@ func getDefsByPackNameViaHTTP(r *models.ScanResult) (
|
||||
select {
|
||||
case req := <-reqChan:
|
||||
url, err := util.URLPathJoin(
|
||||
config.Conf.OvalDict.URL,
|
||||
url,
|
||||
"packs",
|
||||
r.Family,
|
||||
r.Release,
|
||||
@@ -134,7 +142,7 @@ func getDefsByPackNameViaHTTP(r *models.ScanResult) (
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
util.Log.Debugf("HTTP Request to %s", url)
|
||||
logging.Log.Debugf("HTTP Request to %s", url)
|
||||
httpGet(url, req, resChan, errChan)
|
||||
}
|
||||
}
|
||||
@@ -147,7 +155,7 @@ func getDefsByPackNameViaHTTP(r *models.ScanResult) (
|
||||
select {
|
||||
case res := <-resChan:
|
||||
for _, def := range res.defs {
|
||||
affected, notFixedYet, fixedIn := isOvalDefAffected(def, res.request, r.Family, r.RunningKernel)
|
||||
affected, notFixedYet, fixedIn := isOvalDefAffected(def, res.request, r.Family, r.RunningKernel, r.EnabledDnfModules)
|
||||
if !affected {
|
||||
continue
|
||||
}
|
||||
@@ -188,19 +196,18 @@ func httpGet(url string, req request, resChan chan<- response, errChan chan<- er
|
||||
var resp *http.Response
|
||||
count, retryMax := 0, 3
|
||||
f := func() (err error) {
|
||||
// resp, body, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
|
||||
resp, body, errs = gorequest.New().Get(url).End()
|
||||
resp, body, errs = gorequest.New().Timeout(10 * time.Second).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
count++
|
||||
if count == retryMax {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %w", url, resp, errs)
|
||||
return xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, t time.Duration) {
|
||||
util.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %s", t, err)
|
||||
logging.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %+v", t, err)
|
||||
}
|
||||
err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify)
|
||||
if err != nil {
|
||||
@@ -244,13 +251,18 @@ func getDefsByPackNameFromOvalDB(driver db.DB, r *models.ScanResult) (relatedDef
|
||||
})
|
||||
}
|
||||
|
||||
ovalFamily, err := GetFamilyInOval(r.Family)
|
||||
if err != nil {
|
||||
return relatedDefs, err
|
||||
}
|
||||
|
||||
for _, req := range requests {
|
||||
definitions, err := driver.GetByPackName(r.Family, r.Release, req.packName, req.arch)
|
||||
definitions, err := driver.GetByPackName(ovalFamily, r.Release, req.packName, req.arch)
|
||||
if err != nil {
|
||||
return relatedDefs, xerrors.Errorf("Failed to get %s OVAL info by package: %#v, err: %w", r.Family, req, err)
|
||||
}
|
||||
for _, def := range definitions {
|
||||
affected, notFixedYet, fixedIn := isOvalDefAffected(def, req, r.Family, r.RunningKernel)
|
||||
affected, notFixedYet, fixedIn := isOvalDefAffected(def, req, ovalFamily, r.RunningKernel, r.EnabledDnfModules)
|
||||
if !affected {
|
||||
continue
|
||||
}
|
||||
@@ -277,32 +289,33 @@ func getDefsByPackNameFromOvalDB(driver db.DB, r *models.ScanResult) (relatedDef
|
||||
return
|
||||
}
|
||||
|
||||
func major(version string) string {
|
||||
if version == "" {
|
||||
return ""
|
||||
}
|
||||
ss := strings.SplitN(version, ":", 2)
|
||||
ver := ""
|
||||
if len(ss) == 1 {
|
||||
ver = ss[0]
|
||||
} else {
|
||||
ver = ss[1]
|
||||
}
|
||||
return ver[0:strings.Index(ver, ".")]
|
||||
}
|
||||
|
||||
func isOvalDefAffected(def ovalmodels.Definition, req request, family string, running models.Kernel) (affected, notFixedYet bool, fixedIn string) {
|
||||
func isOvalDefAffected(def ovalmodels.Definition, req request, family string, running models.Kernel, enabledMods []string) (affected, notFixedYet bool, fixedIn string) {
|
||||
for _, ovalPack := range def.AffectedPacks {
|
||||
if req.packName != ovalPack.Name {
|
||||
continue
|
||||
}
|
||||
|
||||
		// Added for RHEL 8+ modular packages: only match OVAL entries whose
		// modularity label is empty or corresponds to an enabled DNF module stream.
		isModularityLabelEmptyOrSame := false
		if ovalPack.ModularityLabel != "" {
			for _, mod := range enabledMods {
				if mod == ovalPack.ModularityLabel {
					isModularityLabelEmptyOrSame = true
					break
				}
			}
		} else {
			isModularityLabelEmptyOrSame = true
		}
		if !isModularityLabelEmptyOrSame {
			continue
		}
|
||||
|
||||
if running.Release != "" {
|
||||
switch family {
|
||||
case config.RedHat, config.CentOS:
|
||||
case constant.RedHat, constant.CentOS:
|
||||
// For kernel related packages, ignore OVAL information with different major versions
|
||||
if _, ok := kernelRelatedPackNames[ovalPack.Name]; ok {
|
||||
if major(ovalPack.Version) != major(running.Release) {
|
||||
if util.Major(ovalPack.Version) != util.Major(running.Release) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
@@ -316,7 +329,7 @@ func isOvalDefAffected(def ovalmodels.Definition, req request, family string, ru
|
||||
// Compare between the installed version vs the version in OVAL
|
||||
less, err := lessThan(family, req.versionRelease, ovalPack)
|
||||
if err != nil {
|
||||
util.Log.Debugf("Failed to parse versions: %s, Ver: %#v, OVAL: %#v, DefID: %s",
|
||||
logging.Log.Debugf("Failed to parse versions: %s, Ver: %#v, OVAL: %#v, DefID: %s",
|
||||
err, req.versionRelease, ovalPack, def.DefinitionID)
|
||||
return false, false, ovalPack.Version
|
||||
}
|
||||
@@ -328,12 +341,12 @@ func isOvalDefAffected(def ovalmodels.Definition, req request, family string, ru
|
||||
|
||||
// If the version of installed is less than in OVAL
|
||||
switch family {
|
||||
case config.RedHat,
|
||||
config.Amazon,
|
||||
config.SUSEEnterpriseServer,
|
||||
config.Debian,
|
||||
config.Ubuntu,
|
||||
config.Raspbian:
|
||||
case constant.RedHat,
|
||||
constant.Amazon,
|
||||
constant.SUSEEnterpriseServer,
|
||||
constant.Debian,
|
||||
constant.Ubuntu,
|
||||
constant.Raspbian:
|
||||
// Use fixed state in OVAL for these distros.
|
||||
return true, false, ovalPack.Version
|
||||
}
|
||||
@@ -352,7 +365,7 @@ func isOvalDefAffected(def ovalmodels.Definition, req request, family string, ru
|
||||
// compare version: newVer vs oval
|
||||
less, err := lessThan(family, req.newVersionRelease, ovalPack)
|
||||
if err != nil {
|
||||
util.Log.Debugf("Failed to parse versions: %s, NewVer: %#v, OVAL: %#v, DefID: %s",
|
||||
logging.Log.Debugf("Failed to parse versions: %s, NewVer: %#v, OVAL: %#v, DefID: %s",
|
||||
err, req.newVersionRelease, ovalPack, def.DefinitionID)
|
||||
return false, false, ovalPack.Version
|
||||
}
|
||||
@@ -362,14 +375,11 @@ func isOvalDefAffected(def ovalmodels.Definition, req request, family string, ru
|
||||
return false, false, ""
|
||||
}
|
||||
|
||||
var centosVerPattern = regexp.MustCompile(`\.[es]l(\d+)(?:_\d+)?(?:\.centos)?`)
|
||||
var esVerPattern = regexp.MustCompile(`\.el(\d+)(?:_\d+)?`)
|
||||
|
||||
func lessThan(family, newVer string, packInOVAL ovalmodels.Package) (bool, error) {
|
||||
switch family {
|
||||
case config.Debian,
|
||||
config.Ubuntu,
|
||||
config.Raspbian:
|
||||
case constant.Debian,
|
||||
constant.Ubuntu,
|
||||
constant.Raspbian:
|
||||
vera, err := debver.NewVersion(newVer)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -380,7 +390,7 @@ func lessThan(family, newVer string, packInOVAL ovalmodels.Package) (bool, error
|
||||
}
|
||||
return vera.LessThan(verb), nil
|
||||
|
||||
case config.Alpine:
|
||||
case constant.Alpine:
|
||||
vera, err := apkver.NewVersion(newVer)
|
||||
if err != nil {
|
||||
return false, err
|
||||
@@ -391,21 +401,94 @@ func lessThan(family, newVer string, packInOVAL ovalmodels.Package) (bool, error
|
||||
}
|
||||
return vera.LessThan(verb), nil
|
||||
|
||||
case config.Oracle,
|
||||
config.SUSEEnterpriseServer,
|
||||
config.Amazon:
|
||||
case constant.Oracle,
|
||||
constant.SUSEEnterpriseServer,
|
||||
constant.Amazon:
|
||||
vera := rpmver.NewVersion(newVer)
|
||||
verb := rpmver.NewVersion(packInOVAL.Version)
|
||||
return vera.LessThan(verb), nil
|
||||
|
||||
case config.RedHat,
|
||||
config.CentOS:
|
||||
vera := rpmver.NewVersion(centosVerPattern.ReplaceAllString(newVer, ".el$1"))
|
||||
verb := rpmver.NewVersion(esVerPattern.ReplaceAllString(packInOVAL.Version, ".el$1"))
|
||||
case constant.RedHat,
|
||||
constant.CentOS:
|
||||
vera := rpmver.NewVersion(centOSVersionToRHEL(newVer))
|
||||
verb := rpmver.NewVersion(centOSVersionToRHEL(packInOVAL.Version))
|
||||
return vera.LessThan(verb), nil
|
||||
|
||||
default:
|
||||
util.Log.Errorf("Not implemented yet: %s", family)
|
||||
return false, xerrors.Errorf("Not implemented yet: %s", family)
|
||||
}
|
||||
return false, xerrors.Errorf("Package version comparison not supported: %s", family)
|
||||
}
|
||||
|
||||
var centosVerPattern = regexp.MustCompile(`\.[es]l(\d+)(?:_\d+)?(?:\.centos)?`)
|
||||
|
||||
func centOSVersionToRHEL(ver string) string {
|
||||
return centosVerPattern.ReplaceAllString(ver, ".el$1")
|
||||
}
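goval-dictionary stores RHEL-style versions, so the helper above strips the .centos suffix and the _minor release part before rpm version comparison. A small standalone illustration; the inputs mirror the Test_centOSVersionToRHEL cases further down in this diff:

package main

import (
	"fmt"
	"regexp"
)

var centosVerPattern = regexp.MustCompile(`\.[es]l(\d+)(?:_\d+)?(?:\.centos)?`)

func centOSVersionToRHEL(ver string) string {
	return centosVerPattern.ReplaceAllString(ver, ".el$1")
}

func main() {
	fmt.Println(centOSVersionToRHEL("grub2-tools-2.02-0.80.el7.centos.x86_64")) // grub2-tools-2.02-0.80.el7.x86_64
	fmt.Println(centOSVersionToRHEL("sudo-1.8.23-10.el7_9.1"))                  // sudo-1.8.23-10.el7.1
}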
|
||||
|
||||
// NewOVALClient returns a client for OVAL database
|
||||
func NewOVALClient(family string, cnf config.GovalDictConf) (Client, error) {
|
||||
switch family {
|
||||
case constant.Debian, constant.Raspbian:
|
||||
return NewDebian(&cnf), nil
|
||||
case constant.Ubuntu:
|
||||
return NewUbuntu(&cnf), nil
|
||||
case constant.RedHat:
|
||||
return NewRedhat(&cnf), nil
|
||||
case constant.CentOS:
|
||||
//use RedHat's OVAL
|
||||
return NewCentOS(&cnf), nil
|
||||
case constant.Oracle:
|
||||
return NewOracle(&cnf), nil
|
||||
case constant.SUSEEnterpriseServer:
|
||||
// TODO other suse family
|
||||
return NewSUSE(&cnf), nil
|
||||
case constant.Alpine:
|
||||
return NewAlpine(&cnf), nil
|
||||
case constant.Amazon:
|
||||
return NewAmazon(&cnf), nil
|
||||
case constant.FreeBSD, constant.Windows:
|
||||
return nil, nil
|
||||
case constant.ServerTypePseudo:
|
||||
return nil, nil
|
||||
default:
|
||||
if family == "" {
|
||||
return nil, xerrors.New("Probably an error occurred during scanning. Check the error message")
|
||||
}
|
||||
return nil, xerrors.Errorf("OVAL for %s is not implemented yet", family)
|
||||
}
|
||||
}
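With the driver removed from the Client interface, FillWithOval now opens and closes its own goval-dictionary DB (or goes over HTTP), so a caller only needs the OS family and the dictionary config. A hedged usage sketch; the wrapping function and its package are hypothetical, not the project's actual detector code:

package detector // hypothetical caller package

import (
	"github.com/future-architect/vuls/config"
	"github.com/future-architect/vuls/logging"
	"github.com/future-architect/vuls/models"
	"github.com/future-architect/vuls/oval"
)

// detectWithOval shows the refactored flow: no db.DB handle is threaded through any more.
func detectWithOval(r *models.ScanResult, cnf config.GovalDictConf) error {
	client, err := oval.NewOVALClient(r.Family, cnf)
	if err != nil {
		return err
	}
	if client == nil {
		// FreeBSD, Windows and pseudo servers come back as a nil client.
		return nil
	}
	nCVEs, err := client.FillWithOval(r)
	if err != nil {
		return err
	}
	logging.Log.Infof("%s: %d CVEs are detected with OVAL", r.ServerName, nCVEs)
	return nil
}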
|
||||
|
||||
// GetFamilyInOval returns the OS family name in OVAL
|
||||
// For example, CentOS uses Red Hat's OVAL, so return 'redhat'
|
||||
func GetFamilyInOval(familyInScanResult string) (string, error) {
|
||||
switch familyInScanResult {
|
||||
case constant.Debian, constant.Raspbian:
|
||||
return constant.Debian, nil
|
||||
case constant.Ubuntu:
|
||||
return constant.Ubuntu, nil
|
||||
case constant.RedHat:
|
||||
return constant.RedHat, nil
|
||||
case constant.CentOS:
|
||||
//use RedHat's OVAL
|
||||
return constant.RedHat, nil
|
||||
case constant.Oracle:
|
||||
return constant.Oracle, nil
|
||||
case constant.SUSEEnterpriseServer:
|
||||
// TODO other suse family
|
||||
return constant.SUSEEnterpriseServer, nil
|
||||
case constant.Alpine:
|
||||
return constant.Alpine, nil
|
||||
case constant.Amazon:
|
||||
return constant.Amazon, nil
|
||||
case constant.FreeBSD, constant.Windows:
|
||||
return "", nil
|
||||
case constant.ServerTypePseudo:
|
||||
return "", nil
|
||||
default:
|
||||
if familyInScanResult == "" {
|
||||
return "", xerrors.New("Probably an error occurred during scanning. Check the error message")
|
||||
}
|
||||
return "", xerrors.Errorf("OVAL for %s is not implemented yet", familyInScanResult)
|
||||
}
|
||||
|
||||
}
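Since CentOS and Raspbian have no OVAL feed of their own, the mapping above lets the DB layer query the right dictionary family. A tiny illustrative program; the import paths follow the repository layout shown in this diff:

package main

import (
	"fmt"

	"github.com/future-architect/vuls/constant"
	"github.com/future-architect/vuls/oval"
)

func main() {
	// CentOS has no OVAL of its own, so it resolves to Red Hat's OVAL family.
	ovalFamily, err := oval.GetFamilyInOval(constant.CentOS)
	if err != nil {
		panic(err)
	}
	fmt.Println(ovalFamily == constant.RedHat) // true
}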
|
||||
|
||||
@@ -7,7 +7,7 @@ import (
|
||||
"sort"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/models"
|
||||
ovalmodels "github.com/kotakanbe/goval-dictionary/models"
|
||||
)
|
||||
@@ -202,6 +202,7 @@ func TestIsOvalDefAffected(t *testing.T) {
|
||||
req request
|
||||
family string
|
||||
kernel models.Kernel
|
||||
mods []string
|
||||
}
|
||||
var tests = []struct {
|
||||
in in
|
||||
@@ -1029,7 +1030,7 @@ func TestIsOvalDefAffected(t *testing.T) {
|
||||
// For kernel related packages, ignore OVAL with different major versions
|
||||
{
|
||||
in: in{
|
||||
family: config.CentOS,
|
||||
family: constant.CentOS,
|
||||
def: ovalmodels.Definition{
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
@@ -1053,7 +1054,7 @@ func TestIsOvalDefAffected(t *testing.T) {
|
||||
},
|
||||
{
|
||||
in: in{
|
||||
family: config.CentOS,
|
||||
family: constant.CentOS,
|
||||
def: ovalmodels.Definition{
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
@@ -1076,9 +1077,85 @@ func TestIsOvalDefAffected(t *testing.T) {
|
||||
notFixedYet: false,
|
||||
fixedIn: "3.1.0",
|
||||
},
|
||||
// dnf module
|
||||
{
|
||||
in: in{
|
||||
family: constant.RedHat,
|
||||
def: ovalmodels.Definition{
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
Name: "nginx",
|
||||
Version: "1.16.1-1.module+el8.3.0+8844+e5e7039f.1",
|
||||
NotFixedYet: false,
|
||||
ModularityLabel: "nginx:1.16",
|
||||
},
|
||||
},
|
||||
},
|
||||
req: request{
|
||||
packName: "nginx",
|
||||
versionRelease: "1.16.0-1.module+el8.3.0+8844+e5e7039f.1",
|
||||
},
|
||||
mods: []string{
|
||||
"nginx:1.16",
|
||||
},
|
||||
},
|
||||
affected: true,
|
||||
notFixedYet: false,
|
||||
fixedIn: "1.16.1-1.module+el8.3.0+8844+e5e7039f.1",
|
||||
},
|
||||
// dnf module 2
|
||||
{
|
||||
in: in{
|
||||
family: constant.RedHat,
|
||||
def: ovalmodels.Definition{
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
Name: "nginx",
|
||||
Version: "1.16.1-1.module+el8.3.0+8844+e5e7039f.1",
|
||||
NotFixedYet: false,
|
||||
ModularityLabel: "nginx:1.16",
|
||||
},
|
||||
},
|
||||
},
|
||||
req: request{
|
||||
packName: "nginx",
|
||||
versionRelease: "1.16.2-1.module+el8.3.0+8844+e5e7039f.1",
|
||||
},
|
||||
mods: []string{
|
||||
"nginx:1.16",
|
||||
},
|
||||
},
|
||||
affected: false,
|
||||
notFixedYet: false,
|
||||
},
|
||||
// dnf module 3
|
||||
{
|
||||
in: in{
|
||||
family: constant.RedHat,
|
||||
def: ovalmodels.Definition{
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
Name: "nginx",
|
||||
Version: "1.16.1-1.module+el8.3.0+8844+e5e7039f.1",
|
||||
NotFixedYet: false,
|
||||
ModularityLabel: "nginx:1.16",
|
||||
},
|
||||
},
|
||||
},
|
||||
req: request{
|
||||
packName: "nginx",
|
||||
versionRelease: "1.16.0-1.module+el8.3.0+8844+e5e7039f.1",
|
||||
},
|
||||
mods: []string{
|
||||
"nginx:1.14",
|
||||
},
|
||||
},
|
||||
affected: false,
|
||||
notFixedYet: false,
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
affected, notFixedYet, fixedIn := isOvalDefAffected(tt.in.def, tt.in.req, tt.in.family, tt.in.kernel)
|
||||
affected, notFixedYet, fixedIn := isOvalDefAffected(tt.in.def, tt.in.req, tt.in.family, tt.in.kernel, tt.in.mods)
|
||||
if tt.affected != affected {
|
||||
t.Errorf("[%d] affected\nexpected: %v\n actual: %v\n", i, tt.affected, affected)
|
||||
}
|
||||
@@ -1091,28 +1168,170 @@ func TestIsOvalDefAffected(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_major(t *testing.T) {
|
||||
var tests = []struct {
|
||||
in string
|
||||
expected string
|
||||
func Test_centOSVersionToRHEL(t *testing.T) {
|
||||
type args struct {
|
||||
ver string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
in: "",
|
||||
expected: "",
|
||||
name: "remove centos.",
|
||||
args: args{
|
||||
ver: "grub2-tools-2.02-0.80.el7.centos.x86_64",
|
||||
},
|
||||
want: "grub2-tools-2.02-0.80.el7.x86_64",
|
||||
},
|
||||
{
|
||||
in: "4.1",
|
||||
expected: "4",
|
||||
name: "noop",
|
||||
args: args{
|
||||
ver: "grub2-tools-2.02-0.80.el7.x86_64",
|
||||
},
|
||||
want: "grub2-tools-2.02-0.80.el7.x86_64",
|
||||
},
|
||||
{
|
||||
in: "0:4.1",
|
||||
expected: "4",
|
||||
name: "remove minor",
|
||||
args: args{
|
||||
ver: "sudo-1.8.23-10.el7_9.1",
|
||||
},
|
||||
want: "sudo-1.8.23-10.el7.1",
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
a := major(tt.in)
|
||||
if tt.expected != a {
|
||||
t.Errorf("[%d]\nexpected: %s\n actual: %s\n", i, tt.expected, a)
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := centOSVersionToRHEL(tt.args.ver); got != tt.want {
|
||||
t.Errorf("centOSVersionToRHEL() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_lessThan(t *testing.T) {
|
||||
type args struct {
|
||||
family string
|
||||
newVer string
|
||||
AffectedPacks ovalmodels.Package
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "newVer and ovalmodels.Package both have underscoreMinorversion.",
|
||||
args: args{
|
||||
family: "centos",
|
||||
newVer: "1.8.23-10.el7_9.1",
|
||||
AffectedPacks: ovalmodels.Package{
|
||||
Name: "sudo",
|
||||
Version: "1.8.23-10.el7_9.1",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "only newVer has underscoreMinorversion.",
|
||||
args: args{
|
||||
family: "centos",
|
||||
newVer: "1.8.23-10.el7_9.1",
|
||||
AffectedPacks: ovalmodels.Package{
|
||||
Name: "sudo",
|
||||
Version: "1.8.23-10.el7.1",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "only ovalmodels.Package has underscoreMinorversion.",
|
||||
args: args{
|
||||
family: "centos",
|
||||
newVer: "1.8.23-10.el7.1",
|
||||
AffectedPacks: ovalmodels.Package{
|
||||
Name: "sudo",
|
||||
Version: "1.8.23-10.el7_9.1",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "neither newVer nor ovalmodels.Package have underscoreMinorversion.",
|
||||
args: args{
|
||||
family: "centos",
|
||||
newVer: "1.8.23-10.el7.1",
|
||||
AffectedPacks: ovalmodels.Package{
|
||||
Name: "sudo",
|
||||
Version: "1.8.23-10.el7.1",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, _ := lessThan(tt.args.family, tt.args.newVer, tt.args.AffectedPacks)
|
||||
if got != tt.want {
|
||||
t.Errorf("lessThan() = %t, want %t", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
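lessThan is also outside this diff. Every case above expects false because the two versions are equal once the "_<minor>" dist tag is stripped from either side. A rough sketch of that comparison, assuming the knqyf263/go-rpm-version package (already a Vuls dependency) and ignoring the family-specific branching of the real function:

package oval

import (
	"regexp"

	rpmver "github.com/knqyf263/go-rpm-version"
	ovalmodels "github.com/kotakanbe/goval-dictionary/models"
)

var elMinor = regexp.MustCompile(`(\.el\d+)_\d+`)

// lessThanSketch reports whether the installed version is older than the one in
// the OVAL entry, after normalizing "el7_9"-style tags on both sides; with
// identical normalized versions it returns false, as the test cases expect.
func lessThanSketch(family, newVer string, pack ovalmodels.Package) (bool, error) {
	a := rpmver.NewVersion(elMinor.ReplaceAllString(newVer, "$1"))
	b := rpmver.NewVersion(elMinor.ReplaceAllString(pack.Version, "$1"))
	return a.LessThan(b), nil
}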
|
||||
func Test_ovalResult_Sort(t *testing.T) {
|
||||
type fields struct {
|
||||
entries []defPacks
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
want fields
|
||||
}{
|
||||
{
|
||||
name: "already sorted",
|
||||
fields: fields{
|
||||
entries: []defPacks{
|
||||
{def: ovalmodels.Definition{DefinitionID: "0"}},
|
||||
{def: ovalmodels.Definition{DefinitionID: "1"}},
|
||||
},
|
||||
},
|
||||
want: fields{
|
||||
entries: []defPacks{
|
||||
{def: ovalmodels.Definition{DefinitionID: "0"}},
|
||||
{def: ovalmodels.Definition{DefinitionID: "1"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "sort",
|
||||
fields: fields{
|
||||
entries: []defPacks{
|
||||
{def: ovalmodels.Definition{DefinitionID: "1"}},
|
||||
{def: ovalmodels.Definition{DefinitionID: "0"}},
|
||||
},
|
||||
},
|
||||
want: fields{
|
||||
entries: []defPacks{
|
||||
{def: ovalmodels.Definition{DefinitionID: "0"}},
|
||||
{def: ovalmodels.Definition{DefinitionID: "1"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
o := &ovalResult{
|
||||
entries: tt.fields.entries,
|
||||
}
|
||||
o.Sort()
|
||||
|
||||
if !reflect.DeepEqual(o.entries, tt.want.entries) {
|
||||
t.Errorf("act %#v, want %#v", o.entries, tt.want.entries)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
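The Sort method that these two cases cover is small enough to sketch. Assuming entries are ordered by OVAL definition ID, and with the structs trimmed down to what the test touches, it is essentially:

package oval

import (
	"sort"

	ovalmodels "github.com/kotakanbe/goval-dictionary/models"
)

type defPacks struct {
	def ovalmodels.Definition
	// the real struct also carries the matched packages
}

type ovalResult struct {
	entries []defPacks
}

// Sort orders the entries by OVAL definition ID, which is all the test above
// asserts (a sketch, not the actual source).
func (o *ovalResult) Sort() {
	sort.Slice(o.entries, func(i, j int) bool {
		return o.entries[i].def.DefinitionID < o.entries[j].def.DefinitionID
	})
}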
|
||||
@@ -1,233 +0,0 @@
|
||||
// +build !scanner
|
||||
|
||||
package report
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/util"
|
||||
gostdb "github.com/knqyf263/gost/db"
|
||||
cvedb "github.com/kotakanbe/go-cve-dictionary/db"
|
||||
ovaldb "github.com/kotakanbe/goval-dictionary/db"
|
||||
exploitdb "github.com/mozqnet/go-exploitdb/db"
|
||||
metasploitdb "github.com/takuzoo3868/go-msfdb/db"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// DBClient is DB client for reporting
|
||||
type DBClient struct {
|
||||
CveDB cvedb.DB
|
||||
OvalDB ovaldb.DB
|
||||
GostDB gostdb.DB
|
||||
ExploitDB exploitdb.DB
|
||||
MetasploitDB metasploitdb.DB
|
||||
}
|
||||
|
||||
// DBClientConf has a configuration of Vulnerability DBs
|
||||
type DBClientConf struct {
|
||||
CveDictCnf config.GoCveDictConf
|
||||
OvalDictCnf config.GovalDictConf
|
||||
GostCnf config.GostConf
|
||||
ExploitCnf config.ExploitConf
|
||||
MetasploitCnf config.MetasploitConf
|
||||
DebugSQL bool
|
||||
}
|
||||
|
||||
// NewDBClient returns db clients
|
||||
func NewDBClient(cnf DBClientConf) (dbclient *DBClient, locked bool, err error) {
|
||||
cveDriver, locked, err := NewCveDB(cnf)
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("CveDB is locked: %s",
|
||||
cnf.OvalDictCnf.SQLite3Path)
|
||||
} else if err != nil {
|
||||
return nil, locked, err
|
||||
}
|
||||
|
||||
ovaldb, locked, err := NewOvalDB(cnf)
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("OvalDB is locked: %s",
|
||||
cnf.OvalDictCnf.SQLite3Path)
|
||||
} else if err != nil {
|
||||
util.Log.Warnf("Unable to use OvalDB: %s, err: %s",
|
||||
cnf.OvalDictCnf.SQLite3Path, err)
|
||||
}
|
||||
|
||||
gostdb, locked, err := NewGostDB(cnf)
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("gostDB is locked: %s",
|
||||
cnf.GostCnf.SQLite3Path)
|
||||
} else if err != nil {
|
||||
util.Log.Warnf("Unable to use gostDB: %s, err: %s",
|
||||
cnf.GostCnf.SQLite3Path, err)
|
||||
}
|
||||
|
||||
exploitdb, locked, err := NewExploitDB(cnf)
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("exploitDB is locked: %s",
|
||||
cnf.ExploitCnf.SQLite3Path)
|
||||
} else if err != nil {
|
||||
util.Log.Warnf("Unable to use exploitDB: %s, err: %s",
|
||||
cnf.ExploitCnf.SQLite3Path, err)
|
||||
}
|
||||
|
||||
metasploitdb, locked, err := NewMetasploitDB(cnf)
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("metasploitDB is locked: %s",
|
||||
cnf.MetasploitCnf.SQLite3Path)
|
||||
} else if err != nil {
|
||||
util.Log.Warnf("Unable to use metasploitDB: %s, err: %s",
|
||||
cnf.MetasploitCnf.SQLite3Path, err)
|
||||
}
|
||||
|
||||
return &DBClient{
|
||||
CveDB: cveDriver,
|
||||
OvalDB: ovaldb,
|
||||
GostDB: gostdb,
|
||||
ExploitDB: exploitdb,
|
||||
MetasploitDB: metasploitdb,
|
||||
}, false, nil
|
||||
}
|
||||
|
||||
// NewCveDB returns cve db client
|
||||
func NewCveDB(cnf DBClientConf) (driver cvedb.DB, locked bool, err error) {
|
||||
if config.Conf.CveDict.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
}
|
||||
util.Log.Debugf("open cve-dictionary db (%s)", cnf.CveDictCnf.Type)
|
||||
path := cnf.CveDictCnf.URL
|
||||
if cnf.CveDictCnf.Type == "sqlite3" {
|
||||
path = cnf.CveDictCnf.SQLite3Path
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
util.Log.Warnf("--cvedb-path=%s file not found. [CPE-scan](https://vuls.io/docs/en/usage-scan-non-os-packages.html#cpe-scan) needs cve-dictionary. if you specify cpe in config.toml, fetch cve-dictionary before reporting. For details, see `https://github.com/kotakanbe/go-cve-dictionary#deploy-go-cve-dictionary`", path)
|
||||
return nil, false, nil
|
||||
}
|
||||
}
|
||||
|
||||
util.Log.Debugf("Open cve-dictionary db (%s): %s", cnf.CveDictCnf.Type, path)
|
||||
driver, locked, err = cvedb.NewDB(cnf.CveDictCnf.Type, path, cnf.DebugSQL)
|
||||
if err != nil {
|
||||
err = xerrors.Errorf("Failed to init CVE DB. err: %w, path: %s", err, path)
|
||||
return nil, locked, err
|
||||
}
|
||||
return driver, false, nil
|
||||
}
|
||||
|
||||
// NewOvalDB returns oval db client
|
||||
func NewOvalDB(cnf DBClientConf) (driver ovaldb.DB, locked bool, err error) {
|
||||
if config.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
}
|
||||
path := cnf.OvalDictCnf.URL
|
||||
if cnf.OvalDictCnf.Type == "sqlite3" {
|
||||
path = cnf.OvalDictCnf.SQLite3Path
|
||||
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
util.Log.Warnf("--ovaldb-path=%s file not found", path)
|
||||
return nil, false, nil
|
||||
}
|
||||
}
|
||||
|
||||
util.Log.Debugf("Open oval-dictionary db (%s): %s", cnf.OvalDictCnf.Type, path)
|
||||
driver, locked, err = ovaldb.NewDB("", cnf.OvalDictCnf.Type, path, cnf.DebugSQL)
|
||||
if err != nil {
|
||||
err = xerrors.Errorf("Failed to new OVAL DB. err: %w", err)
|
||||
if locked {
|
||||
return nil, true, err
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return driver, false, nil
|
||||
}
|
||||
|
||||
// NewGostDB returns db client for Gost
|
||||
func NewGostDB(cnf DBClientConf) (driver gostdb.DB, locked bool, err error) {
|
||||
if config.Conf.Gost.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
}
|
||||
path := cnf.GostCnf.URL
|
||||
if cnf.GostCnf.Type == "sqlite3" {
|
||||
path = cnf.GostCnf.SQLite3Path
|
||||
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
util.Log.Warnf("--gostdb-path=%s file not found. Vuls can detect `patch-not-released-CVE-ID` using gost if the scan target server is Debian, RHEL or CentOS, For details, see `https://github.com/knqyf263/gost#fetch-redhat`", path)
|
||||
return nil, false, nil
|
||||
}
|
||||
}
|
||||
|
||||
util.Log.Debugf("Open gost db (%s): %s", cnf.GostCnf.Type, path)
|
||||
if driver, locked, err = gostdb.NewDB(cnf.GostCnf.Type, path, cnf.DebugSQL); err != nil {
|
||||
if locked {
|
||||
util.Log.Errorf("gostDB is locked. err: %+v", err)
|
||||
return nil, true, err
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return driver, false, nil
|
||||
}
|
||||
|
||||
// NewExploitDB returns db client for Exploit
|
||||
func NewExploitDB(cnf DBClientConf) (driver exploitdb.DB, locked bool, err error) {
|
||||
if config.Conf.Exploit.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
}
|
||||
path := cnf.ExploitCnf.URL
|
||||
if cnf.ExploitCnf.Type == "sqlite3" {
|
||||
path = cnf.ExploitCnf.SQLite3Path
|
||||
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
util.Log.Warnf("--exploitdb-path=%s file not found. Fetch go-exploit-db before reporting if you want to display exploit codes of detected CVE-IDs. For details, see `https://github.com/mozqnet/go-exploitdb`", path)
|
||||
return nil, false, nil
|
||||
}
|
||||
}
|
||||
|
||||
util.Log.Debugf("Open exploit db (%s): %s", cnf.ExploitCnf.Type, path)
|
||||
if driver, locked, err = exploitdb.NewDB(cnf.ExploitCnf.Type, path, cnf.DebugSQL); err != nil {
|
||||
if locked {
|
||||
util.Log.Errorf("exploitDB is locked. err: %+v", err)
|
||||
return nil, true, err
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return driver, false, nil
|
||||
}
|
||||
|
||||
// NewMetasploitDB returns db client for Metasploit
|
||||
func NewMetasploitDB(cnf DBClientConf) (driver metasploitdb.DB, locked bool, err error) {
|
||||
if config.Conf.Metasploit.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
}
|
||||
path := cnf.MetasploitCnf.URL
|
||||
if cnf.MetasploitCnf.Type == "sqlite3" {
|
||||
path = cnf.MetasploitCnf.SQLite3Path
|
||||
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) {
|
||||
util.Log.Warnf("--msfdb-path=%s file not found. Fetch go-msfdb before reporting if you want to display metasploit modules of detected CVE-IDs. For details, see `https://github.com/takuzoo3868/go-msfdb`", path)
|
||||
return nil, false, nil
|
||||
}
|
||||
}
|
||||
|
||||
util.Log.Debugf("Open metasploit db (%s): %s", cnf.MetasploitCnf.Type, path)
|
||||
if driver, locked, err = metasploitdb.NewDB(cnf.MetasploitCnf.Type, path, cnf.DebugSQL, false); err != nil {
|
||||
if locked {
|
||||
util.Log.Errorf("metasploitDB is locked. err: %+v", err)
|
||||
return nil, true, err
|
||||
}
|
||||
return nil, false, err
|
||||
}
|
||||
return driver, false, nil
|
||||
}
|
||||
|
||||
// CloseDB close dbs
|
||||
func (d DBClient) CloseDB() {
|
||||
if d.CveDB != nil {
|
||||
if err := d.CveDB.CloseDB(); err != nil {
|
||||
util.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}
|
||||
if d.OvalDB != nil {
|
||||
if err := d.OvalDB.CloseDB(); err != nil {
|
||||
util.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
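The whole of report/db.go is deleted by this series. Before the refactor, a reporting command opened every vulnerability database through it roughly as below; the wiring is illustrative only, with the config fields taken from the struct definitions above and the import paths assumed.

package example

import (
	"log"

	"github.com/future-architect/vuls/config"
	"github.com/future-architect/vuls/report"
)

// openDBs shows how the removed NewDBClient/CloseDB pair was typically used.
func openDBs() (*report.DBClient, error) {
	dbclient, locked, err := report.NewDBClient(report.DBClientConf{
		CveDictCnf:    config.Conf.CveDict,
		OvalDictCnf:   config.Conf.OvalDict,
		GostCnf:       config.Conf.Gost,
		ExploitCnf:    config.Conf.Exploit,
		MetasploitCnf: config.Conf.Metasploit,
		DebugSQL:      config.Conf.DebugSQL,
	})
	if locked {
		log.Println("another process holds the SQLite3 DB; close it and retry")
	}
	if err != nil {
		return nil, err
	}
	// The caller is expected to defer dbclient.CloseDB() once reporting is done.
	return dbclient, nil
}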
581 report/report.go
@@ -1,581 +0,0 @@
|
||||
// +build !scanner
|
||||
|
||||
package report
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/libmanager"
|
||||
gostdb "github.com/knqyf263/gost/db"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
c "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/contrib/owasp-dependency-check/parser"
|
||||
"github.com/future-architect/vuls/cwe"
|
||||
"github.com/future-architect/vuls/exploit"
|
||||
"github.com/future-architect/vuls/github"
|
||||
"github.com/future-architect/vuls/gost"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/msf"
|
||||
"github.com/future-architect/vuls/oval"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/future-architect/vuls/wordpress"
|
||||
cvedb "github.com/kotakanbe/go-cve-dictionary/db"
|
||||
cvemodels "github.com/kotakanbe/go-cve-dictionary/models"
|
||||
ovaldb "github.com/kotakanbe/goval-dictionary/db"
|
||||
exploitdb "github.com/mozqnet/go-exploitdb/db"
|
||||
metasploitdb "github.com/takuzoo3868/go-msfdb/db"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// FillCveInfos fills CVE Detailed Information
|
||||
func FillCveInfos(dbclient DBClient, rs []models.ScanResult, dir string) ([]models.ScanResult, error) {
|
||||
|
||||
// Use the same reportedAt for all rs
|
||||
reportedAt := time.Now()
|
||||
for i, r := range rs {
|
||||
if !c.Conf.RefreshCve && !needToRefreshCve(r) {
|
||||
util.Log.Info("No need to refresh")
|
||||
continue
|
||||
}
|
||||
|
||||
if !reuseScannedCves(&r) {
|
||||
r.ScannedCves = models.VulnInfos{}
|
||||
}
|
||||
|
||||
cpeURIs := []string{}
|
||||
if len(r.Container.ContainerID) == 0 {
|
||||
cpeURIs = c.Conf.Servers[r.ServerName].CpeNames
|
||||
owaspDCXMLPath := c.Conf.Servers[r.ServerName].OwaspDCXMLPath
|
||||
if owaspDCXMLPath != "" {
|
||||
cpes, err := parser.Parse(owaspDCXMLPath)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to read OWASP Dependency Check XML on %s, `%s`, err: %w",
|
||||
r.ServerName, owaspDCXMLPath, err)
|
||||
}
|
||||
cpeURIs = append(cpeURIs, cpes...)
|
||||
}
|
||||
} else {
|
||||
// runningContainer
|
||||
if s, ok := c.Conf.Servers[r.ServerName]; ok {
|
||||
if con, ok := s.Containers[r.Container.Name]; ok {
|
||||
cpeURIs = con.Cpes
|
||||
owaspDCXMLPath := con.OwaspDCXMLPath
|
||||
if owaspDCXMLPath != "" {
|
||||
cpes, err := parser.Parse(owaspDCXMLPath)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to read OWASP Dependency Check XML on %s, `%s`, err: %w",
|
||||
r.ServerInfo(), owaspDCXMLPath, err)
|
||||
}
|
||||
cpeURIs = append(cpeURIs, cpes...)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := libmanager.DetectLibsCves(&r); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to fill with Library dependency: %w", err)
|
||||
}
|
||||
|
||||
if err := DetectPkgCves(dbclient, &r); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to detect Pkg CVE: %w", err)
|
||||
}
|
||||
|
||||
if err := DetectCpeURIsCves(dbclient.CveDB, &r, cpeURIs); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to detect CVE of `%s`: %w", cpeURIs, err)
|
||||
}
|
||||
|
||||
if err := DetectGitHubCves(&r); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to detect GitHub Cves: %w", err)
|
||||
}
|
||||
|
||||
if err := DetectWordPressCves(&r); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to detect WordPress Cves: %w", err)
|
||||
}
|
||||
|
||||
if err := FillCveInfo(dbclient, &r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
r.ReportedBy, _ = os.Hostname()
|
||||
r.Lang = c.Conf.Lang
|
||||
r.ReportedAt = reportedAt
|
||||
r.ReportedVersion = c.Version
|
||||
r.ReportedRevision = c.Revision
|
||||
r.Config.Report = c.Conf
|
||||
r.Config.Report.Servers = map[string]c.ServerInfo{
|
||||
r.ServerName: c.Conf.Servers[r.ServerName],
|
||||
}
|
||||
rs[i] = r
|
||||
}
|
||||
|
||||
// Overwrite the json file every time to clear the fields specified in config.IgnoredJSONKeys
|
||||
for _, r := range rs {
|
||||
if s, ok := c.Conf.Servers[r.ServerName]; ok {
|
||||
r = r.ClearFields(s.IgnoredJSONKeys)
|
||||
}
|
||||
if err := overwriteJSONFile(dir, r); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to write JSON: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if c.Conf.Diff {
|
||||
prevs, err := loadPrevious(rs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
diff, err := diff(rs, prevs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i, r := range diff {
|
||||
if err := fillCvesWithNvdJvn(dbclient.CveDB, &r); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
rs[i] = r
|
||||
}
|
||||
}
|
||||
|
||||
for i, r := range rs {
|
||||
r = r.FilterByCvssOver(c.Conf.CvssScoreOver)
|
||||
r = r.FilterIgnoreCves()
|
||||
r = r.FilterUnfixed()
|
||||
r = r.FilterIgnorePkgs()
|
||||
r = r.FilterInactiveWordPressLibs()
|
||||
if c.Conf.IgnoreUnscoredCves {
|
||||
r.ScannedCves = r.ScannedCves.FindScoredVulns()
|
||||
}
|
||||
rs[i] = r
|
||||
}
|
||||
return rs, nil
|
||||
}
|
||||
|
||||
// DetectPkgCves detects OS pkg cves
|
||||
func DetectPkgCves(dbclient DBClient, r *models.ScanResult) error {
|
||||
// Pkg Scan
|
||||
if r.Release != "" {
|
||||
// OVAL
|
||||
if err := detectPkgsCvesWithOval(dbclient.OvalDB, r); err != nil {
|
||||
return xerrors.Errorf("Failed to detect CVE with OVAL: %w", err)
|
||||
}
|
||||
|
||||
// gost
|
||||
if err := detectPkgsCvesWithGost(dbclient.GostDB, r); err != nil {
|
||||
return xerrors.Errorf("Failed to detect CVE with gost: %w", err)
|
||||
}
|
||||
} else if reuseScannedCves(r) {
|
||||
util.Log.Infof("r.Release is empty. Use CVEs as it as.")
|
||||
} else if r.Family == config.ServerTypePseudo {
|
||||
util.Log.Infof("pseudo type. Skip OVAL and gost detection")
|
||||
} else {
|
||||
return xerrors.Errorf("Failed to fill CVEs. r.Release is empty")
|
||||
}
|
||||
|
||||
for i, v := range r.ScannedCves {
|
||||
for j, p := range v.AffectedPackages {
|
||||
if p.NotFixedYet && p.FixState == "" {
|
||||
p.FixState = "Not fixed yet"
|
||||
r.ScannedCves[i].AffectedPackages[j] = p
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// To keep backward compatibility
|
||||
// Newer versions use ListenPortStats,
|
||||
// but older versions of Vuls are set to ListenPorts.
|
||||
// Set ListenPorts to ListenPortStats to allow newer Vuls to report old results.
|
||||
for i, pkg := range r.Packages {
|
||||
for j, proc := range pkg.AffectedProcs {
|
||||
for _, ipPort := range proc.ListenPorts {
|
||||
ps, err := models.NewPortStat(ipPort)
|
||||
if err != nil {
|
||||
util.Log.Warnf("Failed to parse ip:port: %s, err:%+v", ipPort, err)
|
||||
continue
|
||||
}
|
||||
r.Packages[i].AffectedProcs[j].ListenPortStats = append(
|
||||
r.Packages[i].AffectedProcs[j].ListenPortStats, *ps)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetectGitHubCves fetches CVEs from GitHub Security Alerts
|
||||
func DetectGitHubCves(r *models.ScanResult) error {
|
||||
repos := c.Conf.Servers[r.ServerName].GitHubRepos
|
||||
if len(repos) == 0 {
|
||||
return nil
|
||||
}
|
||||
githubInts := GithubSecurityAlerts(repos)
|
||||
|
||||
ints := &integrationResults{}
|
||||
for _, o := range []Integration{githubInts} {
|
||||
if err := o.apply(r, ints); err != nil {
|
||||
return xerrors.Errorf("Failed to detect CVE with integration: %w", err)
|
||||
}
|
||||
}
|
||||
util.Log.Infof("%s: %d CVEs are detected with GitHub Security Alerts",
|
||||
r.FormatServerName(), ints.GithubAlertsCveCounts)
|
||||
return nil
|
||||
}
|
||||
|
||||
// DetectWordPressCves detects CVEs of WordPress
|
||||
func DetectWordPressCves(r *models.ScanResult) error {
|
||||
token := c.Conf.Servers[r.ServerName].WordPress.WPVulnDBToken
|
||||
if token == "" {
|
||||
return nil
|
||||
}
|
||||
wpVulnCaches := map[string]string{}
|
||||
wpOpt := WordPressOption{
|
||||
token,
|
||||
&wpVulnCaches,
|
||||
}
|
||||
|
||||
ints := &integrationResults{}
|
||||
for _, o := range []Integration{wpOpt} {
|
||||
if err := o.apply(r, ints); err != nil {
|
||||
return xerrors.Errorf("Failed to detect CVE with integration: %w", err)
|
||||
}
|
||||
}
|
||||
util.Log.Infof("%s: %d CVEs are detected with wpscan API",
|
||||
r.FormatServerName(), ints.WordPressCveCounts)
|
||||
return nil
|
||||
}
|
||||
|
||||
// FillCveInfo fill scanResult with cve info.
|
||||
func FillCveInfo(dbclient DBClient, r *models.ScanResult) error {
|
||||
util.Log.Infof("Fill CVE detailed with gost")
|
||||
if err := gost.NewClient(r.Family).FillCVEsWithRedHat(dbclient.GostDB, r); err != nil {
|
||||
return xerrors.Errorf("Failed to fill with gost: %w", err)
|
||||
}
|
||||
|
||||
util.Log.Infof("Fill CVE detailed with CVE-DB")
|
||||
if err := fillCvesWithNvdJvn(dbclient.CveDB, r); err != nil {
|
||||
return xerrors.Errorf("Failed to fill with CVE: %w", err)
|
||||
}
|
||||
|
||||
util.Log.Infof("Fill exploit with Exploit-DB")
|
||||
nExploitCve, err := fillWithExploitDB(dbclient.ExploitDB, r)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to fill with exploit: %w", err)
|
||||
}
|
||||
util.Log.Infof("%s: %d exploits are detected",
|
||||
r.FormatServerName(), nExploitCve)
|
||||
|
||||
util.Log.Infof("Fill metasploit module with Metasploit-DB")
|
||||
nMetasploitCve, err := fillWithMetasploit(dbclient.MetasploitDB, r)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to fill with metasploit: %w", err)
|
||||
}
|
||||
util.Log.Infof("%s: %d modules are detected",
|
||||
r.FormatServerName(), nMetasploitCve)
|
||||
|
||||
util.Log.Infof("Fill CWE with NVD")
|
||||
fillCweDict(r)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// fillCvesWithNvdJvn fills CVE detail with NVD, JVN
|
||||
func fillCvesWithNvdJvn(driver cvedb.DB, r *models.ScanResult) error {
|
||||
cveIDs := []string{}
|
||||
for _, v := range r.ScannedCves {
|
||||
cveIDs = append(cveIDs, v.CveID)
|
||||
}
|
||||
|
||||
ds, err := CveClient.FetchCveDetails(driver, cveIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, d := range ds {
|
||||
nvd, exploits, mitigations := models.ConvertNvdJSONToModel(d.CveID, d.NvdJSON)
|
||||
jvn := models.ConvertJvnToModel(d.CveID, d.Jvn)
|
||||
|
||||
alerts := fillCertAlerts(&d)
|
||||
for cveID, vinfo := range r.ScannedCves {
|
||||
if vinfo.CveID == d.CveID {
|
||||
if vinfo.CveContents == nil {
|
||||
vinfo.CveContents = models.CveContents{}
|
||||
}
|
||||
for _, con := range []*models.CveContent{nvd, jvn} {
|
||||
if con != nil && !con.Empty() {
|
||||
vinfo.CveContents[con.Type] = *con
|
||||
}
|
||||
}
|
||||
vinfo.AlertDict = alerts
|
||||
vinfo.Exploits = append(vinfo.Exploits, exploits...)
|
||||
vinfo.Mitigations = append(vinfo.Mitigations, mitigations...)
|
||||
r.ScannedCves[cveID] = vinfo
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func fillCertAlerts(cvedetail *cvemodels.CveDetail) (dict models.AlertDict) {
|
||||
if cvedetail.NvdJSON != nil {
|
||||
for _, cert := range cvedetail.NvdJSON.Certs {
|
||||
dict.En = append(dict.En, models.Alert{
|
||||
URL: cert.Link,
|
||||
Title: cert.Title,
|
||||
Team: "us",
|
||||
})
|
||||
}
|
||||
}
|
||||
if cvedetail.Jvn != nil {
|
||||
for _, cert := range cvedetail.Jvn.Certs {
|
||||
dict.Ja = append(dict.Ja, models.Alert{
|
||||
URL: cert.Link,
|
||||
Title: cert.Title,
|
||||
Team: "jp",
|
||||
})
|
||||
}
|
||||
}
|
||||
return dict
|
||||
}
|
||||
|
||||
// detectPkgsCvesWithOval detects CVEs of installed packages using the OVAL database
|
||||
func detectPkgsCvesWithOval(driver ovaldb.DB, r *models.ScanResult) error {
|
||||
var ovalClient oval.Client
|
||||
var ovalFamily string
|
||||
|
||||
switch r.Family {
|
||||
case c.Debian, c.Raspbian:
|
||||
ovalClient = oval.NewDebian()
|
||||
ovalFamily = c.Debian
|
||||
case c.Ubuntu:
|
||||
ovalClient = oval.NewUbuntu()
|
||||
ovalFamily = c.Ubuntu
|
||||
case c.RedHat:
|
||||
ovalClient = oval.NewRedhat()
|
||||
ovalFamily = c.RedHat
|
||||
case c.CentOS:
|
||||
ovalClient = oval.NewCentOS()
|
||||
// use RedHat's OVAL
|
||||
ovalFamily = c.RedHat
|
||||
case c.Oracle:
|
||||
ovalClient = oval.NewOracle()
|
||||
ovalFamily = c.Oracle
|
||||
case c.SUSEEnterpriseServer:
|
||||
// TODO other suse family
|
||||
ovalClient = oval.NewSUSE()
|
||||
ovalFamily = c.SUSEEnterpriseServer
|
||||
case c.Alpine:
|
||||
ovalClient = oval.NewAlpine()
|
||||
ovalFamily = c.Alpine
|
||||
case c.Amazon:
|
||||
ovalClient = oval.NewAmazon()
|
||||
ovalFamily = c.Amazon
|
||||
case c.FreeBSD, c.Windows:
|
||||
return nil
|
||||
case c.ServerTypePseudo:
|
||||
return nil
|
||||
default:
|
||||
if r.Family == "" {
|
||||
return xerrors.New("Probably an error occurred during scanning. Check the error message")
|
||||
}
|
||||
return xerrors.Errorf("OVAL for %s is not implemented yet", r.Family)
|
||||
}
|
||||
|
||||
if !c.Conf.OvalDict.IsFetchViaHTTP() {
|
||||
if driver == nil {
|
||||
return xerrors.Errorf("You have to fetch OVAL data for %s before reporting. For details, see `https://github.com/kotakanbe/goval-dictionary#usage`", r.Family)
|
||||
}
|
||||
if err := driver.NewOvalDB(ovalFamily); err != nil {
|
||||
return xerrors.Errorf("Failed to New Oval DB. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
util.Log.Debugf("Check whether oval fetched: %s %s", ovalFamily, r.Release)
|
||||
ok, err := ovalClient.CheckIfOvalFetched(driver, ovalFamily, r.Release)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
return xerrors.Errorf("OVAL entries of %s %s are not found. Fetch OVAL before reporting. For details, see `https://github.com/kotakanbe/goval-dictionary#usage`", ovalFamily, r.Release)
|
||||
}
|
||||
|
||||
_, err = ovalClient.CheckIfOvalFresh(driver, ovalFamily, r.Release)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nCVEs, err := ovalClient.FillWithOval(driver, r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
util.Log.Infof("%s: %d CVEs are detected with OVAL", r.FormatServerName(), nCVEs)
|
||||
return nil
|
||||
}
|
||||
|
||||
func detectPkgsCvesWithGost(driver gostdb.DB, r *models.ScanResult) error {
|
||||
nCVEs, err := gost.NewClient(r.Family).DetectUnfixed(driver, r, true)
|
||||
|
||||
util.Log.Infof("%s: %d unfixed CVEs are detected with gost",
|
||||
r.FormatServerName(), nCVEs)
|
||||
return err
|
||||
}
|
||||
|
||||
// fillWithExploitDB fills Exploits with exploit database
|
||||
// https://github.com/mozqnet/go-exploitdb
|
||||
func fillWithExploitDB(driver exploitdb.DB, r *models.ScanResult) (nExploitCve int, err error) {
|
||||
return exploit.FillWithExploit(driver, r)
|
||||
}
|
||||
|
||||
// fillWithMetasploit fills metasploit modules with metasploit database
|
||||
// https://github.com/takuzoo3868/go-msfdb
|
||||
func fillWithMetasploit(driver metasploitdb.DB, r *models.ScanResult) (nMetasploitCve int, err error) {
|
||||
return msf.FillWithMetasploit(driver, r)
|
||||
}
|
||||
|
||||
// DetectCpeURIsCves detects CVEs of given CPE-URIs
|
||||
func DetectCpeURIsCves(driver cvedb.DB, r *models.ScanResult, cpeURIs []string) error {
|
||||
nCVEs := 0
|
||||
if len(cpeURIs) != 0 && driver == nil && !config.Conf.CveDict.IsFetchViaHTTP() {
|
||||
return xerrors.Errorf("cpeURIs %s specified, but cve-dictionary DB not found. Fetch cve-dictionary before reporting. For details, see `https://github.com/kotakanbe/go-cve-dictionary#deploy-go-cve-dictionary`",
|
||||
cpeURIs)
|
||||
}
|
||||
|
||||
for _, name := range cpeURIs {
|
||||
details, err := CveClient.FetchCveDetailsByCpeName(driver, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, detail := range details {
|
||||
if val, ok := r.ScannedCves[detail.CveID]; ok {
|
||||
names := val.CpeURIs
|
||||
names = util.AppendIfMissing(names, name)
|
||||
val.CpeURIs = names
|
||||
val.Confidences.AppendIfMissing(models.CpeNameMatch)
|
||||
r.ScannedCves[detail.CveID] = val
|
||||
} else {
|
||||
v := models.VulnInfo{
|
||||
CveID: detail.CveID,
|
||||
CpeURIs: []string{name},
|
||||
Confidences: models.Confidences{models.CpeNameMatch},
|
||||
}
|
||||
r.ScannedCves[detail.CveID] = v
|
||||
nCVEs++
|
||||
}
|
||||
}
|
||||
}
|
||||
util.Log.Infof("%s: %d CVEs are detected with CPE", r.FormatServerName(), nCVEs)
|
||||
return nil
|
||||
}
|
||||
|
||||
type integrationResults struct {
|
||||
GithubAlertsCveCounts int
|
||||
WordPressCveCounts int
|
||||
}
|
||||
|
||||
// Integration is integration of vuls report
|
||||
type Integration interface {
|
||||
apply(*models.ScanResult, *integrationResults) error
|
||||
}
|
||||
|
||||
// GithubSecurityAlerts :
|
||||
func GithubSecurityAlerts(githubConfs map[string]config.GitHubConf) Integration {
|
||||
return GithubSecurityAlertOption{
|
||||
GithubConfs: githubConfs,
|
||||
}
|
||||
}
|
||||
|
||||
// GithubSecurityAlertOption :
|
||||
type GithubSecurityAlertOption struct {
|
||||
GithubConfs map[string]config.GitHubConf
|
||||
}
|
||||
|
||||
// https://help.github.com/articles/about-security-alerts-for-vulnerable-dependencies/
|
||||
func (g GithubSecurityAlertOption) apply(r *models.ScanResult, ints *integrationResults) (err error) {
|
||||
var nCVEs int
|
||||
for ownerRepo, setting := range g.GithubConfs {
|
||||
ss := strings.Split(ownerRepo, "/")
|
||||
owner, repo := ss[0], ss[1]
|
||||
n, err := github.FillGitHubSecurityAlerts(r, owner, repo, setting.Token)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to access GitHub Security Alerts: %w", err)
|
||||
}
|
||||
nCVEs += n
|
||||
}
|
||||
ints.GithubAlertsCveCounts = nCVEs
|
||||
return nil
|
||||
}
|
||||
|
||||
// WordPressOption :
|
||||
type WordPressOption struct {
|
||||
token string
|
||||
wpVulnCaches *map[string]string
|
||||
}
|
||||
|
||||
func (g WordPressOption) apply(r *models.ScanResult, ints *integrationResults) (err error) {
|
||||
if g.token == "" {
|
||||
return nil
|
||||
}
|
||||
n, err := wordpress.FillWordPress(r, g.token, g.wpVulnCaches)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to fetch from WPVulnDB. Check the WPVulnDBToken in config.toml. err: %w", err)
|
||||
}
|
||||
ints.WordPressCveCounts = n
|
||||
return nil
|
||||
}
|
||||
|
||||
func fillCweDict(r *models.ScanResult) {
|
||||
uniqCweIDMap := map[string]bool{}
|
||||
for _, vinfo := range r.ScannedCves {
|
||||
for _, cont := range vinfo.CveContents {
|
||||
for _, id := range cont.CweIDs {
|
||||
if strings.HasPrefix(id, "CWE-") {
|
||||
id = strings.TrimPrefix(id, "CWE-")
|
||||
uniqCweIDMap[id] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
dict := map[string]models.CweDictEntry{}
|
||||
for id := range uniqCweIDMap {
|
||||
entry := models.CweDictEntry{}
|
||||
if e, ok := cwe.CweDictEn[id]; ok {
|
||||
if rank, ok := cwe.OwaspTopTen2017[id]; ok {
|
||||
entry.OwaspTopTen2017 = rank
|
||||
}
|
||||
if rank, ok := cwe.CweTopTwentyfive2019[id]; ok {
|
||||
entry.CweTopTwentyfive2019 = rank
|
||||
}
|
||||
if rank, ok := cwe.SansTopTwentyfive[id]; ok {
|
||||
entry.SansTopTwentyfive = rank
|
||||
}
|
||||
entry.En = &e
|
||||
} else {
|
||||
util.Log.Debugf("CWE-ID %s is not found in English CWE Dict", id)
|
||||
entry.En = &cwe.Cwe{CweID: id}
|
||||
}
|
||||
|
||||
if c.Conf.Lang == "ja" {
|
||||
if e, ok := cwe.CweDictJa[id]; ok {
|
||||
if rank, ok := cwe.OwaspTopTen2017[id]; ok {
|
||||
entry.OwaspTopTen2017 = rank
|
||||
}
|
||||
if rank, ok := cwe.CweTopTwentyfive2019[id]; ok {
|
||||
entry.CweTopTwentyfive2019 = rank
|
||||
}
|
||||
if rank, ok := cwe.SansTopTwentyfive[id]; ok {
|
||||
entry.SansTopTwentyfive = rank
|
||||
}
|
||||
entry.Ja = &e
|
||||
} else {
|
||||
util.Log.Debugf("CWE-ID %s is not found in Japanese CWE Dict", id)
|
||||
entry.Ja = &cwe.Cwe{CweID: id}
|
||||
}
|
||||
}
|
||||
dict[id] = entry
|
||||
}
|
||||
r.CweDict = dict
|
||||
return
|
||||
}
|
||||
@@ -1,437 +0,0 @@
|
||||
package report
|
||||
|
||||
import (
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/k0kubun/pp"
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
util.Log = util.NewCustomLogger(config.ServerInfo{})
|
||||
code := m.Run()
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
func TestIsCveInfoUpdated(t *testing.T) {
|
||||
f := "2006-01-02"
|
||||
old, _ := time.Parse(f, "2015-12-15")
|
||||
new, _ := time.Parse(f, "2015-12-16")
|
||||
|
||||
type In struct {
|
||||
cveID string
|
||||
cur models.ScanResult
|
||||
prev models.ScanResult
|
||||
}
|
||||
var tests = []struct {
|
||||
in In
|
||||
expected bool
|
||||
}{
|
||||
// NVD compare non-initialized times
|
||||
{
|
||||
in: In{
|
||||
cveID: "CVE-2017-0001",
|
||||
cur: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2017-0001",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
prev: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2017-0001": {
|
||||
CveID: "CVE-2017-0001",
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2017-0001",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
// JVN not updated
|
||||
{
|
||||
in: In{
|
||||
cveID: "CVE-2017-0002",
|
||||
cur: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Jvn,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: old,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
prev: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2017-0002": {
|
||||
CveID: "CVE-2017-0002",
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Jvn,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: old,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
// OVAL updated
|
||||
{
|
||||
in: In{
|
||||
cveID: "CVE-2017-0003",
|
||||
cur: models.ScanResult{
|
||||
Family: "ubuntu",
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: new,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
prev: models.ScanResult{
|
||||
Family: "ubuntu",
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2017-0003": {
|
||||
CveID: "CVE-2017-0003",
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: old,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
// OVAL newly detected
|
||||
{
|
||||
in: In{
|
||||
cveID: "CVE-2017-0004",
|
||||
cur: models.ScanResult{
|
||||
Family: "redhat",
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2017-0004": {
|
||||
CveID: "CVE-2017-0004",
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: old,
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
prev: models.ScanResult{
|
||||
Family: "redhat",
|
||||
ScannedCves: models.VulnInfos{},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
}
|
||||
for i, tt := range tests {
|
||||
actual := isCveInfoUpdated(tt.in.cveID, tt.in.prev, tt.in.cur)
|
||||
if actual != tt.expected {
|
||||
t.Errorf("[%d] actual: %t, expected: %t", i, actual, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
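isCveInfoUpdated is not shown in this listing, but the four cases above pin its behaviour down well enough for a sketch. The real helper restricts itself to the CVE content types relevant to the scanned distro, which this version ignores.

package report

import "github.com/future-architect/vuls/models"

// isCveInfoUpdatedSketch reports whether a CVE is newly detected or has a
// CveContent whose LastModified moved forward since the previous scan result.
func isCveInfoUpdatedSketch(cveID string, prev, cur models.ScanResult) bool {
	prevInfo, ok := prev.ScannedCves[cveID]
	if !ok {
		return true // newly detected
	}
	curInfo := cur.ScannedCves[cveID]
	for cType, c := range curInfo.CveContents {
		p, ok := prevInfo.CveContents[cType]
		if !ok || c.LastModified.After(p.LastModified) {
			return true
		}
	}
	return false
}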
|
||||
func TestDiff(t *testing.T) {
|
||||
atCurrent, _ := time.Parse("2006-01-02", "2014-12-31")
|
||||
atPrevious, _ := time.Parse("2006-01-02", "2014-11-30")
|
||||
var tests = []struct {
|
||||
inCurrent models.ScanResults
|
||||
inPrevious models.ScanResults
|
||||
out models.ScanResult
|
||||
}{
|
||||
{
|
||||
inCurrent: models.ScanResults{
|
||||
{
|
||||
ScannedAt: atCurrent,
|
||||
ServerName: "u16",
|
||||
Family: "ubuntu",
|
||||
Release: "16.04",
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2012-6702": {
|
||||
CveID: "CVE-2012-6702",
|
||||
AffectedPackages: models.PackageFixStatuses{{Name: "libexpat1"}},
|
||||
DistroAdvisories: []models.DistroAdvisory{},
|
||||
CpeURIs: []string{},
|
||||
},
|
||||
"CVE-2014-9761": {
|
||||
CveID: "CVE-2014-9761",
|
||||
AffectedPackages: models.PackageFixStatuses{{Name: "libc-bin"}},
|
||||
DistroAdvisories: []models.DistroAdvisory{},
|
||||
CpeURIs: []string{},
|
||||
},
|
||||
},
|
||||
Packages: models.Packages{},
|
||||
Errors: []string{},
|
||||
Optional: map[string]interface{}{},
|
||||
},
|
||||
},
|
||||
inPrevious: models.ScanResults{
|
||||
{
|
||||
ScannedAt: atPrevious,
|
||||
ServerName: "u16",
|
||||
Family: "ubuntu",
|
||||
Release: "16.04",
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2012-6702": {
|
||||
CveID: "CVE-2012-6702",
|
||||
AffectedPackages: models.PackageFixStatuses{{Name: "libexpat1"}},
|
||||
DistroAdvisories: []models.DistroAdvisory{},
|
||||
CpeURIs: []string{},
|
||||
},
|
||||
"CVE-2014-9761": {
|
||||
CveID: "CVE-2014-9761",
|
||||
AffectedPackages: models.PackageFixStatuses{{Name: "libc-bin"}},
|
||||
DistroAdvisories: []models.DistroAdvisory{},
|
||||
CpeURIs: []string{},
|
||||
},
|
||||
},
|
||||
Packages: models.Packages{},
|
||||
Errors: []string{},
|
||||
Optional: map[string]interface{}{},
|
||||
},
|
||||
},
|
||||
out: models.ScanResult{
|
||||
ScannedAt: atCurrent,
|
||||
ServerName: "u16",
|
||||
Family: "ubuntu",
|
||||
Release: "16.04",
|
||||
Packages: models.Packages{},
|
||||
ScannedCves: models.VulnInfos{},
|
||||
Errors: []string{},
|
||||
Optional: map[string]interface{}{},
|
||||
},
|
||||
},
|
||||
{
|
||||
inCurrent: models.ScanResults{
|
||||
{
|
||||
ScannedAt: atCurrent,
|
||||
ServerName: "u16",
|
||||
Family: "ubuntu",
|
||||
Release: "16.04",
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2016-6662": {
|
||||
CveID: "CVE-2016-6662",
|
||||
AffectedPackages: models.PackageFixStatuses{{Name: "mysql-libs"}},
|
||||
DistroAdvisories: []models.DistroAdvisory{},
|
||||
CpeURIs: []string{},
|
||||
},
|
||||
},
|
||||
Packages: models.Packages{
|
||||
"mysql-libs": {
|
||||
Name: "mysql-libs",
|
||||
Version: "5.1.73",
|
||||
Release: "7.el6",
|
||||
NewVersion: "5.1.73",
|
||||
NewRelease: "8.el6_8",
|
||||
Repository: "",
|
||||
Changelog: models.Changelog{
|
||||
Contents: "",
|
||||
Method: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
inPrevious: models.ScanResults{
|
||||
{
|
||||
ScannedAt: atPrevious,
|
||||
ServerName: "u16",
|
||||
Family: "ubuntu",
|
||||
Release: "16.04",
|
||||
ScannedCves: models.VulnInfos{},
|
||||
},
|
||||
},
|
||||
out: models.ScanResult{
|
||||
ScannedAt: atCurrent,
|
||||
ServerName: "u16",
|
||||
Family: "ubuntu",
|
||||
Release: "16.04",
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2016-6662": {
|
||||
CveID: "CVE-2016-6662",
|
||||
AffectedPackages: models.PackageFixStatuses{{Name: "mysql-libs"}},
|
||||
DistroAdvisories: []models.DistroAdvisory{},
|
||||
CpeURIs: []string{},
|
||||
},
|
||||
},
|
||||
Packages: models.Packages{
|
||||
"mysql-libs": {
|
||||
Name: "mysql-libs",
|
||||
Version: "5.1.73",
|
||||
Release: "7.el6",
|
||||
NewVersion: "5.1.73",
|
||||
NewRelease: "8.el6_8",
|
||||
Repository: "",
|
||||
Changelog: models.Changelog{
|
||||
Contents: "",
|
||||
Method: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
diff, _ := diff(tt.inCurrent, tt.inPrevious)
|
||||
for _, actual := range diff {
|
||||
if !reflect.DeepEqual(actual.ScannedCves, tt.out.ScannedCves) {
|
||||
h := pp.Sprint(actual.ScannedCves)
|
||||
x := pp.Sprint(tt.out.ScannedCves)
|
||||
t.Errorf("[%d] cves actual: \n %s \n expected: \n %s", i, h, x)
|
||||
}
|
||||
|
||||
for j := range tt.out.Packages {
|
||||
if !reflect.DeepEqual(tt.out.Packages[j], actual.Packages[j]) {
|
||||
h := pp.Sprint(tt.out.Packages[j])
|
||||
x := pp.Sprint(actual.Packages[j])
|
||||
t.Errorf("[%d] packages actual: \n %s \n expected: \n %s", i, x, h)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
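The two cases above capture diff()'s core rule for ScannedCves: a CVE survives the diff only if it is new relative to the previous result. Ignoring the updated-CVE check and the package copying, the new-CVE part reduces to something like this sketch:

package report

import "github.com/future-architect/vuls/models"

// newlyDetected keeps only the CVEs that appear in the current scan but not in
// the previous one (a sketch of part of what diff() does; the removed
// implementation also carries over updated CVEs and their package details).
func newlyDetected(cur, prev models.VulnInfos) models.VulnInfos {
	out := models.VulnInfos{}
	for id, v := range cur {
		if _, ok := prev[id]; !ok {
			out[id] = v
		}
	}
	return out
}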
|
||||
func TestIsCveFixed(t *testing.T) {
|
||||
type In struct {
|
||||
v models.VulnInfo
|
||||
prev models.ScanResult
|
||||
}
|
||||
var tests = []struct {
|
||||
in In
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
in: In{
|
||||
v: models.VulnInfo{
|
||||
CveID: "CVE-2016-6662",
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{
|
||||
Name: "mysql-libs",
|
||||
NotFixedYet: false,
|
||||
},
|
||||
},
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2016-6662",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
prev: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2016-6662": {
|
||||
CveID: "CVE-2016-6662",
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{
|
||||
Name: "mysql-libs",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
},
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2016-6662",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
in: In{
|
||||
v: models.VulnInfo{
|
||||
CveID: "CVE-2016-6662",
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{
|
||||
Name: "mysql-libs",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
},
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2016-6662",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
prev: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2016-6662": {
|
||||
CveID: "CVE-2016-6662",
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{
|
||||
Name: "mysql-libs",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
},
|
||||
CveContents: models.NewCveContents(
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2016-6662",
|
||||
LastModified: time.Time{},
|
||||
},
|
||||
),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
actual := isCveFixed(tt.in.v, tt.in.prev)
|
||||
if actual != tt.expected {
|
||||
t.Errorf("[%d] actual: %t, expected: %t", i, actual, tt.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,21 +1,28 @@
|
||||
package report
|
||||
package reporter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"encoding/xml"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
storage "github.com/Azure/azure-sdk-for-go/storage"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
c "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
)
|
||||
|
||||
// AzureBlobWriter writes results to AzureBlob
|
||||
type AzureBlobWriter struct{}
|
||||
type AzureBlobWriter struct {
|
||||
FormatJSON bool
|
||||
FormatFullText bool
|
||||
FormatOneLineText bool
|
||||
FormatList bool
|
||||
Gzip bool
|
||||
|
||||
config.AzureConf
|
||||
}
|
||||
|
||||
// Write results to Azure Blob storage
|
||||
func (w AzureBlobWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
@@ -23,58 +30,46 @@ func (w AzureBlobWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
cli, err := getBlobClient()
|
||||
cli, err := w.getBlobClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if c.Conf.FormatOneLineText {
|
||||
if w.FormatOneLineText {
|
||||
timestr := rs[0].ScannedAt.Format(time.RFC3339)
|
||||
k := timestr + "/summary.txt"
|
||||
text := formatOneLineSummary(rs...)
|
||||
b := []byte(text)
|
||||
if err := createBlockBlob(cli, k, b); err != nil {
|
||||
if err := w.createBlockBlob(cli, k, b, w.Gzip); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for _, r := range rs {
|
||||
key := r.ReportKeyName()
|
||||
if c.Conf.FormatJSON {
|
||||
if w.FormatJSON {
|
||||
k := key + ".json"
|
||||
var b []byte
|
||||
if b, err = json.Marshal(r); err != nil {
|
||||
return xerrors.Errorf("Failed to Marshal to JSON: %w", err)
|
||||
}
|
||||
if err := createBlockBlob(cli, k, b); err != nil {
|
||||
if err := w.createBlockBlob(cli, k, b, w.Gzip); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.Conf.FormatList {
|
||||
if w.FormatList {
|
||||
k := key + "_short.txt"
|
||||
b := []byte(formatList(r))
|
||||
if err := createBlockBlob(cli, k, b); err != nil {
|
||||
if err := w.createBlockBlob(cli, k, b, w.Gzip); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.Conf.FormatFullText {
|
||||
if w.FormatFullText {
|
||||
k := key + "_full.txt"
|
||||
b := []byte(formatFullPlainText(r))
|
||||
if err := createBlockBlob(cli, k, b); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if c.Conf.FormatXML {
|
||||
k := key + ".xml"
|
||||
var b []byte
|
||||
if b, err = xml.Marshal(r); err != nil {
|
||||
return xerrors.Errorf("Failed to Marshal to XML: %w", err)
|
||||
}
|
||||
allBytes := bytes.Join([][]byte{[]byte(xml.Header + vulsOpenTag), b, []byte(vulsCloseTag)}, []byte{})
|
||||
if err := createBlockBlob(cli, k, allBytes); err != nil {
|
||||
if err := w.createBlockBlob(cli, k, b, w.Gzip); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -82,9 +77,9 @@ func (w AzureBlobWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// CheckIfAzureContainerExists check the existence of Azure storage container
|
||||
func CheckIfAzureContainerExists() error {
|
||||
cli, err := getBlobClient()
|
||||
// Validate check the existence of Azure storage container
|
||||
func (w AzureBlobWriter) Validate() error {
|
||||
cli, err := w.getBlobClient()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -95,39 +90,39 @@ func CheckIfAzureContainerExists() error {
|
||||
|
||||
found := false
|
||||
for _, con := range r.Containers {
|
||||
if con.Name == c.Conf.Azure.ContainerName {
|
||||
if con.Name == w.ContainerName {
|
||||
found = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return xerrors.Errorf("Container not found. Container: %s", c.Conf.Azure.ContainerName)
|
||||
return xerrors.Errorf("Container not found. Container: %s", w.ContainerName)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getBlobClient() (storage.BlobStorageClient, error) {
|
||||
api, err := storage.NewBasicClient(c.Conf.Azure.AccountName, c.Conf.Azure.AccountKey)
|
||||
func (w AzureBlobWriter) getBlobClient() (storage.BlobStorageClient, error) {
|
||||
api, err := storage.NewBasicClient(w.AccountName, w.AccountKey)
|
||||
if err != nil {
|
||||
return storage.BlobStorageClient{}, err
|
||||
}
|
||||
return api.GetBlobService(), nil
|
||||
}
|
||||
|
||||
func createBlockBlob(cli storage.BlobStorageClient, k string, b []byte) error {
|
||||
func (w AzureBlobWriter) createBlockBlob(cli storage.BlobStorageClient, k string, b []byte, gzip bool) error {
|
||||
var err error
|
||||
if c.Conf.GZIP {
|
||||
if gzip {
|
||||
if b, err = gz(b); err != nil {
|
||||
return err
|
||||
}
|
||||
k += ".gz"
|
||||
}
|
||||
|
||||
ref := cli.GetContainerReference(c.Conf.Azure.ContainerName)
|
||||
ref := cli.GetContainerReference(w.ContainerName)
|
||||
blob := ref.GetBlobReference(k)
|
||||
if err := blob.CreateBlockBlobFromReader(bytes.NewReader(b), nil); err != nil {
|
||||
return xerrors.Errorf("Failed to upload data to %s/%s, err: %w",
|
||||
c.Conf.Azure.ContainerName, k, err)
|
||||
w.ContainerName, k, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
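After this change the writer no longer consults the global config, so the caller fills in the struct itself. A hypothetical wiring, with placeholder credentials and an assumed import path for the new reporter package:

package example

import (
	"github.com/future-architect/vuls/config"
	"github.com/future-architect/vuls/models"
	"github.com/future-architect/vuls/reporter"
)

// reportToAzure validates the container and uploads the selected formats.
func reportToAzure(results []models.ScanResult) error {
	w := reporter.AzureBlobWriter{
		FormatJSON:        true,
		FormatOneLineText: true,
		Gzip:              true,
		AzureConf: config.AzureConf{
			AccountName:   "myaccount",    // placeholder
			AccountKey:    "***",          // placeholder
			ContainerName: "vuls-reports", // placeholder
		},
	}
	if err := w.Validate(); err != nil {
		return err
	}
	return w.Write(results...)
}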
@@ -1,25 +1,30 @@
|
||||
package report
|
||||
package reporter
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
)
|
||||
|
||||
// ChatWorkWriter send report to ChatWork
|
||||
type ChatWorkWriter struct{}
|
||||
type ChatWorkWriter struct {
|
||||
Cnf config.ChatWorkConf
|
||||
Proxy string
|
||||
}
|
||||
|
||||
func (w ChatWorkWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
conf := config.Conf.ChatWork
|
||||
|
||||
for _, r := range rs {
|
||||
serverInfo := r.ServerInfo()
|
||||
if err = chatWorkpostMessage(conf.Room, conf.APIToken, serverInfo); err != nil {
|
||||
if err = w.chatWorkpostMessage(serverInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -35,9 +40,9 @@ func (w ChatWorkWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
vinfo.CveID,
|
||||
strconv.FormatFloat(maxCvss.Value.Score, 'f', 1, 64),
|
||||
severity,
|
||||
vinfo.Summaries(config.Conf.Lang, r.Family)[0].Value)
|
||||
vinfo.Summaries(r.Lang, r.Family)[0].Value)
|
||||
|
||||
if err = chatWorkpostMessage(conf.Room, conf.APIToken, message); err != nil {
|
||||
if err = w.chatWorkpostMessage(message); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@@ -46,28 +51,26 @@ func (w ChatWorkWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
func chatWorkpostMessage(room, token, message string) error {
|
||||
uri := fmt.Sprintf("https://api.chatwork.com/v2/rooms/%s/messages=%s", room, token)
|
||||
|
||||
payload := url.Values{
|
||||
"body": {message},
|
||||
}
|
||||
|
||||
reqs, err := http.NewRequest("POST", uri, strings.NewReader(payload.Encode()))
|
||||
|
||||
reqs.Header.Add("X-ChatWorkToken", token)
|
||||
reqs.Header.Add("Content-Type", "application/x-www-form-urlencoded")
|
||||
func (w ChatWorkWriter) chatWorkpostMessage(message string) error {
|
||||
uri := fmt.Sprintf("https://api.chatwork.com/v2/rooms/%s/messages=%s", w.Cnf.Room, w.Cnf.APIToken)
|
||||
payload := url.Values{"body": {message}}
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uri, strings.NewReader(payload.Encode()))
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
client := &http.Client{}
|
||||
|
||||
resp, err := client.Do(reqs)
|
||||
req.Header.Add("X-ChatWorkToken", w.Cnf.APIToken)
|
||||
req.Header.Add("Content-Type", "application/x-www-form-urlencoded")
|
||||
client, err := util.GetHTTPClient(w.Proxy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -1,4 +1,4 @@
|
||||
package report
|
||||
package reporter
|
||||
|
||||
import (
|
||||
"crypto/tls"
|
||||
@@ -16,15 +16,19 @@ import (
|
||||
)
|
||||
|
||||
// EMailWriter send mail
|
||||
type EMailWriter struct{}
|
||||
type EMailWriter struct {
|
||||
FormatOneEMail bool
|
||||
FormatOneLineText bool
|
||||
FormatList bool
|
||||
Cnf config.SMTPConf
|
||||
}
|
||||
|
||||
func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
conf := config.Conf
|
||||
var message string
|
||||
sender := NewEMailSender()
|
||||
sender := NewEMailSender(w.Cnf)
|
||||
m := map[string]int{}
|
||||
for _, r := range rs {
|
||||
if conf.FormatOneEMail {
|
||||
if w.FormatOneEMail {
|
||||
message += formatFullPlainText(r) + "\r\n\r\n"
|
||||
mm := r.ScannedCves.CountGroupBySeverity()
|
||||
keys := []string{"High", "Medium", "Low", "Unknown"}
|
||||
@@ -35,19 +39,19 @@ func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
var subject string
|
||||
if len(r.Errors) != 0 {
|
||||
subject = fmt.Sprintf("%s%s An error occurred while scanning",
|
||||
conf.EMail.SubjectPrefix, r.ServerInfo())
|
||||
w.Cnf.SubjectPrefix, r.ServerInfo())
|
||||
} else {
|
||||
subject = fmt.Sprintf("%s%s %s",
|
||||
conf.EMail.SubjectPrefix,
|
||||
w.Cnf.SubjectPrefix,
|
||||
r.ServerInfo(),
|
||||
r.ScannedCves.FormatCveSummary())
|
||||
}
|
||||
if conf.FormatList {
|
||||
if w.FormatList {
|
||||
message = formatList(r)
|
||||
} else {
|
||||
message = formatFullPlainText(r)
|
||||
}
|
||||
if conf.FormatOneLineText {
|
||||
if w.FormatOneLineText {
|
||||
message = fmt.Sprintf("One Line Summary\r\n================\r\n%s", formatOneLineSummary(r))
|
||||
}
|
||||
if err := sender.Send(subject, message); err != nil {
|
||||
@@ -55,24 +59,20 @@ func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
var summary string
|
||||
if config.Conf.IgnoreUnscoredCves {
|
||||
summary = fmt.Sprintf("Total: %d (High:%d Medium:%d Low:%d)",
|
||||
m["High"]+m["Medium"]+m["Low"], m["High"], m["Medium"], m["Low"])
|
||||
} else {
|
||||
summary = fmt.Sprintf("Total: %d (High:%d Medium:%d Low:%d ?:%d)",
|
||||
m["High"]+m["Medium"]+m["Low"]+m["Unknown"],
|
||||
m["High"], m["Medium"], m["Low"], m["Unknown"])
|
||||
}
|
||||
|
||||
summary := fmt.Sprintf("Total: %d (High:%d Medium:%d Low:%d ?:%d)",
|
||||
m["High"]+m["Medium"]+m["Low"]+m["Unknown"],
|
||||
m["High"], m["Medium"], m["Low"], m["Unknown"])
|
||||
|
||||
origmessage := message
|
||||
if conf.FormatOneEMail {
|
||||
if w.FormatOneEMail {
|
||||
message = fmt.Sprintf("One Line Summary\r\n================\r\n%s", formatOneLineSummary(rs...))
|
||||
if !conf.FormatOneLineText {
|
||||
if !w.FormatOneLineText {
|
||||
message += fmt.Sprintf("\r\n\r\n%s", origmessage)
|
||||
}
|
||||
|
||||
subject := fmt.Sprintf("%s %s",
|
||||
conf.EMail.SubjectPrefix, summary)
|
||||
w.Cnf.SubjectPrefix, summary)
|
||||
return sender.Send(subject, message)
|
||||
}
|
||||
return nil
|
||||
@@ -123,11 +123,11 @@ func (e *emailSender) sendMail(smtpServerAddr, message string) (err error) {
|
||||
if ok, param := c.Extension("AUTH"); ok {
|
||||
authList := strings.Split(param, " ")
|
||||
auth = e.newSaslClient(authList)
|
||||
if err = c.Auth(auth); err != nil {
|
||||
return xerrors.Errorf("Failed to authenticate: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err = c.Auth(auth); err != nil {
|
||||
return xerrors.Errorf("Failed to authenticate: %w", err)
|
||||
}
|
||||
if err = c.Mail(emailConf.From, nil); err != nil {
|
||||
return xerrors.Errorf("Failed to send Mail command: %w", err)
|
||||
}
|
||||
@@ -196,8 +196,8 @@ func (e *emailSender) Send(subject, body string) (err error) {
|
||||
}
|
||||
|
||||
// NewEMailSender creates emailSender
|
||||
func NewEMailSender() EMailSender {
|
||||
return &emailSender{config.Conf.EMail}
|
||||
func NewEMailSender(cnf config.SMTPConf) EMailSender {
|
||||
return &emailSender{cnf}
|
||||
}
|
||||
|
||||
func (e *emailSender) newSaslClient(authList []string) sasl.Client {
|
||||
@@ -1,17 +1,18 @@
|
||||
package report
|
||||
package reporter
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
|
||||
c "github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// HTTPRequestWriter writes results to HTTP request
|
||||
type HTTPRequestWriter struct{}
|
||||
type HTTPRequestWriter struct {
|
||||
Proxy string
|
||||
}
|
||||
|
||||
// Write sends results to the configured endpoint via HTTP POST
|
||||
func (w HTTPRequestWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
@@ -20,7 +21,7 @@ func (w HTTPRequestWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
if err := json.NewEncoder(b).Encode(r); err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = http.Post(c.Conf.HTTP.URL, "application/json; charset=utf-8", b)
|
||||
_, err = http.Post(w.Proxy, "application/json; charset=utf-8", b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -1,28 +1,33 @@
package report
package reporter

import (
    "bytes"
    "encoding/json"
    "encoding/xml"
    "io/ioutil"
    "os"
    "path/filepath"

    c "github.com/future-architect/vuls/config"
    "github.com/future-architect/vuls/models"
    "golang.org/x/xerrors"
)

// LocalFileWriter writes results to a local file.
type LocalFileWriter struct {
    CurrentDir string
    CurrentDir string
    DiffPlus bool
    DiffMinus bool
    FormatJSON bool
    FormatCsv bool
    FormatFullText bool
    FormatOneLineText bool
    FormatList bool
    Gzip bool
}

func (w LocalFileWriter) Write(rs ...models.ScanResult) (err error) {
    if c.Conf.FormatOneLineText {
    if w.FormatOneLineText {
        path := filepath.Join(w.CurrentDir, "summary.txt")
        text := formatOneLineSummary(rs...)
        if err := writeFile(path, []byte(text), 0600); err != nil {
        if err := w.writeFile(path, []byte(text), 0600); err != nil {
            return xerrors.Errorf(
                "Failed to write to file. path: %s, err: %w",
                path, err)
@@ -30,77 +35,52 @@ func (w LocalFileWriter) Write(rs ...models.ScanResult) (err error) {
    }

    for _, r := range rs {
        r.SortForJSONOutput()

        path := filepath.Join(w.CurrentDir, r.ReportFileName())

        if c.Conf.FormatJSON {
            var p string
            if c.Conf.Diff {
        if w.FormatJSON {
            p := path + ".json"
            if w.DiffPlus || w.DiffMinus {
                p = path + "_diff.json"
            } else {
                p = path + ".json"
            }

            var b []byte
            if b, err = json.MarshalIndent(r, "", " "); err != nil {
                return xerrors.Errorf("Failed to Marshal to JSON: %w", err)
            }
            if err := writeFile(p, b, 0600); err != nil {
            if err := w.writeFile(p, b, 0600); err != nil {
                return xerrors.Errorf("Failed to write JSON. path: %s, err: %w", p, err)
            }
        }

        if c.Conf.FormatList {
            var p string
            if c.Conf.Diff {
        if w.FormatList {
            p := path + "_short.txt"
            if w.DiffPlus || w.DiffMinus {
                p = path + "_short_diff.txt"
            } else {
                p = path + "_short.txt"
            }

            if err := writeFile(
            if err := w.writeFile(
                p, []byte(formatList(r)), 0600); err != nil {
                return xerrors.Errorf(
                    "Failed to write text files. path: %s, err: %w", p, err)
            }
        }

        if c.Conf.FormatFullText {
            var p string
            if c.Conf.Diff {
        if w.FormatFullText {
            p := path + "_full.txt"
            if w.DiffPlus || w.DiffMinus {
                p = path + "_full_diff.txt"
            } else {
                p = path + "_full.txt"
            }

            if err := writeFile(
            if err := w.writeFile(
                p, []byte(formatFullPlainText(r)), 0600); err != nil {
                return xerrors.Errorf(
                    "Failed to write text files. path: %s, err: %w", p, err)
            }
        }

        if c.Conf.FormatXML {
            var p string
            if c.Conf.Diff {
                p = path + "_diff.xml"
            } else {
                p = path + ".xml"
            }

            var b []byte
            if b, err = xml.Marshal(r); err != nil {
                return xerrors.Errorf("Failed to Marshal to XML: %w", err)
            }
            allBytes := bytes.Join([][]byte{[]byte(xml.Header + vulsOpenTag), b, []byte(vulsCloseTag)}, []byte{})
            if err := writeFile(p, allBytes, 0600); err != nil {
                return xerrors.Errorf("Failed to write XML. path: %s, err: %w", p, err)
            }
        }

        if c.Conf.FormatCsvList {
            p := path + "_short.csv"
            if c.Conf.Diff {
                p = path + "_short_diff.csv"
        if w.FormatCsv {
            p := path + ".csv"
            if w.DiffPlus || w.DiffMinus {
                p = path + "_diff.csv"
            }
            if err := formatCsvList(r, p); err != nil {
                return xerrors.Errorf("Failed to write CSV: %s, %w", p, err)
@@ -111,10 +91,10 @@ func (w LocalFileWriter) Write(rs ...models.ScanResult) (err error) {
    return nil
}

func writeFile(path string, data []byte, perm os.FileMode) error {
    var err error
    if c.Conf.GZIP {
        if data, err = gz(data); err != nil {
func (w LocalFileWriter) writeFile(path string, data []byte, perm os.FileMode) (err error) {
    if w.Gzip {
        data, err = gz(data)
        if err != nil {
            return err
        }
        path += ".gz"
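With the format flags moved onto the struct, each LocalFileWriter instance now decides for itself what to emit. A sketch of how the new writer might be driven (field names are taken from the hunk above; the reporter import path is assumed):

    package main

    import (
        "log"
        "os"

        "github.com/future-architect/vuls/models"
        "github.com/future-architect/vuls/reporter" // assumed import path
    )

    func main() {
        wd, _ := os.Getwd()
        w := reporter.LocalFileWriter{
            CurrentDir: wd,    // where summary.txt and per-server report files are written
            FormatJSON: true,  // <name>.json, or <name>_diff.json when DiffPlus/DiffMinus is set
            FormatList: true,  // <name>_short.txt
            Gzip:       false, // when true, output is gzipped and ".gz" is appended
        }
        if err := w.Write(models.ScanResult{ServerName: "web01", Family: "ubuntu"}); err != nil {
            log.Fatal(err)
        }
    }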
@@ -1,9 +1,8 @@
package report
package reporter

import (
    "bytes"
    "encoding/json"
    "encoding/xml"
    "fmt"
    "path"
    "time"
@@ -16,23 +15,31 @@ import (
    "github.com/aws/aws-sdk-go/service/s3"
    "golang.org/x/xerrors"

    c "github.com/future-architect/vuls/config"
    "github.com/future-architect/vuls/config"
    "github.com/future-architect/vuls/models"
)

// S3Writer writes results to S3
type S3Writer struct{}
type S3Writer struct {
    FormatJSON bool
    FormatFullText bool
    FormatOneLineText bool
    FormatList bool
    Gzip bool

func getS3() (*s3.S3, error) {
    config.AWSConf
}

func (w S3Writer) getS3() (*s3.S3, error) {
    ses, err := session.NewSession()
    if err != nil {
        return nil, err
    }
    config := &aws.Config{
        Region: aws.String(c.Conf.AWS.Region),
        Region: aws.String(w.Region),
        Credentials: credentials.NewChainCredentials([]credentials.Provider{
            &credentials.EnvProvider{},
            &credentials.SharedCredentialsProvider{Filename: "", Profile: c.Conf.AWS.Profile},
            &credentials.SharedCredentialsProvider{Filename: "", Profile: w.Profile},
            &ec2rolecreds.EC2RoleProvider{Client: ec2metadata.New(ses)},
        }),
    }
@@ -50,57 +57,45 @@ func (w S3Writer) Write(rs ...models.ScanResult) (err error) {
    return nil
}

    svc, err := getS3()
    svc, err := w.getS3()
    if err != nil {
        return err
    }

    if c.Conf.FormatOneLineText {
    if w.FormatOneLineText {
        timestr := rs[0].ScannedAt.Format(time.RFC3339)
        k := fmt.Sprintf(timestr + "/summary.txt")
        text := formatOneLineSummary(rs...)
        if err := putObject(svc, k, []byte(text)); err != nil {
        if err := w.putObject(svc, k, []byte(text), w.Gzip); err != nil {
            return err
        }
    }

    for _, r := range rs {
        key := r.ReportKeyName()
        if c.Conf.FormatJSON {
        if w.FormatJSON {
            k := key + ".json"
            var b []byte
            if b, err = json.Marshal(r); err != nil {
                return xerrors.Errorf("Failed to Marshal to JSON: %w", err)
            }
            if err := putObject(svc, k, b); err != nil {
            if err := w.putObject(svc, k, b, w.Gzip); err != nil {
                return err
            }
        }

        if c.Conf.FormatList {
        if w.FormatList {
            k := key + "_short.txt"
            text := formatList(r)
            if err := putObject(svc, k, []byte(text)); err != nil {
            if err := w.putObject(svc, k, []byte(text), w.Gzip); err != nil {
                return err
            }
        }

        if c.Conf.FormatFullText {
        if w.FormatFullText {
            k := key + "_full.txt"
            text := formatFullPlainText(r)
            if err := putObject(svc, k, []byte(text)); err != nil {
                return err
            }
        }

        if c.Conf.FormatXML {
            k := key + ".xml"
            var b []byte
            if b, err = xml.Marshal(r); err != nil {
                return xerrors.Errorf("Failed to Marshal to XML: %w", err)
            }
            allBytes := bytes.Join([][]byte{[]byte(xml.Header + vulsOpenTag), b, []byte(vulsCloseTag)}, []byte{})
            if err := putObject(svc, k, allBytes); err != nil {
            if err := w.putObject(svc, k, []byte(text), w.Gzip); err != nil {
                return err
            }
        }
@@ -108,38 +103,36 @@ func (w S3Writer) Write(rs ...models.ScanResult) (err error) {
    return nil
}

// CheckIfBucketExists check the existence of S3 bucket
func CheckIfBucketExists() error {
    svc, err := getS3()
// Validate check the existence of S3 bucket
func (w S3Writer) Validate() error {
    svc, err := w.getS3()
    if err != nil {
        return err
    }

    result, err := svc.ListBuckets(&s3.ListBucketsInput{})
    if err != nil {
        return xerrors.Errorf(
            "Failed to list buckets. err: %w, profile: %s, region: %s",
            err, c.Conf.AWS.Profile, c.Conf.AWS.Region)
        return xerrors.Errorf("Failed to list buckets. err: %w, profile: %s, region: %s",
            err, w.Profile, w.Region)
    }

    found := false
    for _, bucket := range result.Buckets {
        if *bucket.Name == c.Conf.AWS.S3Bucket {
        if *bucket.Name == w.S3Bucket {
            found = true
            break
        }
    }
    if !found {
        return xerrors.Errorf(
            "Failed to find the buckets. profile: %s, region: %s, bucket: %s",
            c.Conf.AWS.Profile, c.Conf.AWS.Region, c.Conf.AWS.S3Bucket)
        return xerrors.Errorf("Failed to find the buckets. profile: %s, region: %s, bucket: %s",
            w.Profile, w.Region, w.S3Bucket)
    }
    return nil
}

func putObject(svc *s3.S3, k string, b []byte) error {
func (w S3Writer) putObject(svc *s3.S3, k string, b []byte, gzip bool) error {
    var err error
    if c.Conf.GZIP {
    if gzip {
        if b, err = gz(b); err != nil {
            return err
        }
@@ -147,18 +140,18 @@ func putObject(svc *s3.S3, k string, b []byte) error {
    }

    putObjectInput := &s3.PutObjectInput{
        Bucket: aws.String(c.Conf.AWS.S3Bucket),
        Key: aws.String(path.Join(c.Conf.AWS.S3ResultsDir, k)),
        Bucket: aws.String(w.S3Bucket),
        Key: aws.String(path.Join(w.S3ResultsDir, k)),
        Body: bytes.NewReader(b),
    }

    if c.Conf.AWS.S3ServerSideEncryption != "" {
        putObjectInput.ServerSideEncryption = aws.String(c.Conf.AWS.S3ServerSideEncryption)
    if w.S3ServerSideEncryption != "" {
        putObjectInput.ServerSideEncryption = aws.String(w.S3ServerSideEncryption)
    }

    if _, err := svc.PutObject(putObjectInput); err != nil {
        return xerrors.Errorf("Failed to upload data to %s/%s, err: %w",
            c.Conf.AWS.S3Bucket, k, err)
            w.S3Bucket, k, err)
    }
    return nil
}
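The S3 writer now carries both its format switches and its AWS settings on the struct; the hunk adds an embedded config.AWSConf, which appears to be where Region, Profile, S3Bucket, S3ResultsDir and S3ServerSideEncryption come from. A hedged construction sketch, not part of the diff; the reporter import path and the AWSConf field names are assumptions based on the identifiers referenced above:

    package main

    import (
        "log"

        "github.com/future-architect/vuls/config"
        "github.com/future-architect/vuls/models"
        "github.com/future-architect/vuls/reporter" // assumed import path
    )

    func main() {
        w := reporter.S3Writer{
            FormatJSON: true,
            Gzip:       true,
            // Embedded struct supplying the AWS settings used by getS3/putObject
            // (field names assumed from the w.Region / w.S3Bucket references above).
            AWSConf: config.AWSConf{
                Region:       "ap-northeast-1",
                Profile:      "default",
                S3Bucket:     "my-vuls-results",
                S3ResultsDir: "results",
            },
        }
        // Validate replaces the old package-level CheckIfBucketExists.
        if err := w.Validate(); err != nil {
            log.Fatal(err)
        }
        if err := w.Write(models.ScanResult{ServerName: "web01", Family: "ubuntu"}); err != nil {
            log.Fatal(err)
        }
    }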
@@ -1,4 +1,4 @@
package report
package reporter

import (
    "encoding/json"
@@ -9,13 +9,22 @@ import (

    "github.com/cenkalti/backoff"
    "github.com/future-architect/vuls/config"
    "github.com/future-architect/vuls/logging"
    "github.com/future-architect/vuls/models"
    "github.com/nlopes/slack"
    "github.com/parnurzeal/gorequest"
    log "github.com/sirupsen/logrus"
    "golang.org/x/xerrors"
)

// SlackWriter send report to slack
type SlackWriter struct {
    FormatOneLineText bool
    lang string
    osFamily string
    Cnf config.SlackConf
    Proxy string
}

type message struct {
    Text string `json:"text"`
    Username string `json:"username"`
@@ -24,15 +33,11 @@ type message struct {
    Attachments []slack.Attachment `json:"attachments"`
}

// SlackWriter send report to slack
type SlackWriter struct{}

func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {
    conf := config.Conf.Slack
    channel := conf.Channel
    token := conf.LegacyToken

    channel := w.Cnf.Channel
    for _, r := range rs {
        w.lang, w.osFamily = r.Lang, r.Family
        if channel == "${servername}" {
            channel = fmt.Sprintf("#%s", r.ServerName)
        }
@@ -42,7 +47,7 @@ func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {
        // https://api.slack.com/methods/chat.postMessage
        maxAttachments := 100
        m := map[int][]slack.Attachment{}
        for i, a := range toSlackAttachments(r) {
        for i, a := range w.toSlackAttachments(r) {
            m[i/maxAttachments] = append(m[i/maxAttachments], a)
        }
        chunkKeys := []int{}
@@ -52,15 +57,15 @@ func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {
        sort.Ints(chunkKeys)

        summary := fmt.Sprintf("%s\n%s",
            getNotifyUsers(config.Conf.Slack.NotifyUsers),
            w.getNotifyUsers(w.Cnf.NotifyUsers),
            formatOneLineSummary(r))

        // Send slack by API
        if 0 < len(token) {
            api := slack.New(token)
        if 0 < len(w.Cnf.LegacyToken) {
            api := slack.New(w.Cnf.LegacyToken)
            msgPrms := slack.PostMessageParameters{
                Username: conf.AuthUser,
                IconEmoji: conf.IconEmoji,
                Username: w.Cnf.AuthUser,
                IconEmoji: w.Cnf.IconEmoji,
            }

            var ts string
@@ -72,14 +77,14 @@ func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {
                return err
            }

            if config.Conf.FormatOneLineText || 0 < len(r.Errors) {
            if w.FormatOneLineText || 0 < len(r.Errors) {
                continue
            }

            for _, k := range chunkKeys {
                params := slack.PostMessageParameters{
                    Username: conf.AuthUser,
                    IconEmoji: conf.IconEmoji,
                    Username: w.Cnf.AuthUser,
                    IconEmoji: w.Cnf.IconEmoji,
                    ThreadTimestamp: ts,
                }
                if _, _, err = api.PostMessage(
@@ -94,15 +99,15 @@ func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {
        } else {
            msg := message{
                Text: summary,
                Username: conf.AuthUser,
                IconEmoji: conf.IconEmoji,
                Username: w.Cnf.AuthUser,
                IconEmoji: w.Cnf.IconEmoji,
                Channel: channel,
            }
            if err := send(msg); err != nil {
            if err := w.send(msg); err != nil {
                return err
            }

            if config.Conf.FormatOneLineText || 0 < len(r.Errors) {
            if w.FormatOneLineText || 0 < len(r.Errors) {
                continue
            }

@@ -114,12 +119,12 @@ func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {

                msg := message{
                    Text: txt,
                    Username: conf.AuthUser,
                    IconEmoji: conf.IconEmoji,
                    Username: w.Cnf.AuthUser,
                    IconEmoji: w.Cnf.IconEmoji,
                    Channel: channel,
                    Attachments: m[k],
                }
                if err = send(msg); err != nil {
                if err = w.send(msg); err != nil {
                    return err
                }
            }
@@ -128,29 +133,28 @@ func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {
    return nil
}

func send(msg message) error {
    conf := config.Conf.Slack
    count, retryMax := 0, 10
func (w SlackWriter) send(msg message) error {

    count, retryMax := 0, 10
    bytes, _ := json.Marshal(msg)
    jsonBody := string(bytes)

    f := func() (err error) {
        resp, body, errs := gorequest.New().Proxy(config.Conf.HTTPProxy).Post(conf.HookURL).Send(string(jsonBody)).End()
        resp, body, errs := gorequest.New().Timeout(10 * time.Second).Proxy(w.Proxy).Post(w.Cnf.HookURL).Send(string(jsonBody)).End()
        if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
            count++
            if count == retryMax {
                return nil
            }
            return xerrors.Errorf(
                "HTTP POST error. url: %s, resp: %v, body: %s, err: %w",
                conf.HookURL, resp, body, errs)
                "HTTP POST error. url: %s, resp: %v, body: %s, err: %+v",
                w.Cnf.HookURL, resp, body, errs)
        }
        return nil
    }
    notify := func(err error, t time.Duration) {
        log.Warnf("Error %s", err)
        log.Warn("Retrying in ", t)
        logging.Log.Warnf("Error %s", err)
        logging.Log.Warn("Retrying in ", t)
    }
    boff := backoff.NewExponentialBackOff()
    if err := backoff.RetryNotify(f, boff, notify); err != nil {
@@ -162,7 +166,7 @@ func send(msg message) error {
    return nil
}

func toSlackAttachments(r models.ScanResult) (attaches []slack.Attachment) {
func (w SlackWriter) toSlackAttachments(r models.ScanResult) (attaches []slack.Attachment) {
    vinfos := r.ScannedCves.ToSortedSlice()
    for _, vinfo := range vinfos {

@@ -206,9 +210,9 @@ func toSlackAttachments(r models.ScanResult) (attaches []slack.Attachment) {
        }

        a := slack.Attachment{
            Title: vinfo.CveID,
            Title: vinfo.CveIDDiffFormat(),
            TitleLink: "https://nvd.nist.gov/vuln/detail/" + vinfo.CveID,
            Text: attachmentText(vinfo, r.Family, r.CweDict, r.Packages),
            Text: w.attachmentText(vinfo, r.CweDict, r.Packages),
            MarkdownIn: []string{"text", "pretext"},
            Fields: []slack.AttachmentField{
                {
@@ -244,11 +248,11 @@ func cvssColor(cvssScore float64) string {
    }
}

func attachmentText(vinfo models.VulnInfo, osFamily string, cweDict map[string]models.CweDictEntry, packs models.Packages) string {
func (w SlackWriter) attachmentText(vinfo models.VulnInfo, cweDict map[string]models.CweDictEntry, packs models.Packages) string {
    maxCvss := vinfo.MaxCvssScore()
    vectors := []string{}

    scores := append(vinfo.Cvss3Scores(), vinfo.Cvss2Scores(osFamily)...)
    scores := append(vinfo.Cvss3Scores(), vinfo.Cvss2Scores()...)
    for _, cvss := range scores {
        if cvss.Value.Severity == "" {
            continue
@@ -277,7 +281,7 @@ func attachmentText(vinfo models.VulnInfo, osFamily string, cweDict map[string]m
    } else {
        if 0 < len(vinfo.DistroAdvisories) {
            links := []string{}
            for _, v := range vinfo.CveContents.PrimarySrcURLs(config.Conf.Lang, osFamily, vinfo.CveID) {
            for _, v := range vinfo.CveContents.PrimarySrcURLs(w.lang, w.osFamily, vinfo.CveID) {
                links = append(links, fmt.Sprintf("<%s|%s>", v.Value, v.Type))
            }

@@ -312,16 +316,16 @@ func attachmentText(vinfo models.VulnInfo, osFamily string, cweDict map[string]m
        nwvec,
        vinfo.PatchStatus(packs),
        strings.Join(vectors, "\n"),
        vinfo.Summaries(config.Conf.Lang, osFamily)[0].Value,
        vinfo.Summaries(w.lang, w.osFamily)[0].Value,
        mitigation,
        cweIDs(vinfo, osFamily, cweDict),
        w.cweIDs(vinfo, w.osFamily, cweDict),
    )
}

func cweIDs(vinfo models.VulnInfo, osFamily string, cweDict models.CweDict) string {
func (w SlackWriter) cweIDs(vinfo models.VulnInfo, osFamily string, cweDict models.CweDict) string {
    links := []string{}
    for _, c := range vinfo.CveContents.UniqCweIDs(osFamily) {
        name, url, top10Rank, top10URL, cweTop25Rank, cweTop25URL, sansTop25Rank, sansTop25URL := cweDict.Get(c.Value, osFamily)
        name, url, top10Rank, top10URL, cweTop25Rank, cweTop25URL, sansTop25Rank, sansTop25URL := cweDict.Get(c.Value, w.lang)
        line := ""
        if top10Rank != "" {
            line = fmt.Sprintf("<%s|[OWASP Top %s]>",
@@ -344,7 +348,7 @@ func cweIDs(vinfo models.VulnInfo, osFamily string, cweDict models.CweDict) stri
    }

// See testcase
func getNotifyUsers(notifyUsers []string) string {
func (w SlackWriter) getNotifyUsers(notifyUsers []string) string {
    slackStyleTexts := []string{}
    for _, username := range notifyUsers {
        slackStyleTexts = append(slackStyleTexts, fmt.Sprintf("<%s>", username))
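SlackWriter now receives its whole Slack configuration as Cnf config.SlackConf plus an explicit HTTP proxy, instead of reading config.Conf.Slack inside every helper. Roughly, under the same import-path assumption as above (the SlackConf field names follow the ones referenced in the hunk: HookURL, Channel, AuthUser, IconEmoji, LegacyToken, NotifyUsers):

    package main

    import (
        "log"

        "github.com/future-architect/vuls/config"
        "github.com/future-architect/vuls/models"
        "github.com/future-architect/vuls/reporter" // assumed import path
    )

    func main() {
        w := reporter.SlackWriter{
            FormatOneLineText: true, // post only the one-line summary, skip per-CVE attachments
            Cnf: config.SlackConf{
                HookURL:   "https://hooks.slack.com/services/T000/B000/XXXX",
                Channel:   "#vuls",
                AuthUser:  "vuls-reporter",
                IconEmoji: ":ghost:",
            },
            Proxy: "", // forwarded to gorequest.New().Proxy(...)
        }
        if err := w.Write(models.ScanResult{ServerName: "web01", Family: "ubuntu"}); err != nil {
            log.Fatal(err)
        }
    }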
@@ -1,4 +1,4 @@
package report
package reporter

import "testing"

@@ -14,7 +14,7 @@ func TestGetNotifyUsers(t *testing.T) {
    }

    for _, tt := range tests {
        actual := getNotifyUsers(tt.in)
        actual := SlackWriter{}.getNotifyUsers(tt.in)
        if tt.expected != actual {
            t.Errorf("expected %s, actual %s", tt.expected, actual)
        }
@@ -1,14 +1,20 @@
package report
package reporter

import (
    "fmt"

    c "github.com/future-architect/vuls/config"
    "github.com/future-architect/vuls/models"
)

// StdoutWriter write to stdout
type StdoutWriter struct{}
type StdoutWriter struct {
    FormatCsv bool
    FormatFullText bool
    FormatOneLineText bool
    FormatList bool
}

//TODO support -format-jSON

// WriteScanSummary prints Scan summary at the end of scan
func (w StdoutWriter) WriteScanSummary(rs ...models.ScanResult) {
@@ -19,7 +25,7 @@ func (w StdoutWriter) WriteScanSummary(rs ...models.ScanResult) {
}

func (w StdoutWriter) Write(rs ...models.ScanResult) error {
    if c.Conf.FormatOneLineText {
    if w.FormatOneLineText {
        fmt.Print("\n\n")
        fmt.Println("One Line Summary")
        fmt.Println("================")
@@ -27,13 +33,13 @@ func (w StdoutWriter) Write(rs ...models.ScanResult) error {
        fmt.Print("\n")
    }

    if c.Conf.FormatList || c.Conf.FormatCsvList {
    if w.FormatList || w.FormatCsv {
        for _, r := range rs {
            fmt.Println(formatList(r))
        }
    }

    if c.Conf.FormatFullText {
    if w.FormatFullText {
        for _, r := range rs {
            fmt.Println(formatFullPlainText(r))
        }
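StdoutWriter keeps the same Write and WriteScanSummary entry points but takes its format switches as struct fields. For example (same assumed import path as above):

    package main

    import (
        "log"

        "github.com/future-architect/vuls/models"
        "github.com/future-architect/vuls/reporter" // assumed import path
    )

    func main() {
        w := reporter.StdoutWriter{
            FormatOneLineText: true,
            FormatList:        true,
        }
        r := models.ScanResult{ServerName: "web01", Family: "ubuntu"}
        w.WriteScanSummary(r) // short scan summary
        if err := w.Write(r); err != nil { // one-line summary plus the short list, per the flags above
            log.Fatal(err)
        }
    }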
@@ -1,10 +1,10 @@
package report
package reporter

import (
    "fmt"
    "log/syslog"
    "strings"

    syslog "github.com/RackSec/srslog"
    "golang.org/x/xerrors"

    "github.com/future-architect/vuls/config"
@@ -12,15 +12,16 @@ import (
)

// SyslogWriter send report to syslog
type SyslogWriter struct{}
type SyslogWriter struct {
    Cnf config.SyslogConf
}

func (w SyslogWriter) Write(rs ...models.ScanResult) (err error) {
    conf := config.Conf.Syslog
    facility, _ := conf.GetFacility()
    severity, _ := conf.GetSeverity()
    raddr := fmt.Sprintf("%s:%s", conf.Host, conf.Port)
    facility, _ := w.Cnf.GetFacility()
    severity, _ := w.Cnf.GetSeverity()
    raddr := fmt.Sprintf("%s:%s", w.Cnf.Host, w.Cnf.Port)

    sysLog, err := syslog.Dial(conf.Protocol, raddr, severity|facility, conf.Tag)
    sysLog, err := syslog.Dial(w.Cnf.Protocol, raddr, severity|facility, w.Cnf.Tag)
    if err != nil {
        return xerrors.Errorf("Failed to initialize syslog client: %w", err)
    }
@@ -59,7 +60,7 @@ func (w SyslogWriter) encodeSyslog(result models.ScanResult) (messages []string)
            kvPairs = append(kvPairs, fmt.Sprintf(`packages="%s"`, pkgs))

            kvPairs = append(kvPairs, fmt.Sprintf(`cve_id="%s"`, cveID))
            for _, cvss := range vinfo.Cvss2Scores(result.Family) {
            for _, cvss := range vinfo.Cvss2Scores() {
                kvPairs = append(kvPairs, fmt.Sprintf(`cvss_score_%s_v2="%.2f"`, cvss.Type, cvss.Value.Score))
                kvPairs = append(kvPairs, fmt.Sprintf(`cvss_vector_%s_v2="%s"`, cvss.Type, cvss.Value.Vector))
            }
@@ -72,7 +73,7 @@ func (w SyslogWriter) encodeSyslog(result models.ScanResult) (messages []string)
            if content, ok := vinfo.CveContents[models.Nvd]; ok {
                cwes := strings.Join(content.CweIDs, ",")
                kvPairs = append(kvPairs, fmt.Sprintf(`cwe_ids="%s"`, cwes))
                if config.Conf.Syslog.Verbose {
                if w.Cnf.Verbose {
                    kvPairs = append(kvPairs, fmt.Sprintf(`source_link="%s"`, content.SourceLink))
                    kvPairs = append(kvPairs, fmt.Sprintf(`summary="%s"`, content.Summary))
                }
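SyslogWriter is now configured through Cnf config.SyslogConf; Protocol, Host, Port, Tag and Verbose, along with the GetFacility/GetSeverity helpers, are all referenced in the hunk above. A sketch, with facility/severity left at their defaults and the reporter import path assumed:

    package main

    import (
        "log"

        "github.com/future-architect/vuls/config"
        "github.com/future-architect/vuls/models"
        "github.com/future-architect/vuls/reporter" // assumed import path
    )

    func main() {
        w := reporter.SyslogWriter{
            Cnf: config.SyslogConf{
                Protocol: "udp",
                Host:     "127.0.0.1",
                Port:     "514", // formatted with %s in the hunk, so the port is a string
                Tag:      "vuls",
                Verbose:  false, // when true, source_link and summary are added to each message
            },
        }
        if err := w.Write(models.ScanResult{ServerName: "web01", Family: "ubuntu"}); err != nil {
            log.Fatal(err)
        }
    }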
@@ -1,4 +1,4 @@
package report
package reporter

import (
    "sort"
@@ -51,6 +51,7 @@ func TestSyslogWriterEncodeSyslog(t *testing.T) {
            `scanned_at="2018-06-13 16:10:00 +0000 UTC" server_name="teste01" os_family="ubuntu" os_release="16.04" ipv4_addr="192.168.0.1,10.0.2.15" ipv6_addr="" packages="pkg3,pkg4" cve_id="CVE-2017-0002" cvss_score_nvd_v2="5.00" cvss_vector_nvd_v2="AV:L/AC:L/Au:N/C:N/I:N/A:C" cvss_score_nvd_v3="9.80" cvss_vector_nvd_v3="AV:L/AC:L/Au:N/C:N/I:N/A:C" cwe_ids="CWE-20"`,
            },
        },
        // 1
        {
            result: models.ScanResult{
                ScannedAt: time.Date(2018, 6, 13, 17, 10, 0, 0, time.UTC),
@@ -65,10 +66,11 @@ func TestSyslogWriterEncodeSyslog(t *testing.T) {
                },
                CveContents: models.CveContents{
                    models.RedHat: models.CveContent{
                        Cvss3Score: 5.0,
                        Cvss3Vector: "AV:L/AC:L/Au:N/C:N/I:N/A:C",
                        CweIDs: []string{"CWE-284"},
                        Title: "RHSA-2017:0001: pkg5 security update (Important)",
                        Cvss3Score: 5.0,
                        Cvss3Severity: "Medium",
                        Cvss3Vector: "AV:L/AC:L/Au:N/C:N/I:N/A:C",
                        CweIDs: []string{"CWE-284"},
                        Title: "RHSA-2017:0001: pkg5 security update (Important)",
                    },
                },
            },
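Every writer touched in these hunks ends up with the same value-receiver Write(rs ...models.ScanResult) method, so a caller can fan results out over whichever sinks are enabled. A sketch under that assumption, not taken from the diff itself:

    package main

    import (
        "log"

        "github.com/future-architect/vuls/models"
        "github.com/future-architect/vuls/reporter" // assumed import path
    )

    // ResultWriter matches the Write method shared by the writers in this diff.
    type ResultWriter interface {
        Write(rs ...models.ScanResult) error
    }

    func main() {
        writers := []ResultWriter{
            reporter.StdoutWriter{FormatOneLineText: true},
            reporter.LocalFileWriter{CurrentDir: ".", FormatJSON: true},
        }
        r := models.ScanResult{ServerName: "web01", Family: "ubuntu"}
        for _, w := range writers {
            if err := w.Write(r); err != nil {
                log.Printf("report failed: %s", err)
            }
        }
    }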