Compare commits
90 Commits
| SHA1 |
|---|
| a528362663 |
| ee97d98c39 |
| 4e486dae1d |
| 897fef24a3 |
| 73f0adad95 |
| 704492963c |
| 1927ed344c |
| ad2edbb844 |
| bfe0db77b4 |
| ff3b9cdc16 |
| 2deb1b9d32 |
| ca64d7fc31 |
| 554ecc437e |
| f6cd4d9223 |
| 03c59866d4 |
| 1d97e91341 |
| 96333f38c9 |
| 8b5d1c8e92 |
| dea80f860c |
| 6eb4c5a5fe |
| b219a8495e |
| eb87d5d4e1 |
| 6963442a5e |
| f7299b9dba |
| 379fc8a1a1 |
| 947fbbb29e |
| 06d2032c9c |
| d055c48827 |
| 2a00339da1 |
| 2d959b3af8 |
| 595e26db41 |
| 1e457320c5 |
| a06e689502 |
| ca3f6b1dbf |
| f1c78e42a2 |
| 2f3b8bf3cc |
| ab54266f9e |
| d79d138440 |
| 139f3a81b6 |
| d1a617cfff |
| 48f7597bcf |
| 93731311a1 |
| 999529a05b |
| 847d820af7 |
| 5234306ded |
| 86b60e1478 |
| 42fdc08933 |
| 38b1d622f6 |
| 2477f9a8f8 |
| ec6e90acd3 |
| 2aca2e4352 |
| 14518d925e |
| 948f8c0751 |
| 1c1e40058e |
| 2158fc6cb1 |
| 91ed318c5d |
| bfc3828ce1 |
| c7eac4e7fe |
| cc63a0eccf |
| fd18df1dd4 |
| 8775b5efdf |
| a9f29a6c5d |
| 05fdde48f9 |
| 3dfbd6b616 |
| 04f246cf8b |
| 7500f41655 |
| a1cc152e81 |
| 1c77bc1ba3 |
| ec31c54caf |
| 2f05864813 |
| 2fbc0a001e |
| 7d8a24ee1a |
| 7750347010 |
| 9bcffcd721 |
| 787604de6a |
| 5164fb1423 |
| 07335617d3 |
| e5855922c1 |
| 671be3f2f7 |
| fe8d252c51 |
| 0cdc7a3af5 |
| 1cfe155a3a |
| 2923cbc645 |
| 7c209cc9dc |
| 84fa4ce432 |
| f2e9cd9668 |
| 77049d6cbb |
| b4c23c158b |
| 964b4aa389 |
| dc5aa35db7 |

@@ -1,7 +1,6 @@
 .dockerignore
 Dockerfile
 vendor/
-cve.sqlite3*
-oval.sqlite3*
+*.sqlite3*
 setup/
 img/

.github/workflows/codeql-analysis.yml (vendored, 8 changes)

@@ -35,11 +35,11 @@ jobs:

    steps:
    - name: Checkout repository
-     uses: actions/checkout@v2
+     uses: actions/checkout@v3

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
-     uses: github/codeql-action/init@v1
+     uses: github/codeql-action/init@v2
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.
@@ -50,7 +50,7 @@ jobs:
    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
-     uses: github/codeql-action/autobuild@v1
+     uses: github/codeql-action/autobuild@v2

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl
@@ -64,4 +64,4 @@ jobs:
    #   make release

    - name: Perform CodeQL Analysis
-     uses: github/codeql-action/analyze@v1
+     uses: github/codeql-action/analyze@v2

.github/workflows/docker-publish.yml (vendored, 42 changes)

@@ -12,34 +12,58 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
-       uses: actions/checkout@v2
+       uses: actions/checkout@v3

      - name: Set up QEMU
-       uses: docker/setup-qemu-action@v1
+       uses: docker/setup-qemu-action@v2

      - name: Set up Docker Buildx
-       uses: docker/setup-buildx-action@v1
+       uses: docker/setup-buildx-action@v2

-     - name: Docker meta
-       id: meta
-       uses: docker/metadata-action@v3
+     - name: vuls/vuls image meta
+       id: oss-meta
+       uses: docker/metadata-action@v4
        with:
          images: vuls/vuls
          tags: |
            type=ref,event=tag

+     - name: vuls/fvuls image meta
+       id: fvuls-meta
+       uses: docker/metadata-action@v4
+       with:
+         images: vuls/fvuls
+         tags: |
+           type=ref,event=tag
+
      - name: Login to DockerHub
-       uses: docker/login-action@v1
+       uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

-     - name: Build and push
+     - name: OSS image build and push
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./Dockerfile
          push: true
          tags: |
            vuls/vuls:latest
-           ${{ steps.meta.outputs.tags }}
+           ${{ steps.oss-meta.outputs.tags }}
          secrets: |
            "github_token=${{ secrets.GITHUB_TOKEN }}"
          platforms: linux/amd64,linux/arm64

+     - name: FutureVuls image build and push
+       uses: docker/build-push-action@v2
+       with:
+         context: .
+         file: ./contrib/Dockerfile
+         push: true
+         tags: |
+           vuls/fvuls:latest
+           ${{ steps.fvuls-meta.outputs.tags }}
+         secrets: |
+           "github_token=${{ secrets.GITHUB_TOKEN }}"
+         platforms: linux/amd64,linux/arm64

.github/workflows/golangci.yml (vendored, 13 changes)

@@ -11,12 +11,15 @@ jobs:
    name: lint
    runs-on: ubuntu-latest
    steps:
-     - uses: actions/checkout@v2
-     - name: golangci-lint
-       uses: golangci/golangci-lint-action@v2
+     - uses: actions/setup-go@v3
        with:
-         # Required: the version of golangci-lint is required and must be specified without patch version: we always use the latest patch version.
-         version: v1.42
+         go-version: 1.18
+     - uses: actions/checkout@v3
+     - name: golangci-lint
+       uses: golangci/golangci-lint-action@v3
+       with:
+         # Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
+         version: v1.50.1
          args: --timeout=10m

          # Optional: working directory, useful for monorepos

.github/workflows/goreleaser.yml (vendored, 9 changes)

@@ -11,15 +11,18 @@ jobs:
    steps:
      -
        name: Checkout
-       uses: actions/checkout@v2
+       uses: actions/checkout@v3
+     -
+       name: install package for cross compile
+       run: sudo apt update && sudo apt install -y gcc-aarch64-linux-gnu
      -
        name: Unshallow
        run: git fetch --prune --unshallow
      -
        name: Set up Go
-       uses: actions/setup-go@v2
+       uses: actions/setup-go@v3
        with:
-         go-version: 1.16
+         go-version: 1.18
      -
        name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v2

.github/workflows/test.yml (vendored, 6 changes)

@@ -9,13 +9,13 @@ jobs:
    steps:

    - name: Set up Go 1.x
-     uses: actions/setup-go@v2
+     uses: actions/setup-go@v3
      with:
-       go-version: 1.16.x
+       go-version: 1.18.x
      id: go

    - name: Check out code into the Go module directory
-     uses: actions/checkout@v2
+     uses: actions/checkout@v3

    - name: Test
      run: make test

.gitignore (vendored, 2 changes)

@@ -18,3 +18,5 @@ dist/
 vuls.*
 vuls
 !cmd/vuls
+future-vuls
+trivy-to-vuls

@@ -1,5 +1,9 @@
 name: golang-ci

+run:
+  timeout: 10m
+  go: '1.18'
+
 linters-settings:
   revive:
     # see https://github.com/mgechev/revive#available-rules for details.
@@ -31,6 +35,9 @@ linters-settings:
    - name: unused-parameter
    - name: unreachable-code
    - name: redefines-builtin-id
+  staticcheck:
+    # https://staticcheck.io/docs/options#checks
+    checks: ["all", "-SA1019"]
  # errcheck:
  #exclude: /path/to/file.txt

@@ -6,11 +6,29 @@ release:
   owner: future-architect
   name: vuls
 builds:
-- id: vuls
+- id: vuls-amd64
  goos:
  - linux
+ goarch:
+ - amd64
+ env:
+ - CGO_ENABLED=1
+ - CC=x86_64-linux-gnu-gcc
  main: ./cmd/vuls/main.go
  flags:
  - -a
  ldflags:
  - -s -w -X github.com/future-architect/vuls/config.Version={{.Version}} -X github.com/future-architect/vuls/config.Revision={{.Commit}}-{{ .CommitDate }}
  binary: vuls
+
+- id: vuls-arm64
+ goos:
+ - linux
+ goarch:
+ - arm64
+ env:
+ - CGO_ENABLED=1
+ - CC=aarch64-linux-gnu-gcc
+ main: ./cmd/vuls/main.go
+ flags:
+ - -a
@@ -74,7 +92,8 @@ archives:
 - id: vuls
   name_template: '{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}'
   builds:
-  - vuls
+  - vuls-amd64
+  - vuls-arm64
   format: tar.gz
   files:
   - LICENSE

@@ -10,7 +10,7 @@ ENV REPOSITORY github.com/future-architect/vuls
 COPY . $GOPATH/src/$REPOSITORY
 RUN cd $GOPATH/src/$REPOSITORY && make install

-FROM alpine:3.14
+FROM alpine:3.16

 ENV LOGDIR /var/log/vuls
 ENV WORKDIR /vuls

GNUmakefile (25 changes)

@@ -23,12 +23,9 @@ CGO_UNABLED := CGO_ENABLED=0 go
 GO_OFF := GO111MODULE=off go


-all: b
+all: build test

-build: ./cmd/vuls/main.go pretest fmt
-        $(GO) build -a -ldflags "$(LDFLAGS)" -o vuls ./cmd/vuls
-
-b: ./cmd/vuls/main.go
+build: ./cmd/vuls/main.go
         $(GO) build -a -ldflags "$(LDFLAGS)" -o vuls ./cmd/vuls

 install: ./cmd/vuls/main.go
@@ -41,13 +38,14 @@ install-scanner: ./cmd/scanner/main.go
         $(CGO_UNABLED) install -tags=scanner -ldflags "$(LDFLAGS)" ./cmd/scanner

 lint:
-        $(GO_OFF) get -u github.com/mgechev/revive
+        $(GO) install github.com/mgechev/revive@latest
         revive -config ./.revive.toml -formatter plain $(PKGS)

 vet:
         echo $(PKGS) | xargs env $(GO) vet || exit;

 golangci:
+        $(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
         golangci-lint run

 fmt:
@@ -59,9 +57,9 @@ mlint:
 fmtcheck:
         $(foreach file,$(SRCS),gofmt -s -d $(file);)

-pretest: lint vet fmtcheck golangci
+pretest: lint vet fmtcheck

-test:
+test: pretest
         $(GO) test -cover -v ./... || exit;

 unused:
@@ -76,13 +74,12 @@ clean:
         echo $(PKGS) | xargs go clean || exit;

 # trivy-to-vuls
-build-trivy-to-vuls: pretest fmt
-        $(GO) build -a -ldflags "$(LDFLAGS)" -o trivy-to-vuls contrib/trivy/cmd/*.go
+build-trivy-to-vuls: ./contrib/trivy/cmd/main.go
+        $(GO) build -a -ldflags "$(LDFLAGS)" -o trivy-to-vuls ./contrib/trivy/cmd

 # future-vuls
-build-future-vuls: pretest fmt
-        $(GO) build -a -ldflags "$(LDFLAGS)" -o future-vuls contrib/future-vuls/cmd/*.go
-
+build-future-vuls: ./contrib/future-vuls/cmd/main.go
+        $(GO) build -a -ldflags "$(LDFLAGS)" -o future-vuls ./contrib/future-vuls/cmd

 # integration-test
 BASE_DIR := '${PWD}/integration/results'
@@ -91,7 +88,7 @@ NOW=$(shell date --iso-8601=seconds)
 NOW_JSON_DIR := '${BASE_DIR}/$(NOW)'
 ONE_SEC_AFTER=$(shell date -d '+1 second' --iso-8601=seconds)
 ONE_SEC_AFTER_JSON_DIR := '${BASE_DIR}/$(ONE_SEC_AFTER)'
-LIBS := 'bundler' 'pip' 'pipenv' 'poetry' 'composer' 'npm' 'yarn' 'cargo' 'gomod' 'gobinary' 'jar' 'pom' 'nuget-lock' 'nuget-config' 'nvd_exact' 'nvd_rough' 'nvd_vendor_product' 'nvd_match_no_jvn' 'jvn_vendor_product' 'jvn_vendor_product_nover'
+LIBS := 'bundler' 'pip' 'pipenv' 'poetry' 'composer' 'npm' 'yarn' 'pnpm' 'cargo' 'gomod' 'gosum' 'gobinary' 'jar' 'pom' 'gradle' 'nuget-lock' 'nuget-config' 'dotnet-deps' 'conan' 'nvd_exact' 'nvd_rough' 'nvd_vendor_product' 'nvd_match_no_jvn' 'jvn_vendor_product' 'jvn_vendor_product_nover'

 diff:
         # git clone git@github.com:vulsio/vulsctl.git

README.md (22 changes)

@@ -9,8 +9,8 @@

 


-Vulnerability scanner for Linux/FreeBSD, agent-less, written in Go.
-We have a slack team. [Join slack team](http://goo.gl/forms/xm5KFo35tu)
+Vulnerability scanner for Linux/FreeBSD, agent-less, written in Go.
+We have a slack team. [Join slack team](https://join.slack.com/t/vuls-github/shared_invite/zt-1fculjwj4-6nex2JNE7DpOSiKZ1ztDFw)
 Twitter: [@vuls_en](https://twitter.com/vuls_en)

 
@@ -50,7 +50,7 @@ Vuls is a tool created to solve the problems listed above. It has the following

 [Supports major Linux/FreeBSD](https://vuls.io/docs/en/supported-os.html)

-- Alpine, Amazon Linux, CentOS, Alma Linux, Rocky Linux, Debian, Oracle Linux, Raspbian, RHEL, SUSE Enterprise Linux, and Ubuntu
+- Alpine, Amazon Linux, CentOS, AlmaLinux, Rocky Linux, Debian, Oracle Linux, Raspbian, RHEL, openSUSE, openSUSE Leap, SUSE Enterprise Linux, Fedora, and Ubuntu
 - FreeBSD
 - Cloud, on-premise, Running Docker Container
@@ -82,6 +82,7 @@ Vuls is a tool created to solve the problems listed above. It has the following
   - [Metasploit-Framework modules](https://www.rapid7.com/db/?q=&type=metasploit)
   - [qazbnm456/awesome-cve-poc](https://github.com/qazbnm456/awesome-cve-poc)
   - [nomi-sec/PoC-in-GitHub](https://github.com/nomi-sec/PoC-in-GitHub)
+  - [gmatuz/inthewilddb](https://github.com/gmatuz/inthewilddb)

 - CERT
   - [US-CERT](https://www.us-cert.gov/ncas/alerts)
@@ -90,12 +91,11 @@ Vuls is a tool created to solve the problems listed above. It has the following
 - CISA(Cybersecurity & Infrastructure Security Agency)
   - [Known Exploited Vulnerabilities Catalog](https://www.cisa.gov/known-exploited-vulnerabilities-catalog)

+- Cyber Threat Intelligence(MITRE ATT&CK and CAPEC)
+  - [mitre/cti](https://github.com/mitre/cti)
+
 - Libraries
-  - [Node.js Security Working Group](https://github.com/nodejs/security-wg)
-  - [Ruby Advisory Database](https://github.com/rubysec/ruby-advisory-db)
-  - [Safety DB(Python)](https://github.com/pyupio/safety-db)
-  - [PHP Security Advisories Database](https://github.com/FriendsOfPHP/security-advisories)
-  - [RustSec Advisory Database](https://github.com/RustSec/advisory-db)
+  - [aquasecurity/vuln-list](https://github.com/aquasecurity/vuln-list)

 - WordPress
   - [wpscan](https://wpscan.com/api)
@@ -106,15 +106,15 @@ Vuls is a tool created to solve the problems listed above. It has the following

 - Scan without root privilege, no dependencies
 - Almost no load on the scan target server
-- Offline mode scan with no internet access. (CentOS, Alma Linux, Rocky Linux, Debian, Oracle Linux, Red Hat, and Ubuntu)
+- Offline mode scan with no internet access. (CentOS, Alma Linux, Rocky Linux, Debian, Oracle Linux, Red Hat, Fedora, and Ubuntu)

 [Fast Root Scan](https://vuls.io/docs/en/architecture-fast-root-scan.html)

 - Scan with root privilege
 - Almost no load on the scan target server
-- Detect processes affected by update using yum-ps (Amazon Linux, CentOS, Alma Linux, Rocky Linux, Oracle Linux, and RedHat)
+- Detect processes affected by update using yum-ps (Amazon Linux, CentOS, Alma Linux, Rocky Linux, Oracle Linux, Fedora, and RedHat)
 - Detect processes which updated before but not restarting yet using checkrestart of debian-goodies (Debian and Ubuntu)
-- Offline mode scan with no internet access. (CentOS, Alma Linux, Rocky Linux, Debian, Oracle Linux, Red Hat, and Ubuntu)
+- Offline mode scan with no internet access. (CentOS, Alma Linux, Rocky Linux, Debian, Oracle Linux, Red Hat, Fedora, and Ubuntu)

 ### [Remote, Local scan mode, Server mode](https://vuls.io/docs/en/architecture-remote-local.html)

cache/bolt.go (vendored, 7 changes)

@@ -4,10 +4,11 @@ import (
     "encoding/json"
     "time"

-    "github.com/boltdb/bolt"
+    bolt "go.etcd.io/bbolt"
+    "golang.org/x/xerrors"

     "github.com/future-architect/vuls/logging"
     "github.com/future-architect/vuls/util"
-    "golang.org/x/xerrors"
 )

 // Bolt holds a pointer of bolt.DB
@@ -47,7 +48,7 @@ func (b Bolt) Close() error {
     return b.db.Close()
 }

-// CreateBucketIfNotExists creates a bucket that is specified by arg.
+// CreateBucketIfNotExists creates a bucket that is specified by arg.
 func (b *Bolt) createBucketIfNotExists(name string) error {
     return b.db.Update(func(tx *bolt.Tx) error {
         _, err := tx.CreateBucketIfNotExists([]byte(name))

cache/bolt_test.go (vendored, 3 changes)

@@ -5,7 +5,8 @@ import (
     "reflect"
     "testing"

-    "github.com/boltdb/bolt"
+    bolt "go.etcd.io/bbolt"
+
     "github.com/future-architect/vuls/config"
     "github.com/future-architect/vuls/logging"
     "github.com/future-architect/vuls/models"
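The two cache hunks above swap the archived github.com/boltdb/bolt package for its maintained fork go.etcd.io/bbolt, aliased back to `bolt` so the rest of the code keeps compiling unchanged. A minimal standalone sketch of that pattern (the database path and bucket name here are made up for illustration, not taken from vuls):

```go
package main

import (
	"fmt"
	"log"

	// bbolt exposes the same bolt.DB / bolt.Tx API as the archived boltdb/bolt,
	// so the import alias is the only change callers need.
	bolt "go.etcd.io/bbolt"
)

func main() {
	// Hypothetical database path, for illustration only.
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// Same CreateBucketIfNotExists call the vuls cache uses.
	err = db.Update(func(tx *bolt.Tx) error {
		_, err := tx.CreateBucketIfNotExists([]byte("example-bucket"))
		return err
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("bucket ready")
}
```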

@@ -21,7 +21,7 @@ var Revision string
 // Conf has Configuration
 var Conf Config

-//Config is struct of Configuration
+// Config is struct of Configuration
 type Config struct {
     logging.LogOpts

@@ -42,6 +42,7 @@ type Config struct {
     Exploit    ExploitConf    `json:"exploit,omitempty"`
     Metasploit MetasploitConf `json:"metasploit,omitempty"`
     KEVuln     KEVulnConf     `json:"kevuln,omitempty"`
+    Cti        CtiConf        `json:"cti,omitempty"`

     Slack SlackConf `json:"-"`
     EMail SMTPConf  `json:"-"`
@@ -178,6 +179,7 @@ func (c *Config) ValidateOnReport() bool {
         &Conf.Exploit,
         &Conf.Metasploit,
         &Conf.KEVuln,
+        &Conf.Cti,
     } {
         if err := cnf.Validate(); err != nil {
             errs = append(errs, xerrors.Errorf("Failed to validate %s: %+v", cnf.GetName(), err))
@@ -211,9 +213,11 @@ type WpScanConf struct {

 // ServerInfo has SSH Info, additional CPE packages to scan.
 type ServerInfo struct {
+    BaseName string `toml:"-" json:"-"`
     ServerName string `toml:"-" json:"serverName,omitempty"`
     User string `toml:"user,omitempty" json:"user,omitempty"`
     Host string `toml:"host,omitempty" json:"host,omitempty"`
+    IgnoreIPAddresses []string `toml:"ignoreIPAddresses,omitempty" json:"ignoreIPAddresses,omitempty"`
     JumpServer []string `toml:"jumpServer,omitempty" json:"jumpServer,omitempty"`
     Port string `toml:"port,omitempty" json:"port,omitempty"`
     SSHConfigPath string `toml:"sshConfigPath,omitempty" json:"sshConfigPath,omitempty"`
@@ -236,6 +240,7 @@ type ServerInfo struct {
     Optional map[string]interface{} `toml:"optional,omitempty" json:"optional,omitempty"` // Optional key-value set that will be outputted to JSON
     Lockfiles []string `toml:"lockfiles,omitempty" json:"lockfiles,omitempty"` // ie) path/to/package-lock.json
     FindLock bool `toml:"findLock,omitempty" json:"findLock,omitempty"`
+    FindLockDirs []string `toml:"findLockDirs,omitempty" json:"findLockDirs,omitempty"`
     Type string `toml:"type,omitempty" json:"type,omitempty"` // "pseudo" or ""
     IgnoredJSONKeys []string `toml:"ignoredJSONKeys,omitempty" json:"ignoredJSONKeys,omitempty"`
     WordPress *WordPressConf `toml:"wordpress,omitempty" json:"wordpress,omitempty"`
@@ -300,11 +305,24 @@ func (l Distro) String() string {

 // MajorVersion returns Major version
 func (l Distro) MajorVersion() (int, error) {
-    if l.Family == constant.Amazon {
+    switch l.Family {
+    case constant.Amazon:
         return strconv.Atoi(getAmazonLinuxVersion(l.Release))
-    }
-    if 0 < len(l.Release) {
-        return strconv.Atoi(strings.Split(l.Release, ".")[0])
+    case constant.CentOS:
+        if 0 < len(l.Release) {
+            return strconv.Atoi(strings.Split(strings.TrimPrefix(l.Release, "stream"), ".")[0])
+        }
+    case constant.OpenSUSE:
+        if l.Release != "" {
+            if l.Release == "tumbleweed" {
+                return 0, nil
+            }
+            return strconv.Atoi(strings.Split(l.Release, ".")[0])
+        }
+    default:
+        if 0 < len(l.Release) {
+            return strconv.Atoi(strings.Split(l.Release, ".")[0])
+        }
     }
     return 0, xerrors.New("Release is empty")
 }
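To make the new `MajorVersion` switch above easier to follow, here is a small self-contained sketch of the same parsing idea. It is not the vuls code itself: family names are hard-coded strings instead of the `constant` package identifiers.

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// majorVersion mirrors the branching added above: CentOS releases may be
// prefixed with "stream" (e.g. "stream8"), and openSUSE "tumbleweed" is a
// rolling release with no numeric major version.
func majorVersion(family, release string) (int, error) {
	switch family {
	case "centos":
		return strconv.Atoi(strings.Split(strings.TrimPrefix(release, "stream"), ".")[0])
	case "opensuse":
		if release == "tumbleweed" {
			return 0, nil
		}
		return strconv.Atoi(strings.Split(release, ".")[0])
	default:
		return strconv.Atoi(strings.Split(release, ".")[0])
	}
}

func main() {
	for _, r := range []struct{ family, release string }{
		{"centos", "stream8"},
		{"centos", "7.9.2009"},
		{"opensuse", "tumbleweed"},
		{"ubuntu", "22.04"},
	} {
		v, err := majorVersion(r.family, r.release)
		fmt.Println(r.family, r.release, "->", v, err)
	}
}
```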

@@ -1,10 +1,9 @@
 package config

 // Load loads configuration
-func Load(path, keyPass string) error {
-    var loader Loader
-    loader = TOMLLoader{}
-    return loader.Load(path, keyPass)
+func Load(path string) error {
+    loader := TOMLLoader{}
+    return loader.Load(path)
 }

 // Loader is interface of concrete loader

config/os.go (162 changes)

@@ -41,8 +41,8 @@ func GetEOL(family, release string) (eol EOL, found bool) {
     case constant.Amazon:
         eol, found = map[string]EOL{
             "1": {StandardSupportUntil: time.Date(2023, 6, 30, 23, 59, 59, 0, time.UTC)},
-            "2": {},
-            "2022": {},
+            "2": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
+            "2022": {StandardSupportUntil: time.Date(2026, 6, 30, 23, 59, 59, 0, time.UTC)},
         }[getAmazonLinuxVersion(release)]
     case constant.RedHat:
         // https://access.redhat.com/support/policy/updates/errata
@@ -56,29 +56,38 @@ func GetEOL(family, release string) (eol EOL, found bool) {
             },
             "7": {
                 StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC),
                 ExtendedSupportUntil: time.Date(2026, 6, 30, 23, 59, 59, 0, time.UTC),
             },
             "8": {
                 StandardSupportUntil: time.Date(2029, 5, 31, 23, 59, 59, 0, time.UTC),
                 ExtendedSupportUntil: time.Date(2031, 5, 31, 23, 59, 59, 0, time.UTC),
             },
+            "9": {
+                StandardSupportUntil: time.Date(2032, 5, 31, 23, 59, 59, 0, time.UTC),
+                ExtendedSupportUntil: time.Date(2034, 5, 31, 23, 59, 59, 0, time.UTC),
+            },
         }[major(release)]
     case constant.CentOS:
         // https://en.wikipedia.org/wiki/CentOS#End-of-support_schedule
-        // TODO Stream
         eol, found = map[string]EOL{
-            "3": {Ended: true},
-            "4": {Ended: true},
-            "5": {Ended: true},
-            "6": {Ended: true},
-            "7": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
-            "8": {StandardSupportUntil: time.Date(2021, 12, 31, 23, 59, 59, 0, time.UTC)},
+            "3": {Ended: true},
+            "4": {Ended: true},
+            "5": {Ended: true},
+            "6": {Ended: true},
+            "7": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
+            "8": {StandardSupportUntil: time.Date(2021, 12, 31, 23, 59, 59, 0, time.UTC)},
+            "stream8": {StandardSupportUntil: time.Date(2024, 5, 31, 23, 59, 59, 0, time.UTC)},
+            "stream9": {StandardSupportUntil: time.Date(2027, 5, 31, 23, 59, 59, 0, time.UTC)},
         }[major(release)]
     case constant.Alma:
         eol, found = map[string]EOL{
             "8": {StandardSupportUntil: time.Date(2029, 12, 31, 23, 59, 59, 0, time.UTC)},
+            "9": {StandardSupportUntil: time.Date(2032, 5, 31, 23, 59, 59, 0, time.UTC)},
         }[major(release)]
     case constant.Rocky:
         eol, found = map[string]EOL{
             "8": {StandardSupportUntil: time.Date(2029, 5, 31, 23, 59, 59, 0, time.UTC)},
+            "9": {StandardSupportUntil: time.Date(2032, 5, 31, 23, 59, 59, 0, time.UTC)},
         }[major(release)]
     case constant.Oracle:
         eol, found = map[string]EOL{
@@ -90,13 +99,19 @@ func GetEOL(family, release string) (eol EOL, found bool) {
             "5": {Ended: true},
             "6": {
                 StandardSupportUntil: time.Date(2021, 3, 1, 23, 59, 59, 0, time.UTC),
-                ExtendedSupportUntil: time.Date(2024, 3, 1, 23, 59, 59, 0, time.UTC),
+                ExtendedSupportUntil: time.Date(2024, 6, 1, 23, 59, 59, 0, time.UTC),
             },
             "7": {
                 StandardSupportUntil: time.Date(2024, 7, 1, 23, 59, 59, 0, time.UTC),
+                ExtendedSupportUntil: time.Date(2026, 6, 1, 23, 59, 59, 0, time.UTC),
             },
             "8": {
                 StandardSupportUntil: time.Date(2029, 7, 1, 23, 59, 59, 0, time.UTC),
+                ExtendedSupportUntil: time.Date(2031, 7, 1, 23, 59, 59, 0, time.UTC),
             },
+            "9": {
+                StandardSupportUntil: time.Date(2032, 6, 1, 23, 59, 59, 0, time.UTC),
+                ExtendedSupportUntil: time.Date(2034, 6, 1, 23, 59, 59, 0, time.UTC),
+            },
         }[major(release)]
     case constant.Debian:
@@ -115,18 +130,35 @@ func GetEOL(family, release string) (eol EOL, found bool) {
     case constant.Ubuntu:
         // https://wiki.ubuntu.com/Releases
         eol, found = map[string]EOL{
-            "14.10": {Ended: true},
+            "6.06": {Ended: true},
+            "6.10": {Ended: true},
+            "7.04": {Ended: true},
+            "7.10": {Ended: true},
+            "8.04": {Ended: true},
+            "8.10": {Ended: true},
+            "9.04": {Ended: true},
+            "9.10": {Ended: true},
+            "10.04": {Ended: true},
+            "10.10": {Ended: true},
+            "11.04": {Ended: true},
+            "11.10": {Ended: true},
+            "12.04": {Ended: true},
+            "12.10": {Ended: true},
+            "13.04": {Ended: true},
+            "13.10": {Ended: true},
+            "14.04": {
+                ExtendedSupportUntil: time.Date(2022, 4, 1, 23, 59, 59, 0, time.UTC),
+            },
+            "14.10": {Ended: true},
             "15.04": {Ended: true},
-            "16.10": {Ended: true},
-            "17.04": {Ended: true},
-            "17.10": {Ended: true},
+            "15.10": {Ended: true},
             "16.04": {
                 StandardSupportUntil: time.Date(2021, 4, 1, 23, 59, 59, 0, time.UTC),
                 ExtendedSupportUntil: time.Date(2024, 4, 1, 23, 59, 59, 0, time.UTC),
             },
+            "16.10": {Ended: true},
+            "17.04": {Ended: true},
+            "17.10": {Ended: true},
             "18.04": {
                 StandardSupportUntil: time.Date(2023, 4, 1, 23, 59, 59, 0, time.UTC),
                 ExtendedSupportUntil: time.Date(2028, 4, 1, 23, 59, 59, 0, time.UTC),
@@ -136,22 +168,99 @@ func GetEOL(family, release string) (eol EOL, found bool) {
             "19.10": {Ended: true},
             "20.04": {
                 StandardSupportUntil: time.Date(2025, 4, 1, 23, 59, 59, 0, time.UTC),
                 ExtendedSupportUntil: time.Date(2030, 4, 1, 23, 59, 59, 0, time.UTC),
             },
             "20.10": {
                 StandardSupportUntil: time.Date(2021, 7, 22, 23, 59, 59, 0, time.UTC),
             },
             "21.04": {
-                StandardSupportUntil: time.Date(2022, 1, 22, 23, 59, 59, 0, time.UTC),
+                StandardSupportUntil: time.Date(2022, 1, 20, 23, 59, 59, 0, time.UTC),
             },
             "21.10": {
-                StandardSupportUntil: time.Date(2022, 7, 1, 23, 59, 59, 0, time.UTC),
+                StandardSupportUntil: time.Date(2022, 7, 14, 23, 59, 59, 0, time.UTC),
             },
+            "22.04": {
+                StandardSupportUntil: time.Date(2027, 4, 1, 23, 59, 59, 0, time.UTC),
+                ExtendedSupportUntil: time.Date(2032, 4, 1, 23, 59, 59, 0, time.UTC),
+            },
+            "22.10": {
+                StandardSupportUntil: time.Date(2023, 7, 20, 23, 59, 59, 0, time.UTC),
+            },
+            // "23.04": {
+            //     StandardSupportUntil: time.Date(2024, 1, 31, 23, 59, 59, 0, time.UTC),
+            // },
         }[release]
+    case constant.OpenSUSE:
+        // https://en.opensuse.org/Lifetime
+        eol, found = map[string]EOL{
+            "10.2": {Ended: true},
+            "10.3": {Ended: true},
+            "11.0": {Ended: true},
+            "11.1": {Ended: true},
+            "11.2": {Ended: true},
+            "11.3": {Ended: true},
+            "11.4": {Ended: true},
+            "12.1": {Ended: true},
+            "12.2": {Ended: true},
+            "12.3": {Ended: true},
+            "13.1": {Ended: true},
+            "13.2": {Ended: true},
+            "tumbleweed": {},
+        }[release]
+    case constant.OpenSUSELeap:
+        // https://en.opensuse.org/Lifetime
+        eol, found = map[string]EOL{
+            "42.1": {Ended: true},
+            "42.2": {Ended: true},
+            "42.3": {Ended: true},
+            "15.0": {Ended: true},
+            "15.1": {Ended: true},
+            "15.2": {Ended: true},
+            "15.3": {StandardSupportUntil: time.Date(2022, 11, 30, 23, 59, 59, 0, time.UTC)},
+            "15.4": {StandardSupportUntil: time.Date(2023, 11, 30, 23, 59, 59, 0, time.UTC)},
+        }[release]
+    case constant.SUSEEnterpriseServer:
+        //TODO
+        // https://www.suse.com/lifecycle
+        eol, found = map[string]EOL{
+            "11": {Ended: true},
+            "11.1": {Ended: true},
+            "11.2": {Ended: true},
+            "11.3": {Ended: true},
+            "11.4": {Ended: true},
+            "12": {Ended: true},
+            "12.1": {Ended: true},
+            "12.2": {Ended: true},
+            "12.3": {Ended: true},
+            "12.4": {Ended: true},
+            "12.5": {StandardSupportUntil: time.Date(2024, 10, 31, 23, 59, 59, 0, time.UTC)},
+            "15": {Ended: true},
+            "15.1": {Ended: true},
+            "15.2": {Ended: true},
+            "15.3": {StandardSupportUntil: time.Date(2022, 11, 30, 23, 59, 59, 0, time.UTC)},
+            "15.4": {StandardSupportUntil: time.Date(2023, 11, 30, 23, 59, 59, 0, time.UTC)},
+        }[release]
+    case constant.SUSEEnterpriseDesktop:
+        // https://www.suse.com/lifecycle
+        eol, found = map[string]EOL{
+            "11": {Ended: true},
+            "11.1": {Ended: true},
+            "11.2": {Ended: true},
+            "11.3": {Ended: true},
+            "11.4": {Ended: true},
+            "12": {Ended: true},
+            "12.1": {Ended: true},
+            "12.2": {Ended: true},
+            "12.3": {Ended: true},
+            "12.4": {Ended: true},
+            "15": {Ended: true},
+            "15.1": {Ended: true},
+            "15.2": {Ended: true},
+            "15.3": {StandardSupportUntil: time.Date(2022, 11, 30, 23, 59, 59, 0, time.UTC)},
+            "15.4": {StandardSupportUntil: time.Date(2023, 11, 30, 23, 59, 59, 0, time.UTC)},
+        }[release]
     case constant.Alpine:
         // https://github.com/aquasecurity/trivy/blob/master/pkg/detector/ospkg/alpine/alpine.go#L19
         // https://wiki.alpinelinux.org/wiki/Alpine_Linux:Releases
         // https://alpinelinux.org/releases/
         eol, found = map[string]EOL{
             "2.0": {Ended: true},
             "2.1": {Ended: true},
@@ -175,6 +284,10 @@ func GetEOL(family, release string) (eol EOL, found bool) {
             "3.11": {StandardSupportUntil: time.Date(2021, 11, 1, 23, 59, 59, 0, time.UTC)},
             "3.12": {StandardSupportUntil: time.Date(2022, 5, 1, 23, 59, 59, 0, time.UTC)},
             "3.13": {StandardSupportUntil: time.Date(2022, 11, 1, 23, 59, 59, 0, time.UTC)},
+            "3.14": {StandardSupportUntil: time.Date(2023, 5, 1, 23, 59, 59, 0, time.UTC)},
+            "3.15": {StandardSupportUntil: time.Date(2023, 11, 1, 23, 59, 59, 0, time.UTC)},
+            "3.16": {StandardSupportUntil: time.Date(2024, 5, 23, 23, 59, 59, 0, time.UTC)},
+            "3.17": {StandardSupportUntil: time.Date(2024, 11, 22, 23, 59, 59, 0, time.UTC)},
         }[majorDotMinor(release)]
     case constant.FreeBSD:
         // https://www.freebsd.org/security/
@@ -184,9 +297,20 @@ func GetEOL(family, release string) (eol EOL, found bool) {
             "9": {Ended: true},
             "10": {Ended: true},
             "11": {StandardSupportUntil: time.Date(2021, 9, 30, 23, 59, 59, 0, time.UTC)},
-            "12": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
+            "12": {StandardSupportUntil: time.Date(2023, 12, 31, 23, 59, 59, 0, time.UTC)},
+            "13": {StandardSupportUntil: time.Date(2026, 1, 31, 23, 59, 59, 0, time.UTC)},
         }[major(release)]
+    case constant.Fedora:
+        // https://docs.fedoraproject.org/en-US/releases/eol/
+        // https://endoflife.date/fedora
+        eol, found = map[string]EOL{
+            "32": {StandardSupportUntil: time.Date(2021, 5, 24, 23, 59, 59, 0, time.UTC)},
+            "33": {StandardSupportUntil: time.Date(2021, 11, 29, 23, 59, 59, 0, time.UTC)},
+            "34": {StandardSupportUntil: time.Date(2022, 6, 6, 23, 59, 59, 0, time.UTC)},
+            "35": {StandardSupportUntil: time.Date(2022, 12, 12, 23, 59, 59, 0, time.UTC)},
+            "36": {StandardSupportUntil: time.Date(2023, 5, 16, 23, 59, 59, 0, time.UTC)},
+            "37": {StandardSupportUntil: time.Date(2023, 12, 15, 23, 59, 59, 0, time.UTC)},
+        }[major(release)]
     }
     return
 }
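A short usage sketch for the expanded `GetEOL` table. The `GetEOL` signature, the `StandardSupportUntil` field, and the `constant.Fedora` name are taken from the hunks above and the constant.go hunk further down; everything else here is illustrative, not code from this PR.

```go
package main

import (
	"fmt"
	"time"

	"github.com/future-architect/vuls/config"
	"github.com/future-architect/vuls/constant"
)

func main() {
	// Look up the end-of-life entry added for Fedora 37 in this change set.
	eol, found := config.GetEOL(constant.Fedora, "37")
	if !found {
		fmt.Println("unknown release")
		return
	}
	fmt.Println("standard support until:", eol.StandardSupportUntil)
	fmt.Println("already ended:", time.Now().After(eol.StandardSupportUntil))
}
```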

@@ -53,7 +53,23 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             extEnded: false,
             found: true,
         },
+        {
+            name: "amazon linux 2024 not found",
+            fields: fields{family: Amazon, release: "2024 (Amazon Linux)"},
+            now: time.Date(2023, 7, 1, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: false,
+        },
         //RHEL
+        {
+            name: "RHEL6 eol",
+            fields: fields{family: RedHat, release: "6"},
+            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            stdEnded: true,
+            extEnded: false,
+            found: true,
+        },
         {
             name: "RHEL7 supported",
             fields: fields{family: RedHat, release: "7"},
@@ -71,22 +87,30 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             found: true,
         },
         {
-            name: "RHEL6 eol",
-            fields: fields{family: RedHat, release: "6"},
+            name: "RHEL9 supported",
+            fields: fields{family: RedHat, release: "9"},
             now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
-            stdEnded: true,
+            stdEnded: false,
             extEnded: false,
             found: true,
         },
         {
-            name: "RHEL9 not found",
-            fields: fields{family: RedHat, release: "9"},
+            name: "RHEL10 not found",
+            fields: fields{family: RedHat, release: "10"},
             now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
             found: false,
         },
         //CentOS
+        {
+            name: "CentOS 6 eol",
+            fields: fields{family: CentOS, release: "6"},
+            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            stdEnded: true,
+            extEnded: true,
+            found: true,
+        },
         {
             name: "CentOS 7 supported",
             fields: fields{family: CentOS, release: "7"},
@@ -104,16 +128,24 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             found: true,
         },
         {
-            name: "CentOS 6 eol",
-            fields: fields{family: CentOS, release: "6"},
+            name: "CentOS stream8 supported",
+            fields: fields{family: CentOS, release: "stream8"},
             now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
-            stdEnded: true,
-            extEnded: true,
+            stdEnded: false,
+            extEnded: false,
             found: true,
         },
         {
-            name: "CentOS 9 not found",
-            fields: fields{family: CentOS, release: "9"},
+            name: "CentOS stream9 supported",
+            fields: fields{family: CentOS, release: "stream9"},
             now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
             found: true,
         },
+        {
+            name: "CentOS stream10 Not Found",
+            fields: fields{family: CentOS, release: "stream10"},
+            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
@@ -129,16 +161,16 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             found: true,
         },
         {
-            name: "Alma Linux 8 EOL",
-            fields: fields{family: Alma, release: "8"},
-            now: time.Date(2029, 2, 1, 0, 0, 0, 0, time.UTC),
+            name: "Alma Linux 9 supported",
+            fields: fields{family: Alma, release: "9"},
+            now: time.Date(2021, 7, 2, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
             found: true,
         },
         {
-            name: "Alma Linux 9 Not Found",
-            fields: fields{family: Alma, release: "9"},
+            name: "Alma Linux 10 Not Found",
+            fields: fields{family: Alma, release: "10"},
             now: time.Date(2021, 7, 2, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
@@ -154,22 +186,30 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             found: true,
         },
         {
-            name: "Rocky Linux 8 EOL",
-            fields: fields{family: Rocky, release: "8"},
-            now: time.Date(2026, 2, 1, 0, 0, 0, 0, time.UTC),
+            name: "Rocky Linux 9 supported",
+            fields: fields{family: Rocky, release: "9"},
+            now: time.Date(2021, 7, 2, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
             found: true,
         },
         {
-            name: "Rocky Linux 9 Not Found",
-            fields: fields{family: Rocky, release: "9"},
+            name: "Rocky Linux 10 Not Found",
+            fields: fields{family: Rocky, release: "10"},
             now: time.Date(2021, 7, 2, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
             found: false,
         },
         //Oracle
+        {
+            name: "Oracle Linux 6 eol",
+            fields: fields{family: Oracle, release: "6"},
+            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
         {
             name: "Oracle Linux 7 supported",
             fields: fields{family: Oracle, release: "7"},
@@ -187,16 +227,16 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             found: true,
         },
         {
-            name: "Oracle Linux 6 eol",
-            fields: fields{family: Oracle, release: "6"},
+            name: "Oracle Linux 9 supported",
+            fields: fields{family: Oracle, release: "9"},
             now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
             found: true,
         },
         {
-            name: "Oracle Linux 9 not found",
-            fields: fields{family: Oracle, release: "9"},
+            name: "Oracle Linux 10 not found",
+            fields: fields{family: Oracle, release: "10"},
             now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
@@ -204,28 +244,12 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
         },
         //Ubuntu
         {
-            name: "Ubuntu 18.04 supported",
-            fields: fields{family: Ubuntu, release: "18.04"},
+            name: "Ubuntu 5.10 not found",
+            fields: fields{family: Ubuntu, release: "5.10"},
             now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            found: false,
             stdEnded: false,
             extEnded: false,
-            found: true,
-        },
-        {
-            name: "Ubuntu 18.04 ext supported",
-            fields: fields{family: Ubuntu, release: "18.04"},
-            now: time.Date(2025, 1, 6, 23, 59, 59, 0, time.UTC),
-            stdEnded: true,
-            extEnded: false,
-            found: true,
-        },
-        {
-            name: "Ubuntu 16.04 supported",
-            fields: fields{family: Ubuntu, release: "18.04"},
-            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
-            stdEnded: false,
-            extEnded: false,
-            found: true,
         },
         {
             name: "Ubuntu 14.04 eol",
@@ -244,12 +268,44 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             found: true,
         },
         {
-            name: "Ubuntu 12.10 not found",
-            fields: fields{family: Ubuntu, release: "12.10"},
+            name: "Ubuntu 16.04 supported",
+            fields: fields{family: Ubuntu, release: "18.04"},
             now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
-            found: false,
             stdEnded: false,
             extEnded: false,
+            found: true,
+        },
+        {
+            name: "Ubuntu 18.04 supported",
+            fields: fields{family: Ubuntu, release: "18.04"},
+            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Ubuntu 18.04 ext supported",
+            fields: fields{family: Ubuntu, release: "18.04"},
+            now: time.Date(2025, 1, 6, 23, 59, 59, 0, time.UTC),
+            stdEnded: true,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Ubuntu 20.04 supported",
+            fields: fields{family: Ubuntu, release: "20.04"},
+            now: time.Date(2021, 5, 1, 23, 59, 59, 0, time.UTC),
+            found: true,
+            stdEnded: false,
+            extEnded: false,
+        },
+        {
+            name: "Ubuntu 20.04 ext supported",
+            fields: fields{family: Ubuntu, release: "20.04"},
+            now: time.Date(2025, 5, 1, 23, 59, 59, 0, time.UTC),
+            found: true,
+            stdEnded: true,
+            extEnded: false,
         },
         {
             name: "Ubuntu 20.10 supported",
@@ -267,6 +323,30 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             stdEnded: false,
             extEnded: false,
         },
+        {
+            name: "Ubuntu 21.10 supported",
+            fields: fields{family: Ubuntu, release: "21.10"},
+            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            found: true,
+            stdEnded: false,
+            extEnded: false,
+        },
+        {
+            name: "Ubuntu 22.04 supported",
+            fields: fields{family: Ubuntu, release: "22.04"},
+            now: time.Date(2022, 5, 1, 23, 59, 59, 0, time.UTC),
+            found: true,
+            stdEnded: false,
+            extEnded: false,
+        },
+        {
+            name: "Ubuntu 22.10 supported",
+            fields: fields{family: Ubuntu, release: "22.10"},
+            now: time.Date(2022, 5, 1, 23, 59, 59, 0, time.UTC),
+            found: true,
+            stdEnded: false,
+            extEnded: false,
+        },
         //Debian
         {
             name: "Debian 9 supported",
@@ -342,14 +422,54 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             found: true,
         },
         {
-            name: "Alpine 3.14 not found",
+            name: "Alpine 3.14 supported",
             fields: fields{family: Alpine, release: "3.14"},
-            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            now: time.Date(2022, 5, 1, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
             found: true,
         },
+        {
+            name: "Alpine 3.15 supported",
+            fields: fields{family: Alpine, release: "3.15"},
+            now: time.Date(2022, 11, 1, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Alpine 3.16 supported",
+            fields: fields{family: Alpine, release: "3.16"},
+            now: time.Date(2024, 5, 23, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Alpine 3.17 supported",
+            fields: fields{family: Alpine, release: "3.17"},
+            now: time.Date(2022, 1, 14, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Alpine 3.18 not found",
+            fields: fields{family: Alpine, release: "3.18"},
+            now: time.Date(2022, 1, 14, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: false,
+        },
         // freebsd
+        {
+            name: "freebsd 10 eol",
+            fields: fields{family: FreeBSD, release: "10"},
+            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            stdEnded: true,
+            extEnded: true,
+            found: true,
+        },
         {
             name: "freebsd 11 supported",
             fields: fields{family: FreeBSD, release: "11"},
@@ -382,14 +502,111 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
             extEnded: false,
             found: true,
         },
+        // Fedora
         {
-            name: "freebsd 10 eol",
-            fields: fields{family: FreeBSD, release: "10"},
-            now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
+            name: "Fedora 32 supported",
+            fields: fields{family: Fedora, release: "32"},
+            now: time.Date(2021, 5, 24, 23, 59, 59, 0, time.UTC),
             stdEnded: false,
             extEnded: false,
             found: true,
         },
+        {
+            name: "Fedora 32 eol since 2021-5-25",
+            fields: fields{family: Fedora, release: "32"},
+            now: time.Date(2021, 5, 25, 0, 0, 0, 0, time.UTC),
+            stdEnded: true,
+            extEnded: true,
+            found: true,
+        },
+        {
+            name: "Fedora 33 supported",
+            fields: fields{family: Fedora, release: "33"},
+            now: time.Date(2021, 11, 29, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Fedora 33 eol since 2021-11-30",
+            fields: fields{family: Fedora, release: "32"},
+            now: time.Date(2021, 11, 30, 0, 0, 0, 0, time.UTC),
+            stdEnded: true,
+            extEnded: true,
+            found: true,
+        },
+        {
+            name: "Fedora 34 supported",
+            fields: fields{family: Fedora, release: "34"},
+            now: time.Date(2022, 6, 6, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Fedora 34 eol since 2022-6-7",
+            fields: fields{family: Fedora, release: "34"},
+            now: time.Date(2022, 6, 7, 0, 0, 0, 0, time.UTC),
+            stdEnded: true,
+            extEnded: true,
+            found: true,
+        },
+        {
+            name: "Fedora 35 supported",
+            fields: fields{family: Fedora, release: "35"},
+            now: time.Date(2022, 12, 12, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Fedora 35 eol since 2022-12-13",
+            fields: fields{family: Fedora, release: "35"},
+            now: time.Date(2022, 12, 13, 0, 0, 0, 0, time.UTC),
+            stdEnded: true,
+            extEnded: true,
+            found: true,
+        },
+        {
+            name: "Fedora 36 supported",
+            fields: fields{family: Fedora, release: "36"},
+            now: time.Date(2023, 5, 16, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Fedora 36 eol since 2023-05-17",
+            fields: fields{family: Fedora, release: "36"},
+            now: time.Date(2023, 5, 17, 0, 0, 0, 0, time.UTC),
+            stdEnded: true,
+            extEnded: true,
+            found: true,
+        },
+        {
+            name: "Fedora 37 supported",
+            fields: fields{family: Fedora, release: "37"},
+            now: time.Date(2023, 12, 15, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: true,
+        },
+        {
+            name: "Fedora 37 eol since 2023-12-16",
+            fields: fields{family: Fedora, release: "37"},
+            now: time.Date(2023, 12, 16, 0, 0, 0, 0, time.UTC),
+            stdEnded: true,
+            extEnded: true,
+            found: true,
+        },
+        {
+            name: "Fedora 38 not found",
+            fields: fields{family: Fedora, release: "38"},
+            now: time.Date(2023, 12, 15, 23, 59, 59, 0, time.UTC),
+            stdEnded: false,
+            extEnded: false,
+            found: false,
+        },
     }
     for _, tt := range tests {
         t.Run(tt.name, func(t *testing.T) {

@@ -1,13 +1,17 @@
 package config

 import (
+    "fmt"
+    "net"
     "regexp"
     "strings"

     "github.com/BurntSushi/toml"
-    "github.com/future-architect/vuls/constant"
+    "github.com/c-robinson/iplib"
     "github.com/knqyf263/go-cpe/naming"
     "golang.org/x/xerrors"
+
+    "github.com/future-architect/vuls/constant"
 )

 // TOMLLoader loads config
@@ -15,7 +19,7 @@ type TOMLLoader struct {
 }

 // Load load the configuration TOML file specified by path arg.
-func (c TOMLLoader) Load(pathToToml, _ string) error {
+func (c TOMLLoader) Load(pathToToml string) error {
     // util.Log.Infof("Loading config: %s", pathToToml)
     if _, err := toml.DecodeFile(pathToToml, &Conf); err != nil {
         return err
@@ -28,13 +32,27 @@ func (c TOMLLoader) Load(pathToToml, _ string) error {
         &Conf.Exploit,
         &Conf.Metasploit,
         &Conf.KEVuln,
+        &Conf.Cti,
     } {
         cnf.Init()
     }

     index := 0
+    servers := map[string]ServerInfo{}
     for name, server := range Conf.Servers {
-        server.ServerName = name
+        server.BaseName = name
+
+        if server.Type != constant.ServerTypePseudo && server.Host == "" {
+            return xerrors.New("Failed to find hosts. err: server.host is empty")
+        }
+        serverHosts, err := hosts(server.Host, server.IgnoreIPAddresses)
+        if err != nil {
+            return xerrors.Errorf("Failed to find hosts. err: %w", err)
+        }
+        if len(serverHosts) == 0 {
+            return xerrors.New("Failed to find hosts. err: zero enumerated hosts")
+        }

         if err := setDefaultIfEmpty(&server); err != nil {
             return xerrors.Errorf("Failed to set default value to config. server: %s, err: %w", name, err)
         }
@@ -93,20 +111,17 @@ func (c TOMLLoader) Load(pathToToml, _ string) error {
             for _, reg := range cont.IgnorePkgsRegexp {
                 _, err := regexp.Compile(reg)
                 if err != nil {
-                    return xerrors.Errorf("Failed to parse %s in %s@%s. err: %w",
-                        reg, contName, name, err)
+                    return xerrors.Errorf("Failed to parse %s in %s@%s. err: %w", reg, contName, name, err)
                 }
             }
         }

         for ownerRepo, githubSetting := range server.GitHubRepos {
             if ss := strings.Split(ownerRepo, "/"); len(ss) != 2 {
-                return xerrors.Errorf("Failed to parse GitHub owner/repo: %s in %s",
-                    ownerRepo, name)
+                return xerrors.Errorf("Failed to parse GitHub owner/repo: %s in %s", ownerRepo, name)
             }
             if githubSetting.Token == "" {
-                return xerrors.Errorf("GitHub owner/repo: %s in %s token is empty",
-                    ownerRepo, name)
+                return xerrors.Errorf("GitHub owner/repo: %s in %s token is empty", ownerRepo, name)
             }
         }

@@ -119,9 +134,7 @@ func (c TOMLLoader) Load(pathToToml, _ string) error {
             case "base", "updates":
                 // nop
             default:
-                return xerrors.Errorf(
-                    "For now, enablerepo have to be base or updates: %s",
-                    server.Enablerepo)
+                return xerrors.Errorf("For now, enablerepo have to be base or updates: %s", server.Enablerepo)
             }
         }
     }
@@ -130,37 +143,103 @@ func (c TOMLLoader) Load(pathToToml, _ string) error {
             server.PortScan.IsUseExternalScanner = true
         }

-        server.LogMsgAnsiColor = Colors[index%len(Colors)]
-        index++
-
-        Conf.Servers[name] = server
+        if !isCIDRNotation(server.Host) {
+            server.ServerName = name
+            servers[server.ServerName] = server
+            continue
+        }
+        for _, host := range serverHosts {
+            server.Host = host
+            server.ServerName = fmt.Sprintf("%s(%s)", name, host)
+            server.LogMsgAnsiColor = Colors[index%len(Colors)]
+            index++
+            servers[server.ServerName] = server
+        }
     }
+    Conf.Servers = servers

     return nil
 }

+func hosts(host string, ignores []string) ([]string, error) {
+    hostMap := map[string]struct{}{}
+    hosts, err := enumerateHosts(host)
+    if err != nil {
+        return nil, xerrors.Errorf("Failed to enumarate hosts. err: %w", err)
+    }
+    for _, host := range hosts {
+        hostMap[host] = struct{}{}
+    }
+
+    for _, ignore := range ignores {
+        hosts, err := enumerateHosts(ignore)
+        if err != nil {
+            return nil, xerrors.Errorf("Failed to enumarate hosts. err: %w", err)
+        }
+        if len(hosts) == 1 && net.ParseIP(hosts[0]) == nil {
+            return nil, xerrors.Errorf("Failed to ignore hosts. err: a non-IP address has been entered in ignoreIPAddress")
+        }
+        for _, host := range hosts {
+            delete(hostMap, host)
+        }
+    }
+
+    hosts = []string{}
+    for host := range hostMap {
+        hosts = append(hosts, host)
+    }
+    return hosts, nil
+}
+
+func enumerateHosts(host string) ([]string, error) {
+    if !isCIDRNotation(host) {
+        return []string{host}, nil
+    }
+
+    ipAddr, ipNet, err := net.ParseCIDR(host)
+    if err != nil {
+        return nil, xerrors.Errorf("Failed to parse CIDR. err: %w", err)
+    }
+    maskLen, _ := ipNet.Mask.Size()
+
+    addrs := []string{}
+    if net.ParseIP(ipAddr.String()).To4() != nil {
+        n := iplib.NewNet4(ipAddr, int(maskLen))
+        for _, addr := range n.Enumerate(int(n.Count()), 0) {
+            addrs = append(addrs, addr.String())
+        }
+    } else if net.ParseIP(ipAddr.String()).To16() != nil {
+        n := iplib.NewNet6(ipAddr, int(maskLen), 0)
+        if !n.Count().IsInt64() {
+            return nil, xerrors.Errorf("Failed to enumerate IP address. err: mask bitsize too big")
+        }
+        for _, addr := range n.Enumerate(int(n.Count().Int64()), 0) {
+            addrs = append(addrs, addr.String())
+        }
+    }
+    return addrs, nil
+}
+
+func isCIDRNotation(host string) bool {
+    ss := strings.Split(host, "/")
+    if len(ss) == 1 || net.ParseIP(ss[0]) == nil {
+        return false
+    }
+    return true
+}
+
 func setDefaultIfEmpty(server *ServerInfo) error {
     if server.Type != constant.ServerTypePseudo {
-        if len(server.Host) == 0 {
-            return xerrors.Errorf("server.host is empty")
-        }
-
         if len(server.JumpServer) == 0 {
             server.JumpServer = Conf.Default.JumpServer
         }

         if server.Port == "" {
-            if Conf.Default.Port != "" {
-                server.Port = Conf.Default.Port
-            } else {
-                server.Port = "22"
-            }
+            server.Port = Conf.Default.Port
         }

         if server.User == "" {
             server.User = Conf.Default.User
             if server.User == "" && server.Port != "local" {
                 return xerrors.Errorf("server.user is empty")
             }
         }

         if server.SSHConfigPath == "" {
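The new `hosts`/`enumerateHosts` helpers above expand a CIDR-notation host entry into individual scan targets via github.com/c-robinson/iplib. A stripped-down standalone sketch of the IPv4 path, mirroring the calls used in the diff (this is not the vuls function itself):

```go
package main

import (
	"fmt"
	"net"

	"github.com/c-robinson/iplib"
)

// enumerateIPv4 expands an IPv4 CIDR such as "192.168.1.1/30" into its usable
// addresses, using the same iplib calls the diff above relies on.
func enumerateIPv4(cidr string) ([]string, error) {
	ipAddr, ipNet, err := net.ParseCIDR(cidr)
	if err != nil {
		return nil, err
	}
	maskLen, _ := ipNet.Mask.Size()

	n := iplib.NewNet4(ipAddr, maskLen)
	addrs := []string{}
	for _, addr := range n.Enumerate(int(n.Count()), 0) {
		addrs = append(addrs, addr.String())
	}
	return addrs, nil
}

func main() {
	addrs, err := enumerateIPv4("192.168.1.1/30")
	if err != nil {
		panic(err)
	}
	// Matches the expectation in the TestHosts cases below:
	// a /30 yields 192.168.1.1 and 192.168.1.2.
	fmt.Println(addrs)
}
```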

@@ -1,9 +1,102 @@
 package config

 import (
+    "reflect"
+    "sort"
     "testing"
 )

+func TestHosts(t *testing.T) {
+    var tests = []struct {
+        in       string
+        ignore   []string
+        expected []string
+        err      bool
+    }{
+        {
+            in: "127.0.0.1",
+            expected: []string{"127.0.0.1"},
+            err: false,
+        },
+        {
+            in: "127.0.0.1",
+            ignore: []string{"127.0.0.1"},
+            expected: []string{},
+            err: false,
+        },
+        {
+            in: "ssh/host",
+            expected: []string{"ssh/host"},
+            err: false,
+        },
+        {
+            in: "192.168.1.1/30",
+            expected: []string{"192.168.1.1", "192.168.1.2"},
+            err: false,
+        },
+        {
+            in: "192.168.1.1/30",
+            ignore: []string{"192.168.1.1"},
+            expected: []string{"192.168.1.2"},
+            err: false,
+        },
+        {
+            in: "192.168.1.1/30",
+            ignore: []string{"ignore"},
+            err: true,
+        },
+        {
+            in: "192.168.1.1/30",
+            ignore: []string{"192.168.1.1/30"},
+            expected: []string{},
+            err: false,
+        },
+        {
+            in: "192.168.1.1/31",
+            expected: []string{"192.168.1.0", "192.168.1.1"},
+            err: false,
+        },
+        {
+            in: "192.168.1.1/32",
+            expected: []string{"192.168.1.1"},
+            err: false,
+        },
+        {
+            in: "2001:4860:4860::8888/126",
+            expected: []string{"2001:4860:4860::8888", "2001:4860:4860::8889", "2001:4860:4860::888a", "2001:4860:4860::888b"},
+            err: false,
+        },
+        {
+            in: "2001:4860:4860::8888/127",
+            expected: []string{"2001:4860:4860::8888", "2001:4860:4860::8889"},
+            err: false,
+        },
+        {
+            in: "2001:4860:4860::8888/128",
+            expected: []string{"2001:4860:4860::8888"},
+            err: false,
+        },
+        {
+            in: "2001:4860:4860::8888/32",
+            err: true,
+        },
+    }
+    for i, tt := range tests {
+        actual, err := hosts(tt.in, tt.ignore)
+        sort.Slice(actual, func(i, j int) bool { return actual[i] < actual[j] })
+        if err != nil && !tt.err {
+            t.Errorf("[%d] unexpected error occurred, in: %s act: %s, exp: %s",
+                i, tt.in, actual, tt.expected)
+        } else if err == nil && tt.err {
+            t.Errorf("[%d] expected error is not occurred, in: %s act: %s, exp: %s",
+                i, tt.in, actual, tt.expected)
+        }
+        if !reflect.DeepEqual(actual, tt.expected) {
+            t.Errorf("[%d] in: %s, actual: %q, expected: %q", i, tt.in, actual, tt.expected)
+        }
+    }
+}
+
 func TestToCpeURI(t *testing.T) {
     var tests = []struct {
         in string
@@ -301,3 +301,30 @@ func (cnf *KEVulnConf) Init() {
|
||||
cnf.setDefault("go-kev.sqlite3")
|
||||
cnf.DebugSQL = Conf.DebugSQL
|
||||
}
|
||||
|
||||
// CtiConf is go-cti config
|
||||
type CtiConf struct {
|
||||
VulnDict
|
||||
}
|
||||
|
||||
const ctiDBType = "CTI_TYPE"
|
||||
const ctiDBURL = "CTI_URL"
|
||||
const ctiDBPATH = "CTI_SQLITE3_PATH"
|
||||
|
||||
// Init set options with the following priority.
|
||||
// 1. Environment variable
|
||||
// 2. config.toml
|
||||
func (cnf *CtiConf) Init() {
|
||||
cnf.Name = "cti"
|
||||
if os.Getenv(ctiDBType) != "" {
|
||||
cnf.Type = os.Getenv(ctiDBType)
|
||||
}
|
||||
if os.Getenv(ctiDBURL) != "" {
|
||||
cnf.URL = os.Getenv(ctiDBURL)
|
||||
}
|
||||
if os.Getenv(ctiDBPATH) != "" {
|
||||
cnf.SQLite3Path = os.Getenv(ctiDBPATH)
|
||||
}
|
||||
cnf.setDefault("go-cti.sqlite3")
|
||||
cnf.DebugSQL = Conf.DebugSQL
|
||||
}
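CtiConf.Init repeats the pattern used by the other dictionary configs: each connection setting can be overridden by an environment variable (CTI_TYPE, CTI_URL, CTI_SQLITE3_PATH) and falls back to the config.toml value only when the variable is unset. A small, hypothetical helper expressing the same precedence:

package main

import (
	"fmt"
	"os"
)

// overrideFromEnv keeps the config.toml value unless the environment variable is set.
func overrideFromEnv(current *string, envKey string) {
	if v := os.Getenv(envKey); v != "" {
		*current = v
	}
}

func main() {
	dbType := "sqlite3" // value read from config.toml
	os.Setenv("CTI_TYPE", "redis")
	overrideFromEnv(&dbType, "CTI_TYPE")
	fmt.Println(dbType) // redis: the environment variable wins
}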
|
||||
|
||||
@@ -24,7 +24,7 @@ const (
|
||||
Rocky = "rocky"
|
||||
|
||||
// Fedora is
|
||||
// Fedora = "fedora"
|
||||
Fedora = "fedora"
|
||||
|
||||
// Amazon is
|
||||
Amazon = "amazon"
|
||||
@@ -53,9 +53,6 @@ const (
|
||||
// SUSEEnterpriseDesktop is
|
||||
SUSEEnterpriseDesktop = "suse.linux.enterprise.desktop"
|
||||
|
||||
// SUSEOpenstackCloud is
|
||||
SUSEOpenstackCloud = "suse.openstack.cloud"
|
||||
|
||||
// Alpine is
|
||||
Alpine = "alpine"
|
||||
|
||||
|
||||
33
contrib/Dockerfile
Normal file
@@ -0,0 +1,33 @@
|
||||
FROM golang:alpine as builder
|
||||
|
||||
RUN apk add --no-cache \
|
||||
git \
|
||||
make \
|
||||
gcc \
|
||||
musl-dev
|
||||
|
||||
ENV REPOSITORY github.com/future-architect/vuls
|
||||
COPY . $GOPATH/src/$REPOSITORY
|
||||
RUN cd $GOPATH/src/$REPOSITORY && \
|
||||
make build-scanner && mv vuls $GOPATH/bin && \
|
||||
make build-trivy-to-vuls && mv trivy-to-vuls $GOPATH/bin && \
|
||||
make build-future-vuls && mv future-vuls $GOPATH/bin
|
||||
|
||||
FROM alpine:3.15
|
||||
|
||||
ENV LOGDIR /var/log/vuls
|
||||
ENV WORKDIR /vuls
|
||||
|
||||
RUN apk add --no-cache \
|
||||
openssh-client \
|
||||
ca-certificates \
|
||||
git \
|
||||
nmap \
|
||||
&& mkdir -p $WORKDIR $LOGDIR
|
||||
|
||||
COPY --from=builder /go/bin/vuls /go/bin/trivy-to-vuls /go/bin/future-vuls /usr/local/bin/
|
||||
COPY --from=aquasec/trivy:latest /usr/local/bin/trivy /usr/local/bin/trivy
|
||||
|
||||
VOLUME ["$WORKDIR", "$LOGDIR"]
|
||||
WORKDIR $WORKDIR
|
||||
ENV PWD $WORKDIR
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
@@ -21,6 +22,7 @@ var (
|
||||
serverUUID string
|
||||
groupID int64
|
||||
token string
|
||||
tags []string
|
||||
url string
|
||||
)
|
||||
|
||||
@@ -47,6 +49,9 @@ func main() {
|
||||
if len(token) == 0 {
|
||||
token = os.Getenv("VULS_TOKEN")
|
||||
}
|
||||
if len(tags) == 0 {
|
||||
tags = strings.Split(os.Getenv("VULS_TAGS"), ",")
|
||||
}
|
||||
|
||||
var scanResultJSON []byte
|
||||
if stdIn {
|
||||
@@ -69,6 +74,12 @@ func main() {
|
||||
return
|
||||
}
|
||||
scanResult.ServerUUID = serverUUID
|
||||
if 0 < len(tags) {
|
||||
if scanResult.Optional == nil {
|
||||
scanResult.Optional = map[string]interface{}{}
|
||||
}
|
||||
scanResult.Optional["VULS_TAGS"] = tags
|
||||
}
|
||||
|
||||
config.Conf.Saas.GroupID = groupID
|
||||
config.Conf.Saas.Token = token
|
||||
|
||||
@@ -2,7 +2,7 @@ package parser
|
||||
|
||||
import (
|
||||
"encoding/xml"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
@@ -41,7 +41,7 @@ func Parse(path string) ([]string, error) {
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
b, err := ioutil.ReadAll(file)
|
||||
b, err := io.ReadAll(file)
|
||||
if err != nil {
|
||||
log.Warnf("Failed to read OWASP Dependency Check XML: %s", path)
|
||||
return []string{}, nil
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
@@ -39,7 +38,7 @@ func main() {
|
||||
}
|
||||
trivyJSON = buf.Bytes()
|
||||
} else {
|
||||
if trivyJSON, err = ioutil.ReadFile(jsonFilePath); err != nil {
|
||||
if trivyJSON, err = os.ReadFile(jsonFilePath); err != nil {
|
||||
fmt.Printf("Failed to read file. err: %+v\n", err)
|
||||
os.Exit(1)
|
||||
}
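The two hunks above are part of the same cleanup: io/ioutil has been deprecated since Go 1.16, so ioutil.ReadAll moves to io.ReadAll and ioutil.ReadFile moves to os.ReadFile. A minimal before/after illustration (the file name is only a placeholder):

package main

import (
	"fmt"
	"io"
	"os"
)

func main() {
	// os.ReadFile replaces ioutil.ReadFile
	raw, err := os.ReadFile("trivy-result.json") // placeholder path
	if err != nil {
		fmt.Fprintf(os.Stderr, "read file: %+v\n", err)
		os.Exit(1)
	}

	// io.ReadAll replaces ioutil.ReadAll for arbitrary readers such as stdin
	piped, err := io.ReadAll(os.Stdin)
	if err != nil {
		fmt.Fprintf(os.Stderr, "read stdin: %+v\n", err)
		os.Exit(1)
	}
	fmt.Printf("file: %d bytes, stdin: %d bytes\n", len(raw), len(piped))
}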
|
||||
|
||||
@@ -2,9 +2,12 @@ package v2
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"time"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/report"
|
||||
"github.com/aquasecurity/trivy/pkg/types"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/contrib/trivy/pkg"
|
||||
@@ -17,7 +20,7 @@ type ParserV2 struct {
|
||||
|
||||
// Parse trivy's JSON and convert to the Vuls struct
|
||||
func (p ParserV2) Parse(vulnJSON []byte) (result *models.ScanResult, err error) {
|
||||
var report report.Report
|
||||
var report types.Report
|
||||
if err = json.Unmarshal(vulnJSON, &report); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -27,34 +30,50 @@ func (p ParserV2) Parse(vulnJSON []byte) (result *models.ScanResult, err error)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
setScanResultMeta(scanResult, &report)
|
||||
if err := setScanResultMeta(scanResult, &report); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return scanResult, nil
|
||||
}
|
||||
|
||||
func setScanResultMeta(scanResult *models.ScanResult, report *report.Report) {
|
||||
for _, r := range report.Results {
|
||||
const trivyTarget = "trivy-target"
|
||||
if pkg.IsTrivySupportedOS(r.Type) {
|
||||
scanResult.Family = r.Type
|
||||
scanResult.ServerName = r.Target
|
||||
scanResult.Optional = map[string]interface{}{
|
||||
trivyTarget: r.Target,
|
||||
}
|
||||
} else if pkg.IsTrivySupportedLib(r.Type) {
|
||||
if scanResult.Family == "" {
|
||||
scanResult.Family = constant.ServerTypePseudo
|
||||
}
|
||||
if scanResult.ServerName == "" {
|
||||
scanResult.ServerName = "library scan by trivy"
|
||||
}
|
||||
if _, ok := scanResult.Optional[trivyTarget]; !ok {
|
||||
scanResult.Optional = map[string]interface{}{
|
||||
trivyTarget: r.Target,
|
||||
}
|
||||
}
|
||||
}
|
||||
scanResult.ScannedAt = time.Now()
|
||||
scanResult.ScannedBy = "trivy"
|
||||
scanResult.ScannedVia = "trivy"
|
||||
var dockerTagPattern = regexp.MustCompile(`^(.*):(.*)$`)
|
||||
|
||||
func setScanResultMeta(scanResult *models.ScanResult, report *types.Report) error {
|
||||
if len(report.Results) == 0 {
|
||||
return xerrors.Errorf("scanned images or libraries are not supported by Trivy. see https://aquasecurity.github.io/trivy/dev/vulnerability/detection/os/, https://aquasecurity.github.io/trivy/dev/vulnerability/detection/language/")
|
||||
}
|
||||
|
||||
scanResult.ServerName = report.ArtifactName
|
||||
if report.ArtifactType == "container_image" {
|
||||
matches := dockerTagPattern.FindStringSubmatch(report.ArtifactName)
|
||||
var imageName, imageTag string
|
||||
if 2 < len(matches) {
|
||||
// including the image tag
|
||||
imageName = matches[1]
|
||||
imageTag = matches[2]
|
||||
} else {
|
||||
// no image tag
|
||||
imageName = report.ArtifactName
|
||||
imageTag = "latest" // Complement if the tag is omitted
|
||||
}
|
||||
scanResult.ServerName = fmt.Sprintf("%s:%s", imageName, imageTag)
|
||||
if scanResult.Optional == nil {
|
||||
scanResult.Optional = map[string]interface{}{}
|
||||
}
|
||||
scanResult.Optional["TRIVY_IMAGE_NAME"] = imageName
|
||||
scanResult.Optional["TRIVY_IMAGE_TAG"] = imageTag
|
||||
}
|
||||
|
||||
if report.Metadata.OS != nil {
|
||||
scanResult.Family = report.Metadata.OS.Family
|
||||
scanResult.Release = report.Metadata.OS.Name
|
||||
} else {
|
||||
scanResult.Family = constant.ServerTypePseudo
|
||||
}
|
||||
|
||||
scanResult.ScannedAt = time.Now()
|
||||
scanResult.ScannedBy = "trivy"
|
||||
scanResult.ScannedVia = "trivy"
|
||||
|
||||
return nil
|
||||
}
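The rewritten setScanResultMeta derives the server name from Trivy's ArtifactName: for container images it splits name and tag with dockerTagPattern and fills in "latest" when the tag is omitted. A small standalone demonstration of that split:

package main

import (
	"fmt"
	"regexp"
)

var dockerTagPattern = regexp.MustCompile(`^(.*):(.*)$`)

// splitImageRef mirrors the name/tag handling shown above.
func splitImageRef(artifactName string) (name, tag string) {
	if m := dockerTagPattern.FindStringSubmatch(artifactName); len(m) > 2 {
		return m[1], m[2]
	}
	return artifactName, "latest" // tag omitted: complement with "latest"
}

func main() {
	for _, ref := range []string{"redis", "redis:7.0", "quay.io/fluentd_elasticsearch/fluentd:v2.9.0"} {
		name, tag := splitImageRef(ref)
		fmt.Printf("%-45s -> name=%s tag=%s\n", ref, name, tag)
	}
}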
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/d4l3k/messagediff"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/models"
|
||||
)
|
||||
@@ -202,8 +203,9 @@ var redisTrivy = []byte(`
|
||||
`)
|
||||
var redisSR = &models.ScanResult{
|
||||
JSONVersion: 4,
|
||||
ServerName: "redis (debian 10.10)",
|
||||
ServerName: "redis:latest",
|
||||
Family: "debian",
|
||||
Release: "10.10",
|
||||
ScannedBy: "trivy",
|
||||
ScannedVia: "trivy",
|
||||
ScannedCves: models.VulnInfos{
|
||||
@@ -262,7 +264,8 @@ var redisSR = &models.ScanResult{
|
||||
},
|
||||
},
|
||||
Optional: map[string]interface{}{
|
||||
"trivy-target": "redis (debian 10.10)",
|
||||
"TRIVY_IMAGE_NAME": "redis",
|
||||
"TRIVY_IMAGE_TAG": "latest",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -372,7 +375,7 @@ var strutsTrivy = []byte(`
|
||||
|
||||
var strutsSR = &models.ScanResult{
|
||||
JSONVersion: 4,
|
||||
ServerName: "library scan by trivy",
|
||||
ServerName: "/data/struts-1.2.7/lib",
|
||||
Family: "pseudo",
|
||||
ScannedBy: "trivy",
|
||||
ScannedVia: "trivy",
|
||||
@@ -458,9 +461,7 @@ var strutsSR = &models.ScanResult{
|
||||
},
|
||||
Packages: models.Packages{},
|
||||
SrcPackages: models.SrcPackages{},
|
||||
Optional: map[string]interface{}{
|
||||
"trivy-target": "Java",
|
||||
},
|
||||
Optional: nil,
|
||||
}
|
||||
|
||||
var osAndLibTrivy = []byte(`
|
||||
@@ -632,8 +633,9 @@ var osAndLibTrivy = []byte(`
|
||||
|
||||
var osAndLibSR = &models.ScanResult{
|
||||
JSONVersion: 4,
|
||||
ServerName: "quay.io/fluentd_elasticsearch/fluentd:v2.9.0 (debian 10.2)",
|
||||
ServerName: "quay.io/fluentd_elasticsearch/fluentd:v2.9.0",
|
||||
Family: "debian",
|
||||
Release: "10.2",
|
||||
ScannedBy: "trivy",
|
||||
ScannedVia: "trivy",
|
||||
ScannedCves: models.VulnInfos{
|
||||
@@ -720,6 +722,84 @@ var osAndLibSR = &models.ScanResult{
|
||||
},
|
||||
},
|
||||
Optional: map[string]interface{}{
|
||||
"trivy-target": "quay.io/fluentd_elasticsearch/fluentd:v2.9.0 (debian 10.2)",
|
||||
"TRIVY_IMAGE_NAME": "quay.io/fluentd_elasticsearch/fluentd",
|
||||
"TRIVY_IMAGE_TAG": "v2.9.0",
|
||||
},
|
||||
}
|
||||
|
||||
func TestParseError(t *testing.T) {
|
||||
cases := map[string]struct {
|
||||
vulnJSON []byte
|
||||
expected error
|
||||
}{
|
||||
"image hello-world": {
|
||||
vulnJSON: helloWorldTrivy,
|
||||
expected: xerrors.Errorf("scanned images or libraries are not supported by Trivy. see https://aquasecurity.github.io/trivy/dev/vulnerability/detection/os/, https://aquasecurity.github.io/trivy/dev/vulnerability/detection/language/"),
|
||||
},
|
||||
}
|
||||
|
||||
for testcase, v := range cases {
|
||||
_, err := ParserV2{}.Parse(v.vulnJSON)
|
||||
|
||||
diff, equal := messagediff.PrettyDiff(
|
||||
v.expected,
|
||||
err,
|
||||
messagediff.IgnoreStructField("frame"),
|
||||
)
|
||||
if !equal {
|
||||
t.Errorf("test: %s, diff %s", testcase, diff)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var helloWorldTrivy = []byte(`
|
||||
{
|
||||
"SchemaVersion": 2,
|
||||
"ArtifactName": "hello-world:latest",
|
||||
"ArtifactType": "container_image",
|
||||
"Metadata": {
|
||||
"ImageID": "sha256:feb5d9fea6a5e9606aa995e879d862b825965ba48de054caab5ef356dc6b3412",
|
||||
"DiffIDs": [
|
||||
"sha256:e07ee1baac5fae6a26f30cabfe54a36d3402f96afda318fe0a96cec4ca393359"
|
||||
],
|
||||
"RepoTags": [
|
||||
"hello-world:latest"
|
||||
],
|
||||
"RepoDigests": [
|
||||
"hello-world@sha256:97a379f4f88575512824f3b352bc03cd75e239179eea0fecc38e597b2209f49a"
|
||||
],
|
||||
"ImageConfig": {
|
||||
"architecture": "amd64",
|
||||
"container": "8746661ca3c2f215da94e6d3f7dfdcafaff5ec0b21c9aff6af3dc379a82fbc72",
|
||||
"created": "2021-09-23T23:47:57.442225064Z",
|
||||
"docker_version": "20.10.7",
|
||||
"history": [
|
||||
{
|
||||
"created": "2021-09-23T23:47:57Z",
|
||||
"created_by": "/bin/sh -c #(nop) COPY file:50563a97010fd7ce1ceebd1fa4f4891ac3decdf428333fb2683696f4358af6c2 in / "
|
||||
},
|
||||
{
|
||||
"created": "2021-09-23T23:47:57Z",
|
||||
"created_by": "/bin/sh -c #(nop) CMD [\"/hello\"]",
|
||||
"empty_layer": true
|
||||
}
|
||||
],
|
||||
"os": "linux",
|
||||
"rootfs": {
|
||||
"type": "layers",
|
||||
"diff_ids": [
|
||||
"sha256:e07ee1baac5fae6a26f30cabfe54a36d3402f96afda318fe0a96cec4ca393359"
|
||||
]
|
||||
},
|
||||
"config": {
|
||||
"Cmd": [
|
||||
"/hello"
|
||||
],
|
||||
"Env": [
|
||||
"PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
|
||||
],
|
||||
"Image": "sha256:b9935d4e8431fb1a7f0989304ec86b3329a99a25f5efdc7f09f3f8c41434ca6d"
|
||||
}
|
||||
}
|
||||
}
|
||||
}`)
|
||||
|
||||
@@ -4,16 +4,14 @@ import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
ftypes "github.com/aquasecurity/fanal/types"
|
||||
|
||||
"github.com/aquasecurity/fanal/analyzer/os"
|
||||
"github.com/aquasecurity/trivy/pkg/report"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/analyzer/os"
|
||||
"github.com/aquasecurity/trivy/pkg/types"
|
||||
|
||||
"github.com/future-architect/vuls/models"
|
||||
)
|
||||
|
||||
// Convert :
|
||||
func Convert(results report.Results) (result *models.ScanResult, err error) {
|
||||
func Convert(results types.Results) (result *models.ScanResult, err error) {
|
||||
scanResult := &models.ScanResult{
|
||||
JSONVersion: models.JSONVersion,
|
||||
ScannedCves: models.VulnInfos{},
|
||||
@@ -79,8 +77,8 @@ func Convert(results report.Results) (result *models.ScanResult, err error) {
|
||||
LastModified: lastModified,
|
||||
}},
|
||||
}
|
||||
// do onlyIif image type is Vuln
|
||||
if IsTrivySupportedOS(trivyResult.Type) {
|
||||
// do only if image type is Vuln
|
||||
if isTrivySupportedOS(trivyResult.Type) {
|
||||
pkgs[vuln.PkgName] = models.Package{
|
||||
Name: vuln.PkgName,
|
||||
Version: vuln.InstalledVersion,
|
||||
@@ -111,7 +109,7 @@ func Convert(results report.Results) (result *models.ScanResult, err error) {
|
||||
}
|
||||
|
||||
// --list-all-pkgs flg of trivy will output all installed packages, so collect them.
|
||||
if trivyResult.Class == report.ClassOSPkg {
|
||||
if trivyResult.Class == types.ClassOSPkg {
|
||||
for _, p := range trivyResult.Packages {
|
||||
pkgs[p.Name] = models.Package{
|
||||
Name: p.Name,
|
||||
@@ -130,7 +128,7 @@ func Convert(results report.Results) (result *models.ScanResult, err error) {
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if trivyResult.Class == report.ClassLangPkg {
|
||||
} else if trivyResult.Class == types.ClassLangPkg {
|
||||
libScanner := uniqueLibraryScannerPaths[trivyResult.Target]
|
||||
libScanner.Type = trivyResult.Type
|
||||
for _, p := range trivyResult.Packages {
|
||||
@@ -178,50 +176,25 @@ func Convert(results report.Results) (result *models.ScanResult, err error) {
|
||||
return scanResult, nil
|
||||
}
|
||||
|
||||
// IsTrivySupportedOS :
|
||||
func IsTrivySupportedOS(family string) bool {
|
||||
supportedFamilies := map[string]interface{}{
|
||||
os.RedHat: struct{}{},
|
||||
os.Debian: struct{}{},
|
||||
os.Ubuntu: struct{}{},
|
||||
os.CentOS: struct{}{},
|
||||
os.Rocky: struct{}{},
|
||||
os.Alma: struct{}{},
|
||||
os.Fedora: struct{}{},
|
||||
os.Amazon: struct{}{},
|
||||
os.Oracle: struct{}{},
|
||||
os.Windows: struct{}{},
|
||||
os.OpenSUSE: struct{}{},
|
||||
os.OpenSUSELeap: struct{}{},
|
||||
os.OpenSUSETumbleweed: struct{}{},
|
||||
os.SLES: struct{}{},
|
||||
os.Photon: struct{}{},
|
||||
os.Alpine: struct{}{},
|
||||
func isTrivySupportedOS(family string) bool {
|
||||
supportedFamilies := map[string]struct{}{
|
||||
os.RedHat: {},
|
||||
os.Debian: {},
|
||||
os.Ubuntu: {},
|
||||
os.CentOS: {},
|
||||
os.Rocky: {},
|
||||
os.Alma: {},
|
||||
os.Fedora: {},
|
||||
os.Amazon: {},
|
||||
os.Oracle: {},
|
||||
os.Windows: {},
|
||||
os.OpenSUSE: {},
|
||||
os.OpenSUSELeap: {},
|
||||
os.OpenSUSETumbleweed: {},
|
||||
os.SLES: {},
|
||||
os.Photon: {},
|
||||
os.Alpine: {},
|
||||
}
|
||||
_, ok := supportedFamilies[family]
|
||||
return ok
|
||||
}
|
||||
|
||||
// IsTrivySupportedLib :
|
||||
func IsTrivySupportedLib(typestr string) bool {
|
||||
supportedLibs := map[string]interface{}{
|
||||
ftypes.Bundler: struct{}{},
|
||||
ftypes.GemSpec: struct{}{},
|
||||
ftypes.Cargo: struct{}{},
|
||||
ftypes.Composer: struct{}{},
|
||||
ftypes.Npm: struct{}{},
|
||||
ftypes.NuGet: struct{}{},
|
||||
ftypes.Pip: struct{}{},
|
||||
ftypes.Pipenv: struct{}{},
|
||||
ftypes.Poetry: struct{}{},
|
||||
ftypes.PythonPkg: struct{}{},
|
||||
ftypes.NodePkg: struct{}{},
|
||||
ftypes.Yarn: struct{}{},
|
||||
ftypes.Jar: struct{}{},
|
||||
ftypes.Pom: struct{}{},
|
||||
ftypes.GoBinary: struct{}{},
|
||||
ftypes.GoMod: struct{}{},
|
||||
}
|
||||
_, ok := supportedLibs[typestr]
|
||||
return ok
|
||||
}
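Alongside renaming IsTrivySupportedOS to the unexported isTrivySupportedOS, the change swaps map[string]interface{} for map[string]struct{} as the lookup set. The empty struct occupies no space per entry, while an interface value is two machine words, so the new form is the idiomatic Go way to express a set:

package main

import (
	"fmt"
	"unsafe"
)

func main() {
	supported := map[string]struct{}{
		"debian": {},
		"ubuntu": {},
		"alpine": {},
	}
	_, ok := supported["debian"]
	fmt.Println(ok) // true

	var empty struct{}
	var boxed interface{}
	fmt.Println(unsafe.Sizeof(empty)) // 0 bytes per set member
	fmt.Println(unsafe.Sizeof(boxed)) // 16 bytes (on 64-bit) with the old map value type
}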
|
||||
|
||||
4052
cti/cti.go
Normal file
File diff suppressed because it is too large
104
cwe/cwe.go
@@ -1,7 +1,14 @@
|
||||
package cwe
|
||||
|
||||
// CweTopTwentyfive2019 has CWE-ID in CWE Top 25
|
||||
var CweTopTwentyfive2019 = map[string]string{
|
||||
// CweTopTwentyfives has CWE-ID in CWE Top 25
|
||||
var CweTopTwentyfives = map[string]map[string]string{
|
||||
"2019": cweTopTwentyfive2019,
|
||||
"2020": cweTopTwentyfive2020,
|
||||
"2021": cweTopTwentyfive2021,
|
||||
"2022": cweTopTwentyfive2022,
|
||||
}
|
||||
|
||||
var cweTopTwentyfive2019 = map[string]string{
|
||||
"119": "1",
|
||||
"79": "2",
|
||||
"20": "3",
|
||||
@@ -29,5 +36,94 @@ var CweTopTwentyfive2019 = map[string]string{
|
||||
"295": "25",
|
||||
}
|
||||
|
||||
// CweTopTwentyfive2019URL has CWE Top25 links
|
||||
var CweTopTwentyfive2019URL = "https://cwe.mitre.org/top25/archive/2019/2019_cwe_top25.html"
|
||||
var cweTopTwentyfive2020 = map[string]string{
|
||||
"79": "1",
|
||||
"787": "2",
|
||||
"20": "3",
|
||||
"125": "4",
|
||||
"119": "5",
|
||||
"89": "6",
|
||||
"200": "7",
|
||||
"416": "8",
|
||||
"352": "9",
|
||||
"78": "10",
|
||||
"190": "11",
|
||||
"22": "12",
|
||||
"476": "13",
|
||||
"287": "14",
|
||||
"434": "16",
|
||||
"732": "16",
|
||||
"94": "17",
|
||||
"522": "18",
|
||||
"611": "19",
|
||||
"798": "20",
|
||||
"502": "21",
|
||||
"269": "22",
|
||||
"400": "23",
|
||||
"306": "24",
|
||||
"862": "25",
|
||||
}
|
||||
|
||||
var cweTopTwentyfive2021 = map[string]string{
|
||||
"787": "1",
|
||||
"79": "2",
|
||||
"125": "3",
|
||||
"20": "4",
|
||||
"78": "5",
|
||||
"89": "6",
|
||||
"416": "7",
|
||||
"22": "8",
|
||||
"352": "9",
|
||||
"434": "10",
|
||||
"306": "11",
|
||||
"190": "12",
|
||||
"502": "13",
|
||||
"287": "14",
|
||||
"476": "16",
|
||||
"798": "16",
|
||||
"119": "17",
|
||||
"862": "18",
|
||||
"276": "19",
|
||||
"200": "20",
|
||||
"522": "21",
|
||||
"732": "22",
|
||||
"611": "23",
|
||||
"918": "24",
|
||||
"77": "25",
|
||||
}
|
||||
|
||||
var cweTopTwentyfive2022 = map[string]string{
|
||||
"787": "1",
|
||||
"79": "2",
|
||||
"89": "3",
|
||||
"20": "4",
|
||||
"125": "5",
|
||||
"78": "6",
|
||||
"416": "7",
|
||||
"22": "8",
|
||||
"352": "9",
|
||||
"434": "10",
|
||||
"476": "11",
|
||||
"502": "12",
|
||||
"190": "13",
|
||||
"287": "14",
|
||||
"798": "16",
|
||||
"862": "16",
|
||||
"77": "17",
|
||||
"306": "18",
|
||||
"119": "19",
|
||||
"276": "20",
|
||||
"918": "21",
|
||||
"362": "22",
|
||||
"400": "23",
|
||||
"611": "24",
|
||||
"94": "25",
|
||||
}
|
||||
|
||||
// CweTopTwentyfiveURLs has CWE Top25 links
|
||||
var CweTopTwentyfiveURLs = map[string]string{
|
||||
"2019": "https://cwe.mitre.org/top25/archive/2019/2019_cwe_top25.html",
|
||||
"2020": "https://cwe.mitre.org/top25/archive/2020/2020_cwe_top25.html",
|
||||
"2021": "https://cwe.mitre.org/top25/archive/2021/2021_cwe_top25.html",
|
||||
"2022": "https://cwe.mitre.org/top25/archive/2022/2022_cwe_top25.html",
|
||||
}
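With the year-keyed maps in place, a caller looks up a ranking by indexing first on the year and then on the CWE ID; an unknown year or ID simply misses. An illustrative lookup using the exported names introduced above (the ranks are stored as strings):

package main

import (
	"fmt"

	"github.com/future-architect/vuls/cwe"
)

func main() {
	// CWE-787 (out-of-bounds write) per year, with the matching archive URL
	for _, year := range []string{"2019", "2020", "2021", "2022"} {
		if rank, ok := cwe.CweTopTwentyfives[year]["787"]; ok {
			fmt.Printf("CWE-787 ranked #%s in %s (%s)\n", rank, year, cwe.CweTopTwentyfiveURLs[year])
		}
	}
}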
|
||||
|
||||
292
cwe/owasp.go
@@ -1,7 +1,12 @@
|
||||
package cwe
|
||||
|
||||
// OwaspTopTen2017 has CWE-ID in OWSP Top 10
|
||||
var OwaspTopTen2017 = map[string]string{
|
||||
// OwaspTopTens has CWE-ID in OWASP Top 10
|
||||
var OwaspTopTens = map[string]map[string]string{
|
||||
"2017": owaspTopTen2017,
|
||||
"2021": owaspTopTen2021,
|
||||
}
|
||||
|
||||
var owaspTopTen2017 = map[string]string{
|
||||
"77": "1",
|
||||
"89": "1",
|
||||
"564": "1",
|
||||
@@ -36,30 +41,265 @@ var OwaspTopTen2017 = map[string]string{
|
||||
"778": "10",
|
||||
}
|
||||
|
||||
// OwaspTopTen2017GitHubURLEn has GitHub links
|
||||
var OwaspTopTen2017GitHubURLEn = map[string]string{
|
||||
"1": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa1-injection.md",
|
||||
"2": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa2-broken-authentication.md",
|
||||
"3": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa3-sensitive-data-disclosure.md",
|
||||
"4": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa4-xxe.md",
|
||||
"5": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa5-broken-access-control.md",
|
||||
"6": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa6-security-misconfiguration.md",
|
||||
"7": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa7-xss.md",
|
||||
"8": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa8-insecure-deserialization.md",
|
||||
"9": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa9-known-vulns.md<Paste>",
|
||||
"10": "https://github.com/OWASP/Top10/blob/master/2017/en/0xaa-logging-detection-response.md",
|
||||
var owaspTopTen2021 = map[string]string{
|
||||
"22": "1",
|
||||
"23": "1",
|
||||
"35": "1",
|
||||
"59": "1",
|
||||
"200": "1",
|
||||
"201": "1",
|
||||
"219": "1",
|
||||
"264": "1",
|
||||
"275": "1",
|
||||
"276": "1",
|
||||
"284": "1",
|
||||
"285": "1",
|
||||
"352": "1",
|
||||
"359": "1",
|
||||
"377": "1",
|
||||
"402": "1",
|
||||
"425": "1",
|
||||
"441": "1",
|
||||
"497": "1",
|
||||
"538": "1",
|
||||
"540": "1",
|
||||
"552": "1",
|
||||
"566": "1",
|
||||
"601": "1",
|
||||
"639": "1",
|
||||
"651": "1",
|
||||
"668": "1",
|
||||
"706": "1",
|
||||
"862": "1",
|
||||
"863": "1",
|
||||
"913": "1",
|
||||
"922": "1",
|
||||
"1275": "1",
|
||||
|
||||
"261": "2",
|
||||
"296": "2",
|
||||
"310": "2",
|
||||
"319": "2",
|
||||
"321": "2",
|
||||
"322": "2",
|
||||
"323": "2",
|
||||
"324": "2",
|
||||
"325": "2",
|
||||
"326": "2",
|
||||
"327": "2",
|
||||
"328": "2",
|
||||
"329": "2",
|
||||
"330": "2",
|
||||
"331": "2",
|
||||
"335": "2",
|
||||
"336": "2",
|
||||
"337": "2",
|
||||
"338": "2",
|
||||
"340": "2",
|
||||
"347": "2",
|
||||
"523": "2",
|
||||
"720": "2",
|
||||
"757": "2",
|
||||
"759": "2",
|
||||
"760": "2",
|
||||
"780": "2",
|
||||
"818": "2",
|
||||
"916": "2",
|
||||
|
||||
"20": "3",
|
||||
"74": "3",
|
||||
"75": "3",
|
||||
"77": "3",
|
||||
"78": "3",
|
||||
"79": "3",
|
||||
"80": "3",
|
||||
"83": "3",
|
||||
"87": "3",
|
||||
"88": "3",
|
||||
"89": "3",
|
||||
"90": "3",
|
||||
"91": "3",
|
||||
"93": "3",
|
||||
"94": "3",
|
||||
"95": "3",
|
||||
"96": "3",
|
||||
"97": "3",
|
||||
"98": "3",
|
||||
"99": "3",
|
||||
"100": "3",
|
||||
"113": "3",
|
||||
"116": "3",
|
||||
"138": "3",
|
||||
"184": "3",
|
||||
"470": "3",
|
||||
"471": "3",
|
||||
"564": "3",
|
||||
"610": "3",
|
||||
"643": "3",
|
||||
"644": "3",
|
||||
"652": "3",
|
||||
"917": "3",
|
||||
|
||||
"73": "4",
|
||||
"183": "4",
|
||||
"209": "4",
|
||||
"213": "4",
|
||||
"235": "4",
|
||||
"256": "4",
|
||||
"257": "4",
|
||||
"266": "4",
|
||||
"269": "4",
|
||||
"280": "4",
|
||||
"311": "4",
|
||||
"312": "4",
|
||||
"313": "4",
|
||||
"316": "4",
|
||||
"419": "4",
|
||||
"430": "4",
|
||||
"434": "4",
|
||||
"444": "4",
|
||||
"451": "4",
|
||||
"472": "4",
|
||||
"501": "4",
|
||||
"522": "4",
|
||||
"525": "4",
|
||||
"539": "4",
|
||||
"579": "4",
|
||||
"598": "4",
|
||||
"602": "4",
|
||||
"642": "4",
|
||||
"646": "4",
|
||||
"650": "4",
|
||||
"653": "4",
|
||||
"656": "4",
|
||||
"657": "4",
|
||||
"799": "4",
|
||||
"807": "4",
|
||||
"840": "4",
|
||||
"841": "4",
|
||||
"927": "4",
|
||||
"1021": "4",
|
||||
"1173": "4",
|
||||
|
||||
"2": "5",
|
||||
"11": "5",
|
||||
"13": "5",
|
||||
"15": "5",
|
||||
"16": "5",
|
||||
"260": "5",
|
||||
"315": "5",
|
||||
"520": "5",
|
||||
"526": "5",
|
||||
"537": "5",
|
||||
"541": "5",
|
||||
"547": "5",
|
||||
"611": "5",
|
||||
"614": "5",
|
||||
"756": "5",
|
||||
"776": "5",
|
||||
"942": "5",
|
||||
"1004": "5",
|
||||
"1032": "5",
|
||||
"1174": "5",
|
||||
|
||||
"937": "6",
|
||||
"1035": "6",
|
||||
"1104": "6",
|
||||
|
||||
"255": "7",
|
||||
"259": "7",
|
||||
"287": "7",
|
||||
"288": "7",
|
||||
"290": "7",
|
||||
"294": "7",
|
||||
"295": "7",
|
||||
"297": "7",
|
||||
"300": "7",
|
||||
"302": "7",
|
||||
"304": "7",
|
||||
"306": "7",
|
||||
"307": "7",
|
||||
"346": "7",
|
||||
"384": "7",
|
||||
"521": "7",
|
||||
"613": "7",
|
||||
"620": "7",
|
||||
"640": "7",
|
||||
"798": "7",
|
||||
"940": "7",
|
||||
"1216": "7",
|
||||
|
||||
"345": "8",
|
||||
"353": "8",
|
||||
"426": "8",
|
||||
"494": "8",
|
||||
"502": "8",
|
||||
"565": "8",
|
||||
"784": "8",
|
||||
"829": "8",
|
||||
"830": "8",
|
||||
"915": "8",
|
||||
|
||||
"117": "9",
|
||||
"223": "9",
|
||||
"532": "9",
|
||||
"778": "9",
|
||||
|
||||
"918": "10",
|
||||
}
|
||||
|
||||
// OwaspTopTen2017GitHubURLJa has GitHub links
|
||||
var OwaspTopTen2017GitHubURLJa = map[string]string{
|
||||
"1": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa1-injection.md",
|
||||
"2": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa2-broken-authentication.md",
|
||||
"3": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa3-sensitive-data-disclosure.md",
|
||||
"4": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa4-xxe.md",
|
||||
"5": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa5-broken-access-control.md",
|
||||
"6": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa6-security-misconfiguration.md",
|
||||
"7": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa7-xss.md",
|
||||
"8": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa8-insecure-deserialization.md",
|
||||
"9": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa9-known-vulns.md<Paste>",
|
||||
"10": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xaa-logging-detection-response.md",
|
||||
// OwaspTopTenURLsEn has GitHub links
|
||||
var OwaspTopTenURLsEn = map[string]map[string]string{
|
||||
"2017": {
|
||||
"1": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa1-injection.md",
|
||||
"2": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa2-broken-authentication.md",
|
||||
"3": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa3-sensitive-data-disclosure.md",
|
||||
"4": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa4-xxe.md",
|
||||
"5": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa5-broken-access-control.md",
|
||||
"6": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa6-security-misconfiguration.md",
|
||||
"7": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa7-xss.md",
|
||||
"8": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa8-insecure-deserialization.md",
|
||||
"9": "https://github.com/OWASP/Top10/blob/master/2017/en/0xa9-known-vulns.md",
|
||||
"10": "https://github.com/OWASP/Top10/blob/master/2017/en/0xaa-logging-detection-response.md",
|
||||
},
|
||||
"2021": {
|
||||
"1": "https://github.com/OWASP/Top10/blob/master/2021/docs/A01_2021-Broken_Access_Control.md",
|
||||
"2": "https://github.com/OWASP/Top10/blob/master/2021/docs/A02_2021-Cryptographic_Failures.md",
|
||||
"3": "https://github.com/OWASP/Top10/blob/master/2021/docs/A03_2021-Injection.md",
|
||||
"4": "https://github.com/OWASP/Top10/blob/master/2021/docs/A04_2021-Insecure_Design.md",
|
||||
"5": "https://github.com/OWASP/Top10/blob/master/2021/docs/A05_2021-Security_Misconfiguration.md",
|
||||
"6": "https://github.com/OWASP/Top10/blob/master/2021/docs/A06_2021-Vulnerable_and_Outdated_Components.md",
|
||||
"7": "https://github.com/OWASP/Top10/blob/master/2021/docs/A07_2021-Identification_and_Authentication_Failures.md",
|
||||
"8": "https://github.com/OWASP/Top10/blob/master/2021/docs/A08_2021-Software_and_Data_Integrity_Failures.md",
|
||||
"9": "https://github.com/OWASP/Top10/blob/master/2021/docs/A09_2021-Security_Logging_and_Monitoring_Failures.md",
|
||||
"10": "https://github.com/OWASP/Top10/blob/master/2021/docs/A10_2021-Server-Side_Request_Forgery_(SSRF).md",
|
||||
},
|
||||
}
|
||||
|
||||
// OwaspTopTenURLsJa has GitHub links
|
||||
var OwaspTopTenURLsJa = map[string]map[string]string{
|
||||
"2017": {
|
||||
"1": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa1-injection.md",
|
||||
"2": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa2-broken-authentication.md",
|
||||
"3": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa3-sensitive-data-disclosure.md",
|
||||
"4": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa4-xxe.md",
|
||||
"5": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa5-broken-access-control.md",
|
||||
"6": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa6-security-misconfiguration.md",
|
||||
"7": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa7-xss.md",
|
||||
"8": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa8-insecure-deserialization.md",
|
||||
"9": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xa9-known-vulns.md",
|
||||
"10": "https://github.com/OWASP/Top10/blob/master/2017/ja/0xaa-logging-detection-response.md",
|
||||
},
|
||||
"2021": {
|
||||
"1": "https://github.com/OWASP/Top10/blob/master/2021/docs/A01_2021-Broken_Access_Control.ja.md",
|
||||
"2": "https://github.com/OWASP/Top10/blob/master/2021/docs/A02_2021-Cryptographic_Failures.ja.md",
|
||||
"3": "https://github.com/OWASP/Top10/blob/master/2021/docs/A03_2021-Injection.ja.md",
|
||||
"4": "https://github.com/OWASP/Top10/blob/master/2021/docs/A04_2021-Insecure_Design.ja.md",
|
||||
"5": "https://github.com/OWASP/Top10/blob/master/2021/docs/A05_2021-Security_Misconfiguration.ja.md",
|
||||
"6": "https://github.com/OWASP/Top10/blob/master/2021/docs/A06_2021-Vulnerable_and_Outdated_Components.ja.md",
|
||||
"7": "https://github.com/OWASP/Top10/blob/master/2021/docs/A07_2021-Identification_and_Authentication_Failures.ja.md",
|
||||
"8": "https://github.com/OWASP/Top10/blob/master/2021/docs/A08_2021-Software_and_Data_Integrity_Failures.ja.md",
|
||||
"9": "https://github.com/OWASP/Top10/blob/master/2021/docs/A09_2021-Security_Logging_and_Monitoring_Failures.ja.md",
|
||||
"10": "https://github.com/OWASP/Top10/blob/master/2021/docs/A10_2021-Server-Side_Request_Forgery_(SSRF).ja.md",
|
||||
},
|
||||
}
|
||||
|
||||
74
cwe/sans.go
@@ -1,7 +1,41 @@
|
||||
package cwe
|
||||
|
||||
// SansTopTwentyfive has CWE-ID in CWE/SANS Top 25
|
||||
var SansTopTwentyfive = map[string]string{
|
||||
// SansTopTwentyfives has CWE-ID in CWE/SANS Top 25
|
||||
var SansTopTwentyfives = map[string]map[string]string{
|
||||
"2010": sansTopTwentyfive2010,
|
||||
"2011": sansTopTwentyfive2011,
|
||||
"latest": sansTopTwentyfiveLatest,
|
||||
}
|
||||
|
||||
var sansTopTwentyfive2010 = map[string]string{
|
||||
"79": "1",
|
||||
"89": "2",
|
||||
"120": "3",
|
||||
"352": "4",
|
||||
"285": "5",
|
||||
"807": "6",
|
||||
"22": "7",
|
||||
"434": "8",
|
||||
"78": "9",
|
||||
"311": "10",
|
||||
"798": "11",
|
||||
"805": "12",
|
||||
"98": "13",
|
||||
"129": "14",
|
||||
"754": "15",
|
||||
"209": "16",
|
||||
"190": "17",
|
||||
"131": "18",
|
||||
"306": "19",
|
||||
"494": "20",
|
||||
"732": "21",
|
||||
"770": "22",
|
||||
"601": "23",
|
||||
"327": "24",
|
||||
"362": "25",
|
||||
}
|
||||
|
||||
var sansTopTwentyfive2011 = map[string]string{
|
||||
"89": "1",
|
||||
"78": "2",
|
||||
"120": "3",
|
||||
@@ -29,5 +63,37 @@ var SansTopTwentyfive = map[string]string{
|
||||
"759": "25",
|
||||
}
|
||||
|
||||
// SansTopTwentyfiveURL is a URL of sans 25
|
||||
var SansTopTwentyfiveURL = "https://www.sans.org/top25-software-errors/"
|
||||
var sansTopTwentyfiveLatest = map[string]string{
|
||||
"119": "1",
|
||||
"79": "2",
|
||||
"20": "3",
|
||||
"200": "4",
|
||||
"125": "5",
|
||||
"89": "6",
|
||||
"416": "7",
|
||||
"190": "8",
|
||||
"352": "9",
|
||||
"22": "10",
|
||||
"78": "11",
|
||||
"787": "12",
|
||||
"287": "13",
|
||||
"476": "14",
|
||||
"732": "15",
|
||||
"434": "16",
|
||||
"611": "17",
|
||||
"94": "18",
|
||||
"798": "19",
|
||||
"400": "20",
|
||||
"772": "21",
|
||||
"426": "22",
|
||||
"502": "23",
|
||||
"269": "24",
|
||||
"295": "25",
|
||||
}
|
||||
|
||||
// SansTopTwentyfiveURLs has CWE/SANS Top25 links
|
||||
var SansTopTwentyfiveURLs = map[string]string{
|
||||
"2010": "https://cwe.mitre.org/top25/archive/2010/2010_cwe_sans_top25.html",
|
||||
"2011": "https://cwe.mitre.org/top25/archive/2011/2011_cwe_sans_top25.html",
|
||||
"latest": "https://www.sans.org/top25-software-errors/",
|
||||
}
|
||||
|
||||
222
detector/cti.go
Normal file
@@ -0,0 +1,222 @@
|
||||
//go:build !scanner
|
||||
// +build !scanner
|
||||
|
||||
package detector
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
ctidb "github.com/vulsio/go-cti/db"
|
||||
ctilog "github.com/vulsio/go-cti/utils"
|
||||
)
|
||||
|
||||
// goCTIDBClient is a DB Driver
|
||||
type goCTIDBClient struct {
|
||||
driver ctidb.DB
|
||||
baseURL string
|
||||
}
|
||||
|
||||
// closeDB close a DB connection
|
||||
func (client goCTIDBClient) closeDB() error {
|
||||
if client.driver == nil {
|
||||
return nil
|
||||
}
|
||||
return client.driver.CloseDB()
|
||||
}
|
||||
|
||||
func newGoCTIDBClient(cnf config.VulnDictInterface, o logging.LogOpts) (*goCTIDBClient, error) {
|
||||
if err := ctilog.SetLogger(o.LogToFile, o.LogDir, o.Debug, o.LogJSON); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to set go-cti logger. err: %w", err)
|
||||
}
|
||||
|
||||
db, err := newCTIDB(cnf)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to newCTIDB. err: %w", err)
|
||||
}
|
||||
return &goCTIDBClient{driver: db, baseURL: cnf.GetURL()}, nil
|
||||
}
|
||||
|
||||
// FillWithCTI :
|
||||
func FillWithCTI(r *models.ScanResult, cnf config.CtiConf, logOpts logging.LogOpts) error {
|
||||
client, err := newGoCTIDBClient(&cnf, logOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := client.closeDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
nCti := 0
|
||||
if client.driver == nil {
|
||||
var cveIDs []string
|
||||
for cveID := range r.ScannedCves {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
}
|
||||
prefix, err := util.URLPathJoin(client.baseURL, "cves")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
responses, err := getCTIsViaHTTP(cveIDs, prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, res := range responses {
|
||||
var techniqueIDs []string
|
||||
if err := json.Unmarshal([]byte(res.json), &techniqueIDs); err != nil {
|
||||
return err
|
||||
}
|
||||
v, ok := r.ScannedCves[res.request.cveID]
|
||||
if ok {
|
||||
v.Ctis = techniqueIDs
|
||||
nCti++
|
||||
}
|
||||
r.ScannedCves[res.request.cveID] = v
|
||||
}
|
||||
} else {
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if cveID == "" {
|
||||
continue
|
||||
}
|
||||
techniqueIDs, err := client.driver.GetTechniqueIDsByCveID(cveID)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to get CTIs by CVE-ID. err: %w", err)
|
||||
}
|
||||
if len(techniqueIDs) == 0 {
|
||||
continue
|
||||
}
|
||||
vuln.Ctis = techniqueIDs
|
||||
nCti++
|
||||
r.ScannedCves[cveID] = vuln
|
||||
}
|
||||
}
|
||||
|
||||
logging.Log.Infof("%s: Cyber Threat Intelligences are detected for %d CVEs", r.FormatServerName(), nCti)
|
||||
return nil
|
||||
}
|
||||
|
||||
type ctiResponse struct {
|
||||
request ctiRequest
|
||||
json string
|
||||
}
|
||||
|
||||
func getCTIsViaHTTP(cveIDs []string, urlPrefix string) (responses []ctiResponse, err error) {
|
||||
nReq := len(cveIDs)
|
||||
reqChan := make(chan ctiRequest, nReq)
|
||||
resChan := make(chan ctiResponse, nReq)
|
||||
errChan := make(chan error, nReq)
|
||||
defer close(reqChan)
|
||||
defer close(resChan)
|
||||
defer close(errChan)
|
||||
|
||||
go func() {
|
||||
for _, cveID := range cveIDs {
|
||||
reqChan <- ctiRequest{
|
||||
cveID: cveID,
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
concurrency := 10
|
||||
tasks := util.GenWorkers(concurrency)
|
||||
for i := 0; i < nReq; i++ {
|
||||
tasks <- func() {
|
||||
req := <-reqChan
|
||||
url, err := util.URLPathJoin(
|
||||
urlPrefix,
|
||||
req.cveID,
|
||||
)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
logging.Log.Debugf("HTTP Request to %s", url)
|
||||
httpGetCTI(url, req, resChan, errChan)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
timeout := time.After(2 * 60 * time.Second)
|
||||
var errs []error
|
||||
for i := 0; i < nReq; i++ {
|
||||
select {
|
||||
case res := <-resChan:
|
||||
responses = append(responses, res)
|
||||
case err := <-errChan:
|
||||
errs = append(errs, err)
|
||||
case <-timeout:
|
||||
return nil, xerrors.New("Timeout Fetching CTI")
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil, xerrors.Errorf("Failed to fetch CTI. err: %w", errs)
|
||||
}
|
||||
return
|
||||
}
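getCTIsViaHTTP fans requests out through util.GenWorkers, which is not shown in this diff; the sketch below is only a guess at that helper's shape: a fixed number of goroutines draining a channel of func() tasks, so sending onto the channel caps how many HTTP requests run at once. Results and errors come back on separate buffered channels, exactly as in the function above.

package main

import (
	"fmt"
	"sync"
	"time"
)

// genWorkers starts num goroutines that execute whatever func() is sent on the returned channel.
func genWorkers(num int) chan<- func() {
	tasks := make(chan func())
	for i := 0; i < num; i++ {
		go func() {
			for f := range tasks {
				f()
			}
		}()
	}
	return tasks
}

func main() {
	results := make(chan string, 5)
	var wg sync.WaitGroup
	tasks := genWorkers(2) // at most 2 tasks running at once, mirroring concurrency := 10 above

	for i := 0; i < 5; i++ {
		wg.Add(1)
		id := i
		tasks <- func() {
			defer wg.Done()
			time.Sleep(10 * time.Millisecond) // stand-in for an HTTP GET
			results <- fmt.Sprintf("response %d", id)
		}
	}
	wg.Wait()
	close(results)
	for r := range results {
		fmt.Println(r)
	}
}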
|
||||
|
||||
type ctiRequest struct {
|
||||
cveID string
|
||||
}
|
||||
|
||||
func httpGetCTI(url string, req ctiRequest, resChan chan<- ctiResponse, errChan chan<- error) {
|
||||
var body string
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
count, retryMax := 0, 3
|
||||
f := func() (err error) {
|
||||
// resp, body, errs = gorequest.New().SetDebug(config.Conf.Debug).Get(url).End()
|
||||
resp, body, errs = gorequest.New().Timeout(10 * time.Second).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
count++
|
||||
if count == retryMax {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, t time.Duration) {
|
||||
logging.Log.Warnf("Failed to HTTP GET. retrying in %s seconds. err: %+v", t, err)
|
||||
}
|
||||
if err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify); err != nil {
|
||||
errChan <- xerrors.Errorf("HTTP Error %w", err)
|
||||
return
|
||||
}
|
||||
if count == retryMax {
|
||||
errChan <- xerrors.New("Retry count exceeded")
|
||||
return
|
||||
}
|
||||
|
||||
resChan <- ctiResponse{
|
||||
request: req,
|
||||
json: body,
|
||||
}
|
||||
}
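httpGetCTI wraps each GET in backoff.RetryNotify: the operation returns an error to schedule another attempt with exponential delay, returns nil to stop, and here deliberately returns nil once retryMax is reached so the retry loop ends and the count check afterwards reports the failure. A self-contained example of the same github.com/cenkalti/backoff pattern (the URL is a placeholder):

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/cenkalti/backoff"
)

func main() {
	url := "https://example.com/health" // placeholder endpoint
	operation := func() error {
		resp, err := http.Get(url)
		if err != nil {
			return err // a non-nil error triggers another attempt
		}
		defer resp.Body.Close()
		if resp.StatusCode != http.StatusOK {
			return fmt.Errorf("unexpected status: %d", resp.StatusCode)
		}
		return nil // nil stops the retry loop
	}
	notify := func(err error, next time.Duration) {
		fmt.Printf("request failed, retrying in %s: %v\n", next, err)
	}
	if err := backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
		fmt.Printf("gave up: %v\n", err)
	}
}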
|
||||
|
||||
func newCTIDB(cnf config.VulnDictInterface) (ctidb.DB, error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err := ctidb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), ctidb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
return nil, xerrors.Errorf("Failed to init cti DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init cti DB. DB Path: %s, err: %w", path, err)
|
||||
}
|
||||
return driver, nil
|
||||
}
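newCTIDB (and the reworked newCveDB below) follow the same rule for every dictionary: when the config says fetch-via-HTTP there is no local driver at all, otherwise the DSN is the SQLite3 path for sqlite3 and the URL for the other backends. Sketched with a hypothetical config struct rather than the real config.VulnDictInterface, and with placeholder paths:

package main

import "fmt"

// dictConf is a stand-in for config.VulnDictInterface, reduced to the fields used here.
type dictConf struct {
	FetchViaHTTP bool
	Type         string // "sqlite3", "mysql", "postgres", "redis", ...
	URL          string
	SQLite3Path  string
}

// resolveDSN returns ok=false when the dictionary should be queried over HTTP (no local driver).
func resolveDSN(cnf dictConf) (dsn string, ok bool) {
	if cnf.FetchViaHTTP {
		return "", false
	}
	if cnf.Type == "sqlite3" {
		return cnf.SQLite3Path, true
	}
	return cnf.URL, true
}

func main() {
	local := dictConf{Type: "sqlite3", SQLite3Path: "/vuls/go-cti.sqlite3"}
	remote := dictConf{FetchViaHTTP: true, Type: "http", URL: "http://go-cti:1329"}
	fmt.Println(resolveDSN(local))  // /vuls/go-cti.sqlite3 true
	fmt.Println(resolveDSN(remote)) // "" false
}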
|
||||
@@ -22,40 +22,27 @@ import (
|
||||
)
|
||||
|
||||
type goCveDictClient struct {
|
||||
cnf config.VulnDictInterface
|
||||
driver cvedb.DB
|
||||
driver cvedb.DB
|
||||
baseURL string
|
||||
}
|
||||
|
||||
func newGoCveDictClient(cnf config.VulnDictInterface, o logging.LogOpts) (*goCveDictClient, error) {
|
||||
if err := cvelog.SetLogger(o.LogToFile, o.LogDir, o.Debug, o.LogJSON); err != nil {
|
||||
return nil, err
|
||||
return nil, xerrors.Errorf("Failed to set go-cve-dictionary logger. err: %w", err)
|
||||
}
|
||||
|
||||
driver, locked, err := newCveDB(cnf)
|
||||
if locked {
|
||||
return nil, xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
driver, err := newCveDB(cnf)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to newCveDB. err: %w", err)
|
||||
}
|
||||
return &goCveDictClient{cnf: cnf, driver: driver}, nil
|
||||
return &goCveDictClient{driver: driver, baseURL: cnf.GetURL()}, nil
|
||||
}
|
||||
|
||||
func (api goCveDictClient) closeDB() error {
|
||||
if api.driver == nil {
|
||||
func (client goCveDictClient) closeDB() error {
|
||||
if client.driver == nil {
|
||||
return nil
|
||||
}
|
||||
return api.driver.CloseDB()
|
||||
}
|
||||
|
||||
func (api goCveDictClient) fetchCveDetails(cveIDs []string) (cveDetails []cvemodels.CveDetail, err error) {
|
||||
m, err := api.driver.GetMulti(cveIDs)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to GetMulti. err: %w", err)
|
||||
}
|
||||
for _, v := range m {
|
||||
cveDetails = append(cveDetails, v)
|
||||
}
|
||||
return cveDetails, nil
|
||||
return client.driver.CloseDB()
|
||||
}
|
||||
|
||||
type response struct {
|
||||
@@ -63,57 +50,67 @@ type response struct {
|
||||
CveDetail cvemodels.CveDetail
|
||||
}
|
||||
|
||||
func (api goCveDictClient) fetchCveDetailsViaHTTP(cveIDs []string) (cveDetails []cvemodels.CveDetail, err error) {
|
||||
reqChan := make(chan string, len(cveIDs))
|
||||
resChan := make(chan response, len(cveIDs))
|
||||
errChan := make(chan error, len(cveIDs))
|
||||
defer close(reqChan)
|
||||
defer close(resChan)
|
||||
defer close(errChan)
|
||||
func (client goCveDictClient) fetchCveDetails(cveIDs []string) (cveDetails []cvemodels.CveDetail, err error) {
|
||||
if client.driver == nil {
|
||||
reqChan := make(chan string, len(cveIDs))
|
||||
resChan := make(chan response, len(cveIDs))
|
||||
errChan := make(chan error, len(cveIDs))
|
||||
defer close(reqChan)
|
||||
defer close(resChan)
|
||||
defer close(errChan)
|
||||
|
||||
go func() {
|
||||
for _, cveID := range cveIDs {
|
||||
reqChan <- cveID
|
||||
}
|
||||
}()
|
||||
go func() {
|
||||
for _, cveID := range cveIDs {
|
||||
reqChan <- cveID
|
||||
}
|
||||
}()
|
||||
|
||||
concurrency := 10
|
||||
tasks := util.GenWorkers(concurrency)
|
||||
for range cveIDs {
|
||||
tasks <- func() {
|
||||
select {
|
||||
case cveID := <-reqChan:
|
||||
url, err := util.URLPathJoin(api.cnf.GetURL(), "cves", cveID)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
logging.Log.Debugf("HTTP Request to %s", url)
|
||||
api.httpGet(cveID, url, resChan, errChan)
|
||||
concurrency := 10
|
||||
tasks := util.GenWorkers(concurrency)
|
||||
for range cveIDs {
|
||||
tasks <- func() {
|
||||
select {
|
||||
case cveID := <-reqChan:
|
||||
url, err := util.URLPathJoin(client.baseURL, "cves", cveID)
|
||||
if err != nil {
|
||||
errChan <- err
|
||||
} else {
|
||||
logging.Log.Debugf("HTTP Request to %s", url)
|
||||
httpGet(cveID, url, resChan, errChan)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
timeout := time.After(2 * 60 * time.Second)
|
||||
var errs []error
|
||||
for range cveIDs {
|
||||
select {
|
||||
case res := <-resChan:
|
||||
cveDetails = append(cveDetails, res.CveDetail)
|
||||
case err := <-errChan:
|
||||
errs = append(errs, err)
|
||||
case <-timeout:
|
||||
return nil, xerrors.New("Timeout Fetching CVE")
|
||||
timeout := time.After(2 * 60 * time.Second)
|
||||
var errs []error
|
||||
for range cveIDs {
|
||||
select {
|
||||
case res := <-resChan:
|
||||
cveDetails = append(cveDetails, res.CveDetail)
|
||||
case err := <-errChan:
|
||||
errs = append(errs, err)
|
||||
case <-timeout:
|
||||
return nil, xerrors.New("Timeout Fetching CVE")
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil,
|
||||
xerrors.Errorf("Failed to fetch CVE. err: %w", errs)
|
||||
}
|
||||
} else {
|
||||
m, err := client.driver.GetMulti(cveIDs)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to GetMulti. err: %w", err)
|
||||
}
|
||||
for _, v := range m {
|
||||
cveDetails = append(cveDetails, v)
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil,
|
||||
xerrors.Errorf("Failed to fetch CVE. err: %w", errs)
|
||||
}
|
||||
return
|
||||
return cveDetails, nil
|
||||
}
|
||||
|
||||
func (api goCveDictClient) httpGet(key, url string, resChan chan<- response, errChan chan<- error) {
|
||||
func httpGet(key, url string, resChan chan<- response, errChan chan<- error) {
|
||||
var body string
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
@@ -144,21 +141,21 @@ func (api goCveDictClient) httpGet(key, url string, resChan chan<- response, err
|
||||
}
|
||||
}
|
||||
|
||||
func (api goCveDictClient) detectCveByCpeURI(cpeURI string, useJVN bool) (cves []cvemodels.CveDetail, err error) {
|
||||
if api.cnf.IsFetchViaHTTP() {
|
||||
url, err := util.URLPathJoin(api.cnf.GetURL(), "cpes")
|
||||
func (client goCveDictClient) detectCveByCpeURI(cpeURI string, useJVN bool) (cves []cvemodels.CveDetail, err error) {
|
||||
if client.driver == nil {
|
||||
url, err := util.URLPathJoin(client.baseURL, "cpes")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
|
||||
query := map[string]string{"name": cpeURI}
|
||||
logging.Log.Debugf("HTTP Request to %s, query: %#v", url, query)
|
||||
if cves, err = api.httpPost(url, query); err != nil {
|
||||
return nil, err
|
||||
if cves, err = httpPost(url, query); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to post HTTP Request. err: %w", err)
|
||||
}
|
||||
} else {
|
||||
if cves, err = api.driver.GetByCpeURI(cpeURI); err != nil {
|
||||
return nil, err
|
||||
if cves, err = client.driver.GetByCpeURI(cpeURI); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to get CVEs by CPEURI. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -177,7 +174,7 @@ func (api goCveDictClient) detectCveByCpeURI(cpeURI string, useJVN bool) (cves [
|
||||
return nvdCves, nil
|
||||
}
|
||||
|
||||
func (api goCveDictClient) httpPost(url string, query map[string]string) ([]cvemodels.CveDetail, error) {
|
||||
func httpPost(url string, query map[string]string) ([]cvemodels.CveDetail, error) {
|
||||
var body string
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
@@ -208,18 +205,20 @@ func (api goCveDictClient) httpPost(url string, query map[string]string) ([]cvem
|
||||
return cveDetails, nil
|
||||
}
|
||||
|
||||
func newCveDB(cnf config.VulnDictInterface) (driver cvedb.DB, locked bool, err error) {
|
||||
func newCveDB(cnf config.VulnDictInterface) (cvedb.DB, error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
return nil, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err = cvedb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), cvedb.Option{})
|
||||
driver, locked, err := cvedb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), cvedb.Option{})
|
||||
if err != nil {
|
||||
err = xerrors.Errorf("Failed to init CVE DB. err: %w, path: %s", err, path)
|
||||
return nil, locked, err
|
||||
if locked {
|
||||
return nil, xerrors.Errorf("Failed to init CVE DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init CVE DB. DB Path: %s, err: %w", path, err)
|
||||
}
|
||||
return driver, false, nil
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
@@ -8,6 +8,8 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/contrib/owasp-dependency-check/parser"
|
||||
@@ -19,7 +21,6 @@ import (
|
||||
"github.com/future-architect/vuls/reporter"
|
||||
"github.com/future-architect/vuls/util"
|
||||
cvemodels "github.com/vulsio/go-cve-dictionary/models"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Cpe :
|
||||
@@ -47,7 +48,7 @@ func Detect(rs []models.ScanResult, dir string) ([]models.ScanResult, error) {
|
||||
return nil, xerrors.Errorf("Failed to fill with Library dependency: %w", err)
|
||||
}
|
||||
|
||||
if err := DetectPkgCves(&r, config.Conf.OvalDict, config.Conf.Gost); err != nil {
|
||||
if err := DetectPkgCves(&r, config.Conf.OvalDict, config.Conf.Gost, config.Conf.LogOpts); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to detect Pkg CVE: %w", err)
|
||||
}
|
||||
|
||||
@@ -91,7 +92,7 @@ func Detect(rs []models.ScanResult, dir string) ([]models.ScanResult, error) {
|
||||
return nil, xerrors.Errorf("Failed to detect WordPress Cves: %w", err)
|
||||
}
|
||||
|
||||
if err := gost.FillCVEsWithRedHat(&r, config.Conf.Gost); err != nil {
|
||||
if err := gost.FillCVEsWithRedHat(&r, config.Conf.Gost, config.Conf.LogOpts); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to fill with gost: %w", err)
|
||||
}
|
||||
|
||||
@@ -99,22 +100,26 @@ func Detect(rs []models.ScanResult, dir string) ([]models.ScanResult, error) {
|
||||
return nil, xerrors.Errorf("Failed to fill with CVE: %w", err)
|
||||
}
|
||||
|
||||
nExploitCve, err := FillWithExploit(&r, config.Conf.Exploit)
|
||||
nExploitCve, err := FillWithExploit(&r, config.Conf.Exploit, config.Conf.LogOpts)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to fill with exploit: %w", err)
|
||||
}
|
||||
logging.Log.Infof("%s: %d PoC are detected", r.FormatServerName(), nExploitCve)
|
||||
|
||||
nMetasploitCve, err := FillWithMetasploit(&r, config.Conf.Metasploit)
|
||||
nMetasploitCve, err := FillWithMetasploit(&r, config.Conf.Metasploit, config.Conf.LogOpts)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to fill with metasploit: %w", err)
|
||||
}
|
||||
logging.Log.Infof("%s: %d exploits are detected", r.FormatServerName(), nMetasploitCve)
|
||||
|
||||
if err := FillWithKEVuln(&r, config.Conf.KEVuln); err != nil {
|
||||
if err := FillWithKEVuln(&r, config.Conf.KEVuln, config.Conf.LogOpts); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to fill with Known Exploited Vulnerabilities: %w", err)
|
||||
}
|
||||
|
||||
if err := FillWithCTI(&r, config.Conf.Cti, config.Conf.LogOpts); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to fill with Cyber Threat Intelligences: %w", err)
|
||||
}
|
||||
|
||||
FillCweDict(&r)
|
||||
|
||||
r.ReportedBy, _ = os.Hostname()
|
||||
@@ -143,7 +148,7 @@ func Detect(rs []models.ScanResult, dir string) ([]models.ScanResult, error) {
|
||||
if config.Conf.DiffPlus || config.Conf.DiffMinus {
|
||||
prevs, err := loadPrevious(rs, config.Conf.ResultsDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, xerrors.Errorf("Failed to load previous results. err: %w", err)
|
||||
}
|
||||
rs = diff(rs, prevs, config.Conf.DiffPlus, config.Conf.DiffMinus)
|
||||
}
|
||||
@@ -205,33 +210,23 @@ func Detect(rs []models.ScanResult, dir string) ([]models.ScanResult, error) {
|
||||
|
||||
// DetectPkgCves detects OS pkg cves
|
||||
// pass 2 configs
|
||||
func DetectPkgCves(r *models.ScanResult, ovalCnf config.GovalDictConf, gostCnf config.GostConf) error {
|
||||
func DetectPkgCves(r *models.ScanResult, ovalCnf config.GovalDictConf, gostCnf config.GostConf, logOpts logging.LogOpts) error {
|
||||
// Pkg Scan
|
||||
if r.Release != "" {
|
||||
if len(r.Packages)+len(r.SrcPackages) > 0 {
|
||||
// OVAL, gost(Debian Security Tracker) does not support Package for Raspbian, so skip it.
|
||||
if r.Family == constant.Raspbian {
|
||||
r = r.RemoveRaspbianPackFromResult()
|
||||
}
|
||||
|
||||
// OVAL
|
||||
if err := detectPkgsCvesWithOval(ovalCnf, r); err != nil {
|
||||
return xerrors.Errorf("Failed to detect CVE with OVAL: %w", err)
|
||||
}
|
||||
|
||||
// gost
|
||||
if err := detectPkgsCvesWithGost(gostCnf, r); err != nil {
|
||||
return xerrors.Errorf("Failed to detect CVE with gost: %w", err)
|
||||
}
|
||||
} else {
|
||||
logging.Log.Infof("Number of packages is 0. Skip OVAL and gost detection")
|
||||
if isPkgCvesDetactable(r) {
|
||||
// OVAL, gost(Debian Security Tracker) does not support Package for Raspbian, so skip it.
|
||||
if r.Family == constant.Raspbian {
|
||||
r = r.RemoveRaspbianPackFromResult()
|
||||
}
|
||||
|
||||
// OVAL
|
||||
if err := detectPkgsCvesWithOval(ovalCnf, r, logOpts); err != nil {
|
||||
return xerrors.Errorf("Failed to detect CVE with OVAL: %w", err)
|
||||
}
|
||||
|
||||
// gost
|
||||
if err := detectPkgsCvesWithGost(gostCnf, r, logOpts); err != nil {
|
||||
return xerrors.Errorf("Failed to detect CVE with gost: %w", err)
|
||||
}
|
||||
} else if reuseScannedCves(r) {
|
||||
logging.Log.Infof("r.Release is empty. Use CVEs as it as.")
|
||||
} else if r.Family == constant.ServerTypePseudo {
|
||||
logging.Log.Infof("pseudo type. Skip OVAL and gost detection")
|
||||
} else {
|
||||
logging.Log.Infof("r.Release is empty. detect as pseudo type. Skip OVAL and gost detection")
|
||||
}
|
||||
|
||||
for i, v := range r.ScannedCves {
|
||||
@@ -264,11 +259,40 @@ func DetectPkgCves(r *models.ScanResult, ovalCnf config.GovalDictConf, gostCnf c
|
||||
return nil
|
||||
}
|
||||
|
||||
// isPkgCvesDetactable checks whether CVEs is detactable with gost and oval from the result
|
||||
func isPkgCvesDetactable(r *models.ScanResult) bool {
|
||||
switch r.Family {
|
||||
case constant.FreeBSD, constant.ServerTypePseudo:
|
||||
logging.Log.Infof("%s type. Skip OVAL and gost detection", r.Family)
|
||||
return false
|
||||
case constant.Windows:
|
||||
return true
|
||||
default:
|
||||
if r.ScannedVia == "trivy" {
|
||||
logging.Log.Infof("r.ScannedVia is trivy. Skip OVAL and gost detection")
|
||||
return false
|
||||
}
|
||||
|
||||
if r.Release == "" {
|
||||
logging.Log.Infof("r.Release is empty. Skip OVAL and gost detection")
|
||||
return false
|
||||
}
|
||||
|
||||
if len(r.Packages)+len(r.SrcPackages) == 0 {
|
||||
logging.Log.Infof("Number of packages is 0. Skip OVAL and gost detection")
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
// DetectGitHubCves fetches CVEs from GitHub Security Alerts
|
||||
func DetectGitHubCves(r *models.ScanResult, githubConfs map[string]config.GitHubConf) error {
|
||||
if len(githubConfs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.GitHubManifests = models.DependencyGraphManifests{}
|
||||
for ownerRepo, setting := range githubConfs {
|
||||
ss := strings.Split(ownerRepo, "/")
|
||||
if len(ss) != 2 {
|
||||
@@ -281,6 +305,10 @@ func DetectGitHubCves(r *models.ScanResult, githubConfs map[string]config.GitHub
|
||||
}
|
||||
logging.Log.Infof("%s: %d CVEs detected with GHSA %s/%s",
|
||||
r.FormatServerName(), n, owner, repo)
|
||||
|
||||
if err = DetectGitHubDependencyGraph(r, owner, repo, setting.Token); err != nil {
|
||||
return xerrors.Errorf("Failed to access GitHub Dependency graph: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -308,7 +336,7 @@ func FillCvesWithNvdJvn(r *models.ScanResult, cnf config.GoCveDictConf, logOpts
|
||||
|
||||
client, err := newGoCveDictClient(&cnf, logOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
return xerrors.Errorf("Failed to newGoCveDictClient. err: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := client.closeDB(); err != nil {
|
||||
@@ -316,14 +344,9 @@ func FillCvesWithNvdJvn(r *models.ScanResult, cnf config.GoCveDictConf, logOpts
|
||||
}
|
||||
}()
|
||||
|
||||
var ds []cvemodels.CveDetail
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
ds, err = client.fetchCveDetailsViaHTTP(cveIDs)
|
||||
} else {
|
||||
ds, err = client.fetchCveDetails(cveIDs)
|
||||
}
|
||||
ds, err := client.fetchCveDetails(cveIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
return xerrors.Errorf("Failed to fetchCveDetails. err: %w", err)
|
||||
}
|
||||
|
||||
for _, d := range ds {
|
||||
@@ -391,37 +414,43 @@ func fillCertAlerts(cvedetail *cvemodels.CveDetail) (dict models.AlertDict) {
}

// detectPkgsCvesWithOval fetches OVAL database
func detectPkgsCvesWithOval(cnf config.GovalDictConf, r *models.ScanResult) error {
ovalClient, err := oval.NewOVALClient(r.Family, cnf)
func detectPkgsCvesWithOval(cnf config.GovalDictConf, r *models.ScanResult, logOpts logging.LogOpts) error {
client, err := oval.NewOVALClient(r.Family, cnf, logOpts)
if err != nil {
return err
}
if ovalClient == nil {
return nil
}
defer func() {
if err := client.CloseDB(); err != nil {
logging.Log.Errorf("Failed to close the OVAL DB. err: %+v", err)
}
}()

logging.Log.Debugf("Check if oval fetched: %s %s", r.Family, r.Release)
ok, err := ovalClient.CheckIfOvalFetched(r.Family, r.Release)
ok, err := client.CheckIfOvalFetched(r.Family, r.Release)
if err != nil {
return err
}
if !ok {
if r.Family == constant.Debian {
switch r.Family {
case constant.Debian, constant.Ubuntu:
logging.Log.Infof("Skip OVAL and Scan with gost alone.")
logging.Log.Infof("%s: %d CVEs are detected with OVAL", r.FormatServerName(), 0)
return nil
case constant.Windows, constant.FreeBSD, constant.ServerTypePseudo:
return nil
default:
return xerrors.Errorf("OVAL entries of %s %s are not found. Fetch OVAL before reporting. For details, see `https://github.com/vulsio/goval-dictionary#usage`", r.Family, r.Release)
}
return xerrors.Errorf("OVAL entries of %s %s are not found. Fetch OVAL before reporting. For details, see `https://github.com/vulsio/goval-dictionary#usage`", r.Family, r.Release)
}

logging.Log.Debugf("Check if oval fresh: %s %s", r.Family, r.Release)
_, err = ovalClient.CheckIfOvalFresh(r.Family, r.Release)
_, err = client.CheckIfOvalFresh(r.Family, r.Release)
if err != nil {
return err
}

logging.Log.Debugf("Fill with oval: %s %s", r.Family, r.Release)
nCVEs, err := ovalClient.FillWithOval(r)
nCVEs, err := client.FillWithOval(r)
if err != nil {
return err
}
@@ -430,12 +459,11 @@ func detectPkgsCvesWithOval(cnf config.GovalDictConf, r *models.ScanResult) erro
return nil
}

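When OVAL data has not been fetched, the branch above now decides per family: Debian and Ubuntu fall back to gost-only detection, families without an OVAL feed are skipped, and anything else is an error. A minimal, self-contained sketch of that decision, using plain family strings instead of the constant package:

package main

import "fmt"

// ovalMissingAction mirrors the switch above: when no OVAL definitions were
// fetched, Debian/Ubuntu scan with gost alone, families without OVAL feeds
// are skipped silently, and everything else is treated as a hard error.
func ovalMissingAction(family string) (string, error) {
	switch family {
	case "debian", "ubuntu":
		return "skip OVAL, scan with gost alone", nil
	case "windows", "freebsd", "pseudo":
		return "skip OVAL", nil
	default:
		return "", fmt.Errorf("OVAL entries of %s are not found. Fetch OVAL before reporting", family)
	}
}

func main() {
	for _, f := range []string{"ubuntu", "freebsd", "centos"} {
		action, err := ovalMissingAction(f)
		fmt.Println(f, action, err)
	}
}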
func detectPkgsCvesWithGost(cnf config.GostConf, r *models.ScanResult) error {
client, err := gost.NewClient(cnf, r.Family)
func detectPkgsCvesWithGost(cnf config.GostConf, r *models.ScanResult, logOpts logging.LogOpts) error {
client, err := gost.NewGostClient(cnf, r.Family, logOpts)
if err != nil {
return xerrors.Errorf("Failed to new a gost client: %w", err)
}

defer func() {
if err := client.CloseDB(); err != nil {
logging.Log.Errorf("Failed to close the gost DB. err: %+v", err)
@@ -444,19 +472,21 @@ func detectPkgsCvesWithGost(cnf config.GostConf, r *models.ScanResult) error {

nCVEs, err := client.DetectCVEs(r, true)
if err != nil {
if r.Family == constant.Debian {
switch r.Family {
case constant.Debian, constant.Ubuntu:
return xerrors.Errorf("Failed to detect CVEs with gost: %w", err)
default:
return xerrors.Errorf("Failed to detect unfixed CVEs with gost: %w", err)
}
return xerrors.Errorf("Failed to detect unfixed CVEs with gost: %w", err)
}

if r.Family == constant.Debian {
logging.Log.Infof("%s: %d CVEs are detected with gost",
r.FormatServerName(), nCVEs)
} else {
logging.Log.Infof("%s: %d unfixed CVEs are detected with gost",
r.FormatServerName(), nCVEs)
switch r.Family {
case constant.Debian, constant.Ubuntu:
logging.Log.Infof("%s: %d CVEs are detected with gost", r.FormatServerName(), nCVEs)
default:
logging.Log.Infof("%s: %d unfixed CVEs are detected with gost", r.FormatServerName(), nCVEs)
}

return nil
}

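Both detectors release their DB handle through a deferred closure so a close failure is logged rather than lost. A self-contained sketch of that idiom with a stand-in closer type (not the real gost or OVAL client):

package main

import (
	"fmt"
	"log"
)

type fakeDB struct{}

// CloseDB stands in for the driver's close method and always fails here,
// just to show where the error ends up.
func (fakeDB) CloseDB() error { return fmt.Errorf("connection already gone") }

func detect(db fakeDB) error {
	// Deferred close: its error cannot change the function's return value,
	// so it is logged instead of being silently discarded.
	defer func() {
		if err := db.CloseDB(); err != nil {
			log.Printf("Failed to close the DB. err: %+v", err)
		}
	}()
	// ... detection work would happen here ...
	return nil
}

func main() {
	_ = detect(fakeDB{})
}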
@@ -464,7 +494,7 @@ func detectPkgsCvesWithGost(cnf config.GostConf, r *models.ScanResult) error {
|
||||
func DetectCpeURIsCves(r *models.ScanResult, cpes []Cpe, cnf config.GoCveDictConf, logOpts logging.LogOpts) error {
|
||||
client, err := newGoCveDictClient(&cnf, logOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
return xerrors.Errorf("Failed to newGoCveDictClient. err: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := client.closeDB(); err != nil {
|
||||
@@ -476,7 +506,7 @@ func DetectCpeURIsCves(r *models.ScanResult, cpes []Cpe, cnf config.GoCveDictCon
|
||||
for _, cpe := range cpes {
|
||||
details, err := client.detectCveByCpeURI(cpe.CpeURI, cpe.UseJVN)
|
||||
if err != nil {
|
||||
return err
|
||||
return xerrors.Errorf("Failed to detectCveByCpeURI. err: %w", err)
|
||||
}
|
||||
|
||||
for _, detail := range details {
|
||||
@@ -551,17 +581,13 @@ func FillCweDict(r *models.ScanResult) {
|
||||
|
||||
dict := map[string]models.CweDictEntry{}
|
||||
for id := range uniqCweIDMap {
|
||||
entry := models.CweDictEntry{}
|
||||
entry := models.CweDictEntry{
|
||||
OwaspTopTens: map[string]string{},
|
||||
CweTopTwentyfives: map[string]string{},
|
||||
SansTopTwentyfives: map[string]string{},
|
||||
}
|
||||
if e, ok := cwe.CweDictEn[id]; ok {
|
||||
if rank, ok := cwe.OwaspTopTen2017[id]; ok {
|
||||
entry.OwaspTopTen2017 = rank
|
||||
}
|
||||
if rank, ok := cwe.CweTopTwentyfive2019[id]; ok {
|
||||
entry.CweTopTwentyfive2019 = rank
|
||||
}
|
||||
if rank, ok := cwe.SansTopTwentyfive[id]; ok {
|
||||
entry.SansTopTwentyfive = rank
|
||||
}
|
||||
fillCweRank(&entry, id)
|
||||
entry.En = &e
|
||||
} else {
|
||||
logging.Log.Debugf("CWE-ID %s is not found in English CWE Dict", id)
|
||||
@@ -570,23 +596,34 @@ func FillCweDict(r *models.ScanResult) {
|
||||
|
||||
if r.Lang == "ja" {
|
||||
if e, ok := cwe.CweDictJa[id]; ok {
|
||||
if rank, ok := cwe.OwaspTopTen2017[id]; ok {
|
||||
entry.OwaspTopTen2017 = rank
|
||||
}
|
||||
if rank, ok := cwe.CweTopTwentyfive2019[id]; ok {
|
||||
entry.CweTopTwentyfive2019 = rank
|
||||
}
|
||||
if rank, ok := cwe.SansTopTwentyfive[id]; ok {
|
||||
entry.SansTopTwentyfive = rank
|
||||
}
|
||||
fillCweRank(&entry, id)
|
||||
entry.Ja = &e
|
||||
} else {
|
||||
logging.Log.Debugf("CWE-ID %s is not found in Japanese CWE Dict", id)
|
||||
entry.Ja = &cwe.Cwe{CweID: id}
|
||||
}
|
||||
}
|
||||
|
||||
dict[id] = entry
}
r.CweDict = dict
return
}

func fillCweRank(entry *models.CweDictEntry, id string) {
for year, ranks := range cwe.OwaspTopTens {
if rank, ok := ranks[id]; ok {
entry.OwaspTopTens[year] = rank
}
}
for year, ranks := range cwe.CweTopTwentyfives {
if rank, ok := ranks[id]; ok {
entry.CweTopTwentyfives[year] = rank
}
}
for year, ranks := range cwe.SansTopTwentyfives {
if rank, ok := ranks[id]; ok {
entry.SansTopTwentyfives[year] = rank
}
}
}

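fillCweRank copies per-year rankings into the entry's year-keyed maps. A small sketch of the data shape it assumes, with made-up ranking values:

package main

import "fmt"

func main() {
	// Outer key: year of the ranking; inner key: CWE-ID; value: rank.
	owaspTopTens := map[string]map[string]string{
		"2017": {"CWE-79": "7"},
		"2021": {"CWE-79": "3"},
	}

	// entryOwasp plays the role of entry.OwaspTopTens for a single CWE-ID.
	entryOwasp := map[string]string{}
	id := "CWE-79"
	for year, ranks := range owaspTopTens {
		if rank, ok := ranks[id]; ok {
			entryOwasp[year] = rank
		}
	}
	fmt.Println(entryOwasp) // map[2017:7 2021:3]
}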
@@ -9,33 +9,73 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
exploitdb "github.com/vulsio/go-exploitdb/db"
|
||||
exploitmodels "github.com/vulsio/go-exploitdb/models"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
exploitlog "github.com/vulsio/go-exploitdb/util"
|
||||
)
|
||||
|
||||
// goExploitDBClient is a DB Driver
type goExploitDBClient struct {
driver exploitdb.DB
baseURL string
}

// closeDB close a DB connection
func (client goExploitDBClient) closeDB() error {
if client.driver == nil {
return nil
}
return client.driver.CloseDB()
}

func newGoExploitDBClient(cnf config.VulnDictInterface, o logging.LogOpts) (*goExploitDBClient, error) {
if err := exploitlog.SetLogger(o.LogToFile, o.LogDir, o.Debug, o.LogJSON); err != nil {
return nil, xerrors.Errorf("Failed to set go-exploitdb logger. err: %w", err)
}

db, err := newExploitDB(cnf)
if err != nil {
return nil, xerrors.Errorf("Failed to newExploitDB. err: %w", err)
}
return &goExploitDBClient{driver: db, baseURL: cnf.GetURL()}, nil
}

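The go-exploitdb, go-kev and go-msfdb wrappers added in this change share one shape: the client holds either a DB driver or, in fetch-via-HTTP mode, a nil driver plus a base URL. A minimal sketch of that shape with stand-in types; none of these names are from the Vuls codebase:

package main

import "fmt"

type driver interface {
	GetByCveID(cveID string) ([]string, error)
	CloseDB() error
}

// client mirrors the goExploitDBClient shape: a nil driver means HTTP mode.
type client struct {
	driver  driver
	baseURL string
}

func (c client) closeDB() error {
	if c.driver == nil {
		return nil // nothing to close in HTTP mode
	}
	return c.driver.CloseDB()
}

func (c client) lookup(cveID string) ([]string, error) {
	if c.driver == nil {
		// HTTP mode: the real code joins baseURL with "cves" and queries it.
		return nil, fmt.Errorf("would GET %s/cves/%s", c.baseURL, cveID)
	}
	return c.driver.GetByCveID(cveID)
}

func main() {
	c := client{baseURL: "http://127.0.0.1:1326"} // illustrative server address
	defer c.closeDB()
	_, err := c.lookup("CVE-2021-44228")
	fmt.Println(err)
}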
// FillWithExploit fills exploit information that has in Exploit
|
||||
func FillWithExploit(r *models.ScanResult, cnf config.ExploitConf) (nExploitCve int, err error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
func FillWithExploit(r *models.ScanResult, cnf config.ExploitConf, logOpts logging.LogOpts) (nExploitCve int, err error) {
|
||||
client, err := newGoExploitDBClient(&cnf, logOpts)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to newGoExploitDBClient. err: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := client.closeDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if client.driver == nil {
|
||||
var cveIDs []string
|
||||
for cveID := range r.ScannedCves {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
}
|
||||
prefix, _ := util.URLPathJoin(cnf.GetURL(), "cves")
|
||||
prefix, err := util.URLPathJoin(client.baseURL, "cves")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
responses, err := getExploitsViaHTTP(cveIDs, prefix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to get Exploits via HTTP. err: %w", err)
|
||||
}
|
||||
for _, res := range responses {
|
||||
exps := []exploitmodels.Exploit{}
|
||||
if err := json.Unmarshal([]byte(res.json), &exps); err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
}
|
||||
exploits := ConvertToModelsExploit(exps)
|
||||
v, ok := r.ScannedCves[res.request.cveID]
|
||||
@@ -46,25 +86,13 @@ func FillWithExploit(r *models.ScanResult, cnf config.ExploitConf) (nExploitCve
|
||||
nExploitCve++
|
||||
}
|
||||
} else {
|
||||
driver, locked, err := newExploitDB(&cnf)
|
||||
if locked {
|
||||
return 0, xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if cveID == "" {
|
||||
continue
|
||||
}
|
||||
es, err := driver.GetExploitByCveID(cveID)
|
||||
es, err := client.driver.GetExploitByCveID(cveID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to get Exploits by CVE-ID. err: %w", err)
|
||||
}
|
||||
if len(es) == 0 {
|
||||
continue
|
||||
@@ -81,14 +109,20 @@ func FillWithExploit(r *models.ScanResult, cnf config.ExploitConf) (nExploitCve
|
||||
// ConvertToModelsExploit converts exploit model to vuls model
|
||||
func ConvertToModelsExploit(es []exploitmodels.Exploit) (exploits []models.Exploit) {
|
||||
for _, e := range es {
|
||||
var documentURL, shellURL *string
|
||||
var documentURL, shellURL, paperURL, ghdbURL *string
|
||||
if e.OffensiveSecurity != nil {
|
||||
os := e.OffensiveSecurity
|
||||
if os.Document != nil {
|
||||
documentURL = &os.Document.DocumentURL
|
||||
documentURL = &os.Document.FileURL
|
||||
}
|
||||
if os.ShellCode != nil {
|
||||
shellURL = &os.ShellCode.ShellCodeURL
|
||||
shellURL = &os.ShellCode.FileURL
|
||||
}
|
||||
if os.Paper != nil {
|
||||
paperURL = &os.Paper.FileURL
|
||||
}
|
||||
if os.GHDB != nil {
|
||||
ghdbURL = &os.GHDB.Link
|
||||
}
|
||||
}
|
||||
exploit := models.Exploit{
|
||||
@@ -98,6 +132,8 @@ func ConvertToModelsExploit(es []exploitmodels.Exploit) (exploits []models.Explo
|
||||
Description: e.Description,
|
||||
DocumentURL: documentURL,
|
||||
ShellCodeURL: shellURL,
|
||||
PaperURL: paperURL,
|
||||
GHDBURL: ghdbURL,
|
||||
}
|
||||
exploits = append(exploits, exploit)
|
||||
}
|
||||
@@ -203,19 +239,20 @@ func httpGetExploit(url string, req exploitRequest, resChan chan<- exploitRespon
|
||||
}
|
||||
}
|
||||
|
||||
func newExploitDB(cnf config.VulnDictInterface) (driver exploitdb.DB, locked bool, err error) {
|
||||
func newExploitDB(cnf config.VulnDictInterface) (driver exploitdb.DB, err error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
return nil, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
if driver, locked, err = exploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), exploitdb.Option{}); err != nil {
|
||||
driver, locked, err := exploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), exploitdb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("exploitDB is locked. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to init exploit DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, false, err
|
||||
return nil, xerrors.Errorf("Failed to init exploit DB. DB Path: %s, err: %w", path, err)
|
||||
}
|
||||
return driver, false, nil
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
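Collapsing the (driver, locked, err) return into (driver, err) works because the lock case is folded into a wrapped error. A small sketch of the same idea with the standard library's %w verb and errors.Is; the path and error text are illustrative:

package main

import (
	"errors"
	"fmt"
)

var errLocked = errors.New("database is locked")

func openDB(path string, simulateLock bool) (string, error) {
	if simulateLock {
		// The lock condition becomes one wrapped error instead of a separate bool.
		return "", fmt.Errorf("Failed to init exploit DB. SQLite3: %s is locked. err: %w", path, errLocked)
	}
	return "handle", nil
}

func main() {
	_, err := openDB("/var/lib/vuls/go-exploitdb.sqlite3", true)
	fmt.Println(err)
	// Callers can still detect the lock condition through the wrap chain.
	fmt.Println(errors.Is(err, errLocked))
}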
@@ -8,7 +8,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
@@ -29,7 +29,7 @@ func DetectGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string,
|
||||
// TODO Use `https://github.com/shurcooL/githubv4` if the tool supports vulnerabilityAlerts Endpoint
|
||||
// Memo : https://developer.github.com/v4/explorer/
|
||||
const jsonfmt = `{"query":
|
||||
"query { repository(owner:\"%s\", name:\"%s\") { url vulnerabilityAlerts(first: %d, %s) { pageInfo { endCursor hasNextPage startCursor } edges { node { id dismissReason dismissedAt securityVulnerability{ package { name ecosystem } severity vulnerableVersionRange firstPatchedVersion { identifier } } securityAdvisory { description ghsaId permalink publishedAt summary updatedAt withdrawnAt origin severity references { url } identifiers { type value } } } } } } } "}`
|
||||
"query { repository(owner:\"%s\", name:\"%s\") { url vulnerabilityAlerts(first: %d, states:[OPEN], %s) { pageInfo { endCursor hasNextPage startCursor } edges { node { id dismissReason dismissedAt securityVulnerability{ package { name ecosystem } severity vulnerableVersionRange firstPatchedVersion { identifier } } vulnerableManifestFilename vulnerableManifestPath vulnerableRequirements securityAdvisory { description ghsaId permalink publishedAt summary updatedAt withdrawnAt origin severity references { url } identifiers { type value } } } } } } } "}`
|
||||
after := ""
|
||||
|
||||
for {
|
||||
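The alert query now filters to states:[OPEN] and requests the vulnerable manifest fields. A trimmed, self-contained sketch of posting such a GraphQL query to the real endpoint; the owner/repo, the reduced query, and the GITHUB_TOKEN environment variable are illustrative only:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
	"os"
)

func main() {
	// A reduced form of the vulnerabilityAlerts query above, limited to open alerts.
	query := `{"query": "query { repository(owner:\"future-architect\", name:\"vuls\") { vulnerabilityAlerts(first: 1, states:[OPEN]) { edges { node { id } } } } }"}`

	req, err := http.NewRequest(http.MethodPost, "https://api.github.com/graphql", bytes.NewBufferString(query))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "bearer "+os.Getenv("GITHUB_TOKEN"))
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}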
@@ -57,7 +57,7 @@ func DetectGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string,
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
@@ -79,11 +79,15 @@ func DetectGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string,
|
||||
continue
|
||||
}
|
||||
|
||||
pkgName := fmt.Sprintf("%s %s",
|
||||
alerts.Data.Repository.URL, v.Node.SecurityVulnerability.Package.Name)
|
||||
|
||||
m := models.GitHubSecurityAlert{
|
||||
PackageName: pkgName,
|
||||
Repository: alerts.Data.Repository.URL,
|
||||
Package: models.GSAVulnerablePackage{
|
||||
Name: v.Node.SecurityVulnerability.Package.Name,
|
||||
Ecosystem: v.Node.SecurityVulnerability.Package.Ecosystem,
|
||||
ManifestFilename: v.Node.VulnerableManifestFilename,
|
||||
ManifestPath: v.Node.VulnerableManifestPath,
|
||||
Requirements: v.Node.VulnerableRequirements,
|
||||
},
|
||||
FixedIn: v.Node.SecurityVulnerability.FirstPatchedVersion.Identifier,
|
||||
AffectedRange: v.Node.SecurityVulnerability.VulnerableVersionRange,
|
||||
Dismissed: len(v.Node.DismissReason) != 0,
|
||||
@@ -148,7 +152,7 @@ func DetectGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string,
|
||||
return nCVEs, err
|
||||
}
|
||||
|
||||
//SecurityAlerts has detected CVE-IDs, PackageNames, Refs
|
||||
// SecurityAlerts has detected CVE-IDs, PackageNames, Refs
|
||||
type SecurityAlerts struct {
|
||||
Data struct {
|
||||
Repository struct {
|
||||
@@ -175,7 +179,10 @@ type SecurityAlerts struct {
|
||||
Identifier string `json:"identifier"`
|
||||
} `json:"firstPatchedVersion"`
|
||||
} `json:"securityVulnerability"`
|
||||
SecurityAdvisory struct {
|
||||
VulnerableManifestFilename string `json:"vulnerableManifestFilename"`
|
||||
VulnerableManifestPath string `json:"vulnerableManifestPath"`
|
||||
VulnerableRequirements string `json:"vulnerableRequirements"`
|
||||
SecurityAdvisory struct {
|
||||
Description string `json:"description"`
|
||||
GhsaID string `json:"ghsaId"`
|
||||
Permalink string `json:"permalink"`
|
||||
@@ -199,3 +206,138 @@ type SecurityAlerts struct {
|
||||
} `json:"repository"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// DetectGitHubDependencyGraph access to owner/repo on GitHub and fetch dependency graph of the repository via GitHub API v4 GraphQL and then set to the given ScanResult.
|
||||
// https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-the-dependency-graph
|
||||
func DetectGitHubDependencyGraph(r *models.ScanResult, owner, repo, token string) (err error) {
|
||||
src := oauth2.StaticTokenSource(
|
||||
&oauth2.Token{AccessToken: token},
|
||||
)
|
||||
//TODO Proxy
|
||||
httpClient := oauth2.NewClient(context.Background(), src)
|
||||
|
||||
return fetchDependencyGraph(r, httpClient, owner, repo, "", "")
|
||||
}
|
||||
|
||||
// recursive function
|
||||
func fetchDependencyGraph(r *models.ScanResult, httpClient *http.Client, owner, repo, after, dependenciesAfter string) (err error) {
|
||||
const queryFmt = `{"query":
|
||||
"query { repository(owner:\"%s\", name:\"%s\") { url dependencyGraphManifests(first: %d, withDependencies: true%s) { pageInfo { endCursor hasNextPage } edges { node { blobPath filename repository { url } parseable exceedsMaxSize dependenciesCount dependencies%s { pageInfo { endCursor hasNextPage } edges { node { packageName packageManager repository { url } requirements hasDependencies } } } } } } } }"}`
|
||||
|
||||
queryStr := fmt.Sprintf(queryFmt, owner, repo, 100, after, dependenciesAfter)
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
|
||||
"https://api.github.com/graphql",
|
||||
bytes.NewBuffer([]byte(queryStr)),
|
||||
)
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// https://docs.github.com/en/graphql/overview/schema-previews#access-to-a-repository-s-dependency-graph-preview
|
||||
// TODO remove this header if it is no longer preview status in the future.
|
||||
req.Header.Set("Accept", "application/vnd.github.hawkgirl-preview+json")
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
graph := DependencyGraph{}
|
||||
if err := json.Unmarshal(body, &graph); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if graph.Data.Repository.URL == "" {
|
||||
return errof.New(errof.ErrFailedToAccessGithubAPI,
|
||||
fmt.Sprintf("Failed to access to GitHub API. Response: %s", string(body)))
|
||||
}
|
||||
|
||||
dependenciesAfter = ""
|
||||
for _, m := range graph.Data.Repository.DependencyGraphManifests.Edges {
|
||||
manifest, ok := r.GitHubManifests[m.Node.BlobPath]
|
||||
if !ok {
|
||||
manifest = models.DependencyGraphManifest{
|
||||
BlobPath: m.Node.BlobPath,
|
||||
Filename: m.Node.Filename,
|
||||
Repository: m.Node.Repository.URL,
|
||||
Dependencies: []models.Dependency{},
|
||||
}
|
||||
}
|
||||
for _, d := range m.Node.Dependencies.Edges {
|
||||
manifest.Dependencies = append(manifest.Dependencies, models.Dependency{
|
||||
PackageName: d.Node.PackageName,
|
||||
PackageManager: d.Node.PackageManager,
|
||||
Repository: d.Node.Repository.URL,
|
||||
Requirements: d.Node.Requirements,
|
||||
})
|
||||
}
|
||||
r.GitHubManifests[m.Node.BlobPath] = manifest
|
||||
|
||||
if m.Node.Dependencies.PageInfo.HasNextPage {
|
||||
dependenciesAfter = fmt.Sprintf(`(after: \"%s\")`, m.Node.Dependencies.PageInfo.EndCursor)
|
||||
}
|
||||
}
|
||||
if dependenciesAfter != "" {
|
||||
return fetchDependencyGraph(r, httpClient, owner, repo, after, dependenciesAfter)
|
||||
}
|
||||
|
||||
if graph.Data.Repository.DependencyGraphManifests.PageInfo.HasNextPage {
|
||||
after = fmt.Sprintf(`, after: \"%s\"`, graph.Data.Repository.DependencyGraphManifests.PageInfo.EndCursor)
|
||||
return fetchDependencyGraph(r, httpClient, owner, repo, after, dependenciesAfter)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DependencyGraph is a GitHub API response
|
||||
type DependencyGraph struct {
|
||||
Data struct {
|
||||
Repository struct {
|
||||
URL string `json:"url"`
|
||||
DependencyGraphManifests struct {
|
||||
PageInfo struct {
|
||||
EndCursor string `json:"endCursor"`
|
||||
HasNextPage bool `json:"hasNextPage"`
|
||||
} `json:"pageInfo"`
|
||||
Edges []struct {
|
||||
Node struct {
|
||||
BlobPath string `json:"blobPath"`
|
||||
Filename string `json:"filename"`
|
||||
Repository struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
Parseable bool `json:"parseable"`
|
||||
ExceedsMaxSize bool `json:"exceedsMaxSize"`
|
||||
DependenciesCount int `json:"dependenciesCount"`
|
||||
Dependencies struct {
|
||||
PageInfo struct {
|
||||
EndCursor string `json:"endCursor"`
|
||||
HasNextPage bool `json:"hasNextPage"`
|
||||
} `json:"pageInfo"`
|
||||
Edges []struct {
|
||||
Node struct {
|
||||
PackageName string `json:"packageName"`
|
||||
PackageManager string `json:"packageManager"`
|
||||
Repository struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
Requirements string `json:"requirements"`
|
||||
HasDependencies bool `json:"hasDependencies"`
|
||||
} `json:"node"`
|
||||
} `json:"edges"`
|
||||
} `json:"dependencies"`
|
||||
} `json:"node"`
|
||||
} `json:"edges"`
|
||||
} `json:"dependencyGraphManifests"`
|
||||
} `json:"repository"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
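fetchDependencyGraph walks two cursors, `after` for manifests and `dependenciesAfter` for the dependencies inside one manifest, and recurses until both hasNextPage flags are false. A GitHub-free sketch of the underlying cursor loop, simplified to a single cursor over in-memory data:

package main

import "fmt"

type page struct {
	items       []string
	endCursor   string
	hasNextPage bool
}

// fetchPage stands in for one GraphQL round trip: it returns a slice of
// results plus a cursor to resume from.
func fetchPage(data []string, after string, size int) page {
	start := 0
	if after != "" {
		fmt.Sscanf(after, "cursor-%d", &start)
	}
	end := start + size
	if end >= len(data) {
		return page{items: data[start:], hasNextPage: false}
	}
	return page{items: data[start:end], endCursor: fmt.Sprintf("cursor-%d", end), hasNextPage: true}
}

func main() {
	deps := []string{"lodash", "minimist", "left-pad", "express", "react"}
	var all []string
	after := ""
	for {
		p := fetchPage(deps, after, 2)
		all = append(all, p.items...)
		if !p.hasNextPage {
			break // no more pages: stop, as the recursion above does
		}
		after = p.endCursor
	}
	fmt.Println(all)
}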
@@ -9,25 +9,63 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
kevulndb "github.com/vulsio/go-kev/db"
|
||||
kevulnmodels "github.com/vulsio/go-kev/models"
|
||||
kevulnlog "github.com/vulsio/go-kev/utils"
|
||||
)
|
||||
|
||||
// goKEVulnDBClient is a DB Driver
|
||||
type goKEVulnDBClient struct {
|
||||
driver kevulndb.DB
|
||||
baseURL string
|
||||
}
|
||||
|
||||
// closeDB close a DB connection
|
||||
func (client goKEVulnDBClient) closeDB() error {
|
||||
if client.driver == nil {
|
||||
return nil
|
||||
}
|
||||
return client.driver.CloseDB()
|
||||
}
|
||||
|
||||
func newGoKEVulnDBClient(cnf config.VulnDictInterface, o logging.LogOpts) (*goKEVulnDBClient, error) {
|
||||
if err := kevulnlog.SetLogger(o.LogToFile, o.LogDir, o.Debug, o.LogJSON); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to set go-kev logger. err: %w", err)
|
||||
}
|
||||
|
||||
db, err := newKEVulnDB(cnf)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to newKEVulnDB. err: %w", err)
|
||||
}
|
||||
return &goKEVulnDBClient{driver: db, baseURL: cnf.GetURL()}, nil
|
||||
}
|
||||
|
||||
// FillWithKEVuln :
|
||||
func FillWithKEVuln(r *models.ScanResult, cnf config.KEVulnConf) error {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
func FillWithKEVuln(r *models.ScanResult, cnf config.KEVulnConf, logOpts logging.LogOpts) error {
|
||||
client, err := newGoKEVulnDBClient(&cnf, logOpts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := client.closeDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
nKEV := 0
|
||||
if client.driver == nil {
|
||||
var cveIDs []string
|
||||
for cveID := range r.ScannedCves {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
}
|
||||
prefix, err := util.URLPathJoin(cnf.GetURL(), "cves")
|
||||
prefix, err := util.URLPathJoin(client.baseURL, "cves")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -53,27 +91,16 @@ func FillWithKEVuln(r *models.ScanResult, cnf config.KEVulnConf) error {
|
||||
v, ok := r.ScannedCves[res.request.cveID]
|
||||
if ok {
|
||||
v.AlertDict.CISA = alerts
|
||||
nKEV++
|
||||
}
|
||||
r.ScannedCves[res.request.cveID] = v
|
||||
}
|
||||
} else {
|
||||
driver, locked, err := newKEVulnDB(&cnf)
|
||||
if locked {
|
||||
return xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if cveID == "" {
|
||||
continue
|
||||
}
|
||||
kevulns, err := driver.GetKEVulnByCveID(cveID)
|
||||
kevulns, err := client.driver.GetKEVulnByCveID(cveID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -91,9 +118,12 @@ func FillWithKEVuln(r *models.ScanResult, cnf config.KEVulnConf) error {
|
||||
}
|
||||
|
||||
vuln.AlertDict.CISA = alerts
|
||||
nKEV++
|
||||
r.ScannedCves[cveID] = vuln
|
||||
}
|
||||
}
|
||||
|
||||
logging.Log.Infof("%s: Known Exploited Vulnerabilities are detected for %d CVEs", r.FormatServerName(), nKEV)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -196,19 +226,20 @@ func httpGetKEVuln(url string, req kevulnRequest, resChan chan<- kevulnResponse,
|
||||
}
|
||||
}
|
||||
|
||||
func newKEVulnDB(cnf config.VulnDictInterface) (driver kevulndb.DB, locked bool, err error) {
|
||||
func newKEVulnDB(cnf config.VulnDictInterface) (kevulndb.DB, error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
return nil, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
if driver, locked, err = kevulndb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), kevulndb.Option{}); err != nil {
|
||||
driver, locked, err := kevulndb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), kevulndb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("kevulnDB is locked. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to init kevuln DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, false, err
|
||||
return nil, xerrors.Errorf("Failed to init kevuln DB. DB Path: %s, err: %w", path, err)
|
||||
}
|
||||
return driver, false, nil
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
@@ -6,14 +6,11 @@ package detector
|
||||
import (
|
||||
"context"
|
||||
|
||||
db2 "github.com/aquasecurity/trivy-db/pkg/db"
|
||||
trivydb "github.com/aquasecurity/trivy-db/pkg/db"
|
||||
"github.com/aquasecurity/trivy-db/pkg/metadata"
|
||||
"github.com/aquasecurity/trivy/pkg/db"
|
||||
"github.com/aquasecurity/trivy/pkg/github"
|
||||
"github.com/aquasecurity/trivy/pkg/indicator"
|
||||
"github.com/aquasecurity/trivy/pkg/log"
|
||||
"github.com/spf13/afero"
|
||||
"golang.org/x/xerrors"
|
||||
"k8s.io/utils/clock"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
@@ -33,14 +30,14 @@ func DetectLibsCves(r *models.ScanResult, cacheDir string, noProgress bool) (err
|
||||
}
|
||||
|
||||
logging.Log.Info("Updating library db...")
|
||||
if err := downloadDB("", cacheDir, noProgress, false, false); err != nil {
|
||||
if err := downloadDB("", cacheDir, noProgress, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if err := db2.Init(cacheDir); err != nil {
|
||||
if err := trivydb.Init(cacheDir); err != nil {
|
||||
return err
|
||||
}
|
||||
defer db2.Close()
|
||||
defer trivydb.Close()
|
||||
|
||||
for _, lib := range r.LibraryScanners {
|
||||
vinfos, err := lib.Scan()
|
||||
@@ -65,10 +62,10 @@ func DetectLibsCves(r *models.ScanResult, cacheDir string, noProgress bool) (err
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadDB(appVersion, cacheDir string, quiet, light, skipUpdate bool) error {
|
||||
client := initializeDBClient(cacheDir, quiet)
|
||||
func downloadDB(appVersion, cacheDir string, quiet, skipUpdate bool) error {
|
||||
client := db.NewClient(cacheDir, quiet, false)
|
||||
ctx := context.Background()
|
||||
needsUpdate, err := client.NeedsUpdate(appVersion, light, skipUpdate)
|
||||
needsUpdate, err := client.NeedsUpdate(appVersion, skipUpdate)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("database error: %w", err)
|
||||
}
|
||||
@@ -76,12 +73,9 @@ func downloadDB(appVersion, cacheDir string, quiet, light, skipUpdate bool) erro
|
||||
if needsUpdate {
|
||||
logging.Log.Info("Need to update DB")
|
||||
logging.Log.Info("Downloading DB...")
|
||||
if err := client.Download(ctx, cacheDir, light); err != nil {
|
||||
if err := client.Download(ctx, cacheDir); err != nil {
|
||||
return xerrors.Errorf("failed to download vulnerability DB: %w", err)
|
||||
}
|
||||
if err = client.UpdateMetadata(cacheDir); err != nil {
|
||||
return xerrors.Errorf("unable to update database metadata: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
// for debug
|
||||
@@ -91,24 +85,13 @@ func downloadDB(appVersion, cacheDir string, quiet, light, skipUpdate bool) erro
|
||||
return nil
|
||||
}
|
||||
|
||||
func initializeDBClient(cacheDir string, quiet bool) db.Client {
|
||||
config := db2.Config{}
|
||||
client := github.NewClient()
|
||||
progressBar := indicator.NewProgressBar(quiet)
|
||||
realClock := clock.RealClock{}
|
||||
fs := afero.NewOsFs()
|
||||
metadata := db.NewMetadata(fs, cacheDir)
|
||||
dbClient := db.NewClient(config, client, progressBar, realClock, metadata)
|
||||
return dbClient
|
||||
}
|
||||
|
||||
func showDBInfo(cacheDir string) error {
|
||||
m := db.NewMetadata(afero.NewOsFs(), cacheDir)
|
||||
metadata, err := m.Get()
|
||||
m := metadata.NewClient(cacheDir)
|
||||
meta, err := m.Get()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("something wrong with DB: %w", err)
|
||||
}
|
||||
logging.Log.Debugf("DB Schema: %d, Type: %d, UpdatedAt: %s, NextUpdate: %s",
|
||||
metadata.Version, metadata.Type, metadata.UpdatedAt, metadata.NextUpdate)
|
||||
log.Logger.Debugf("DB Schema: %d, UpdatedAt: %s, NextUpdate: %s, DownloadedAt: %s",
|
||||
meta.Version, meta.UpdatedAt, meta.NextUpdate, meta.DownloadedAt)
|
||||
return nil
|
||||
}
|
||||
|
||||
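showDBInfo above switches to the trivy-db metadata client. A minimal sketch that mirrors exactly the calls shown in that hunk (metadata.NewClient and Get), assuming the module version pinned in the go.mod below; the cache path is illustrative:

package main

import (
	"fmt"
	"log"

	"github.com/aquasecurity/trivy-db/pkg/metadata"
)

func main() {
	cacheDir := "/tmp/trivy-cache" // illustrative: same directory passed to downloadDB above

	// Same calls as showDBInfo: read the downloaded DB's metadata.
	c := metadata.NewClient(cacheDir)
	meta, err := c.Get()
	if err != nil {
		log.Fatalf("something wrong with DB: %v", err)
	}
	fmt.Printf("DB Schema: %d, UpdatedAt: %s, NextUpdate: %s, DownloadedAt: %s\n",
		meta.Version, meta.UpdatedAt, meta.NextUpdate, meta.DownloadedAt)
}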
@@ -9,35 +9,73 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
metasploitdb "github.com/vulsio/go-msfdb/db"
|
||||
metasploitmodels "github.com/vulsio/go-msfdb/models"
|
||||
"golang.org/x/xerrors"
|
||||
metasploitlog "github.com/vulsio/go-msfdb/utils"
|
||||
)
|
||||
|
||||
// goMetasploitDBClient is a DB Driver
|
||||
type goMetasploitDBClient struct {
|
||||
driver metasploitdb.DB
|
||||
baseURL string
|
||||
}
|
||||
|
||||
// closeDB close a DB connection
|
||||
func (client goMetasploitDBClient) closeDB() error {
|
||||
if client.driver == nil {
|
||||
return nil
|
||||
}
|
||||
return client.driver.CloseDB()
|
||||
}
|
||||
|
||||
func newGoMetasploitDBClient(cnf config.VulnDictInterface, o logging.LogOpts) (*goMetasploitDBClient, error) {
|
||||
if err := metasploitlog.SetLogger(o.LogToFile, o.LogDir, o.Debug, o.LogJSON); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to set go-msfdb logger. err: %w", err)
|
||||
}
|
||||
|
||||
db, err := newMetasploitDB(cnf)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to newMetasploitDB. err: %w", err)
|
||||
}
|
||||
return &goMetasploitDBClient{driver: db, baseURL: cnf.GetURL()}, nil
|
||||
}
|
||||
|
||||
// FillWithMetasploit fills metasploit module information that has in module
|
||||
func FillWithMetasploit(r *models.ScanResult, cnf config.MetasploitConf) (nMetasploitCve int, err error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
func FillWithMetasploit(r *models.ScanResult, cnf config.MetasploitConf, logOpts logging.LogOpts) (nMetasploitCve int, err error) {
|
||||
client, err := newGoMetasploitDBClient(&cnf, logOpts)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to newGoMetasploitDBClient. err: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := client.closeDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if client.driver == nil {
|
||||
var cveIDs []string
|
||||
for cveID := range r.ScannedCves {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
}
|
||||
prefix, err := util.URLPathJoin(cnf.GetURL(), "cves")
|
||||
prefix, err := util.URLPathJoin(client.baseURL, "cves")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
responses, err := getMetasploitsViaHTTP(cveIDs, prefix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to get Metasploits via HTTP. err: %w", err)
|
||||
}
|
||||
for _, res := range responses {
|
||||
msfs := []metasploitmodels.Metasploit{}
|
||||
if err := json.Unmarshal([]byte(res.json), &msfs); err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
}
|
||||
metasploits := ConvertToModelsMsf(msfs)
|
||||
v, ok := r.ScannedCves[res.request.cveID]
|
||||
@@ -48,25 +86,13 @@ func FillWithMetasploit(r *models.ScanResult, cnf config.MetasploitConf) (nMetas
|
||||
nMetasploitCve++
|
||||
}
|
||||
} else {
|
||||
driver, locked, err := newMetasploitDB(&cnf)
|
||||
if locked {
|
||||
return 0, xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
for cveID, vuln := range r.ScannedCves {
|
||||
if cveID == "" {
|
||||
continue
|
||||
}
|
||||
ms, err := driver.GetModuleByCveID(cveID)
|
||||
ms, err := client.driver.GetModuleByCveID(cveID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to get Metasploits by CVE-ID. err: %w", err)
|
||||
}
|
||||
if len(ms) == 0 {
|
||||
continue
|
||||
@@ -199,19 +225,20 @@ func ConvertToModelsMsf(ms []metasploitmodels.Metasploit) (modules []models.Meta
|
||||
return modules
|
||||
}
|
||||
|
||||
func newMetasploitDB(cnf config.VulnDictInterface) (driver metasploitdb.DB, locked bool, err error) {
|
||||
func newMetasploitDB(cnf config.VulnDictInterface) (metasploitdb.DB, error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
return nil, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
if driver, locked, err = metasploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), metasploitdb.Option{}); err != nil {
|
||||
driver, locked, err := metasploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), metasploitdb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("metasploitDB is locked. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to init metasploit DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, false, err
|
||||
return nil, xerrors.Errorf("Failed to init metasploit DB. DB Path: %s, err: %w", path, err)
|
||||
}
|
||||
return driver, false, nil
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
@@ -6,7 +6,7 @@ package detector
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@@ -26,12 +26,7 @@ func reuseScannedCves(r *models.ScanResult) bool {
case constant.FreeBSD, constant.Raspbian:
return true
}
return isTrivyResult(r)
}

func isTrivyResult(r *models.ScanResult) bool {
_, ok := r.Optional["trivy-target"]
return ok
return r.ScannedBy == "trivy"
}

func needToRefreshCve(r models.ScanResult) bool {
|
||||
@@ -130,7 +125,7 @@ func getPlusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
previousCveIDsSet[previousVulnInfo.CveID] = true
|
||||
}
|
||||
|
||||
new := models.VulnInfos{}
|
||||
newer := models.VulnInfos{}
|
||||
updated := models.VulnInfos{}
|
||||
for _, v := range current.ScannedCves {
|
||||
if previousCveIDsSet[v.CveID] {
|
||||
@@ -150,17 +145,17 @@ func getPlusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
logging.Log.Debugf("same: %s", v.CveID)
|
||||
}
|
||||
} else {
|
||||
logging.Log.Debugf("new: %s", v.CveID)
|
||||
logging.Log.Debugf("newer: %s", v.CveID)
|
||||
v.DiffStatus = models.DiffPlus
|
||||
new[v.CveID] = v
|
||||
newer[v.CveID] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(updated) == 0 && len(new) == 0 {
|
||||
if len(updated) == 0 && len(newer) == 0 {
|
||||
logging.Log.Infof("%s: There are %d vulnerabilities, but no difference between current result and previous one.", current.FormatServerName(), len(current.ScannedCves))
|
||||
}
|
||||
|
||||
for cveID, vuln := range new {
|
||||
for cveID, vuln := range newer {
|
||||
updated[cveID] = vuln
|
||||
}
|
||||
return updated
|
||||
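Renaming the map from new, which shadows the Go builtin, to newer leaves the logic unchanged: freshly appeared CVEs are folded into the updated set at the end. A tiny sketch of that merge with placeholder values:

package main

import "fmt"

func main() {
	updated := map[string]string{"CVE-2022-0001": "score changed"}
	newer := map[string]string{"CVE-2022-0002": "first seen in this scan"}

	// Fold newly detected CVEs into the updated set, as getPlusDiffCves does.
	for cveID, v := range newer {
		updated[cveID] = v
	}
	fmt.Println(updated)
}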
@@ -188,11 +183,7 @@ func getMinusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
}
|
||||
|
||||
func isCveInfoUpdated(cveID string, previous, current models.ScanResult) bool {
|
||||
cTypes := []models.CveContentType{
|
||||
models.Nvd,
|
||||
models.Jvn,
|
||||
models.NewCveContentType(current.Family),
|
||||
}
|
||||
cTypes := append([]models.CveContentType{models.Nvd, models.Jvn}, models.GetCveContentTypes(current.Family)...)
|
||||
|
||||
prevLastModified := map[models.CveContentType][]time.Time{}
|
||||
preVinfo, ok := previous.ScannedCves[cveID]
|
||||
@@ -239,8 +230,8 @@ var jsonDirPattern = regexp.MustCompile(
// ListValidJSONDirs returns valid json directory as array
// Returned array is sorted so that recent directories are at the head
func ListValidJSONDirs(resultsDir string) (dirs []string, err error) {
var dirInfo []os.FileInfo
if dirInfo, err = ioutil.ReadDir(resultsDir); err != nil {
var dirInfo []fs.DirEntry
if dirInfo, err = os.ReadDir(resultsDir); err != nil {
err = xerrors.Errorf("Failed to read %s: %w",
config.Conf.ResultsDir, err)
return
@@ -263,7 +254,7 @@ func loadOneServerScanResult(jsonFile string) (*models.ScanResult, error) {
data []byte
err error
)
if data, err = ioutil.ReadFile(jsonFile); err != nil {
if data, err = os.ReadFile(jsonFile); err != nil {
return nil, xerrors.Errorf("Failed to read %s: %w", jsonFile, err)
}
result := &models.ScanResult{}

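os.ReadDir returns []fs.DirEntry (directory entries, statted lazily) where ioutil.ReadDir returned []os.FileInfo, and os.ReadFile replaces ioutil.ReadFile. A minimal sketch of the replacement calls:

package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	entries, err := os.ReadDir(".") // replaces ioutil.ReadDir; yields []fs.DirEntry
	if err != nil {
		log.Fatal(err)
	}
	for _, e := range entries {
		fmt.Println(e.Name(), e.IsDir())
	}

	data, err := os.ReadFile("go.mod") // replaces ioutil.ReadFile
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(len(data), "bytes")
}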
@@ -7,7 +7,7 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
//WpCveInfos is for wpscan json
|
||||
// WpCveInfos is for wpscan json
|
||||
type WpCveInfos struct {
|
||||
ReleaseDate string `json:"release_date"`
|
||||
ChangelogURL string `json:"changelog_url"`
|
||||
@@ -33,7 +33,7 @@ type WpCveInfos struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
//WpCveInfo is for wpscan json
|
||||
// WpCveInfo is for wpscan json
|
||||
type WpCveInfo struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
@@ -44,7 +44,7 @@ type WpCveInfo struct {
|
||||
FixedIn string `json:"fixed_in"`
|
||||
}
|
||||
|
||||
//References is for wpscan json
|
||||
// References is for wpscan json
|
||||
type References struct {
|
||||
URL []string `json:"url"`
|
||||
Cve []string `json:"cve"`
|
||||
@@ -242,7 +242,7 @@ func httpRequest(url, token string) (string, error) {
|
||||
return "", errof.New(errof.ErrFailedToAccessWpScan,
|
||||
fmt.Sprintf("Failed to access to wpscan.com. err: %s", err))
|
||||
}
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", errof.New(errof.ErrFailedToAccessWpScan,
|
||||
fmt.Sprintf("Failed to access to wpscan.com. err: %s", err))
|
||||
|
||||
243
go.mod
@@ -1,170 +1,183 @@
|
||||
module github.com/future-architect/vuls
|
||||
|
||||
go 1.17
|
||||
go 1.18
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go v50.2.0+incompatible
|
||||
github.com/BurntSushi/toml v0.4.1
|
||||
github.com/Azure/azure-sdk-for-go v66.0.0+incompatible
|
||||
github.com/BurntSushi/toml v1.2.1
|
||||
github.com/CycloneDX/cyclonedx-go v0.7.0
|
||||
github.com/Ullaakut/nmap/v2 v2.1.2-0.20210406060955-59a52fe80a4f
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/aquasecurity/fanal v0.0.0-20211224205755-c94f68b6d71a
|
||||
github.com/aquasecurity/go-dep-parser v0.0.0-20220110153540-4a30ebc4b509
|
||||
github.com/aquasecurity/trivy v0.22.0
|
||||
github.com/aquasecurity/trivy-db v0.0.0-20210916043317-726b7b72a47b
|
||||
github.com/aquasecurity/go-dep-parser v0.0.0-20221114145626-35ef808901e8
|
||||
github.com/aquasecurity/trivy v0.35.0
|
||||
github.com/aquasecurity/trivy-db v0.0.0-20220627104749-930461748b63
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
|
||||
github.com/aws/aws-sdk-go v1.42.0
|
||||
github.com/boltdb/bolt v1.3.1
|
||||
github.com/briandowns/spinner v1.16.0 // indirect
|
||||
github.com/aws/aws-sdk-go v1.44.136
|
||||
github.com/c-robinson/iplib v1.0.3
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cheggaaa/pb/v3 v3.0.8 // indirect
|
||||
github.com/d4l3k/messagediff v1.2.2-0.20190829033028-7e0a312ae40b
|
||||
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21
|
||||
github.com/emersion/go-smtp v0.14.0
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||
github.com/go-redis/redis/v8 v8.11.4 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/google/subcommands v1.2.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gosuri/uitable v0.0.4
|
||||
github.com/hashicorp/go-uuid v1.0.2
|
||||
github.com/hashicorp/go-version v1.3.0
|
||||
github.com/howeyc/gopass v0.0.0-20190910152052-7cb4b85ec19c
|
||||
github.com/hashicorp/go-uuid v1.0.3
|
||||
github.com/hashicorp/go-version v1.6.0
|
||||
github.com/jesseduffield/gocui v0.3.0
|
||||
github.com/k0kubun/pp v3.0.1+incompatible
|
||||
github.com/knqyf263/go-apk-version v0.0.0-20200609155635-041fdbb8563f
|
||||
github.com/knqyf263/go-cpe v0.0.0-20201213041631-54f6ab28673f
|
||||
github.com/knqyf263/go-deb-version v0.0.0-20190517075300-09fca494f03d
|
||||
github.com/knqyf263/go-rpm-version v0.0.0-20170716094938-74609b86c936
|
||||
github.com/knqyf263/go-rpm-version v0.0.0-20220614171824-631e686d1075
|
||||
github.com/kotakanbe/go-pingscanner v0.1.0
|
||||
github.com/kotakanbe/logrus-prefixed-formatter v0.0.0-20180123152602-928f7356cb96
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/nlopes/slack v0.6.0
|
||||
github.com/nsf/termbox-go v0.0.0-20200418040025-38ba6e5628f1 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/package-url/packageurl-go v0.1.1-0.20220203205134-d70459300c8a
|
||||
github.com/parnurzeal/gorequest v0.2.16
|
||||
github.com/pelletier/go-toml v1.9.4 // indirect
|
||||
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5
|
||||
github.com/sirupsen/logrus v1.8.1
|
||||
github.com/spf13/afero v1.7.0
|
||||
github.com/spf13/cast v1.4.1 // indirect
|
||||
github.com/spf13/cobra v1.3.0
|
||||
github.com/vulsio/go-cve-dictionary v0.8.2-0.20211028094424-0a854f8e8f85
|
||||
github.com/vulsio/go-exploitdb v0.4.2-0.20211028071949-1ebf9c4f6c4d
|
||||
github.com/vulsio/go-kev v0.1.0
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/spf13/cobra v1.6.1
|
||||
github.com/vulsio/go-cti v0.0.2-0.20220613013115-8c7e57a6aa86
|
||||
github.com/vulsio/go-cve-dictionary v0.8.2
|
||||
github.com/vulsio/go-exploitdb v0.4.4
|
||||
github.com/vulsio/go-kev v0.1.1-0.20220118062020-5f69b364106f
|
||||
github.com/vulsio/go-msfdb v0.2.1-0.20211028071756-4a9759bd9f14
|
||||
github.com/vulsio/gost v0.4.1-0.20211028071837-7ad032a6ffa8
|
||||
github.com/vulsio/goval-dictionary v0.6.1-0.20211224012144-554a54938173
|
||||
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa // indirect
|
||||
golang.org/x/net v0.0.0-20211206223403-eba003a116a9 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1
|
||||
gopkg.in/ini.v1 v1.66.2 // indirect
|
||||
gorm.io/driver/mysql v1.2.1 // indirect
|
||||
gorm.io/driver/postgres v1.2.3 // indirect
|
||||
gorm.io/driver/sqlite v1.2.6 // indirect
|
||||
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009
|
||||
github.com/vulsio/gost v0.4.2-0.20230203045609-dcfab39a9ff4
|
||||
github.com/vulsio/goval-dictionary v0.8.0
|
||||
go.etcd.io/bbolt v1.3.6
|
||||
golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb
|
||||
golang.org/x/oauth2 v0.1.0
|
||||
golang.org/x/sync v0.1.0
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.99.0 // indirect
|
||||
cloud.google.com/go/storage v1.18.2 // indirect
|
||||
cloud.google.com/go v0.105.0 // indirect
|
||||
cloud.google.com/go/compute v1.14.0 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cloud.google.com/go/iam v0.8.0 // indirect
|
||||
cloud.google.com/go/storage v1.27.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.1 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.5 // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/Masterminds/goutils v1.1.1 // indirect
|
||||
github.com/Masterminds/semver v1.5.0 // indirect
|
||||
github.com/Masterminds/sprig v2.22.0+incompatible // indirect
|
||||
github.com/PuerkitoBio/goquery v1.7.1 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.1 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.6.1 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/andybalholm/cascadia v1.2.0 // indirect
|
||||
github.com/aquasecurity/go-gem-version v0.0.0-20201115065557-8eed6fe000ce // indirect
|
||||
github.com/aquasecurity/go-npm-version v0.0.0-20201110091526-0b796d180798 // indirect
|
||||
github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 // indirect
|
||||
github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 // indirect
|
||||
github.com/caarlos0/env/v6 v6.0.0 // indirect
|
||||
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
|
||||
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
|
||||
github.com/cncf/xds/go v0.0.0-20211216145620-d92e9ce0af51 // indirect
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
|
||||
github.com/briandowns/spinner v1.21.0 // indirect
|
||||
github.com/caarlos0/env/v6 v6.10.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cheggaaa/pb/v3 v3.1.0 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/envoyproxy/go-control-plane v0.10.1 // indirect
|
||||
github.com/envoyproxy/protoc-gen-validate v0.6.2 // indirect
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible // indirect
|
||||
github.com/go-sql-driver/mysql v1.6.0 // indirect
|
||||
github.com/dnaeon/go-vcr v1.2.0 // indirect
|
||||
github.com/docker/cli v20.10.20+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/docker v20.10.20+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/fatih/color v1.14.1 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/go-redis/redis/v8 v8.11.5 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.0 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/gofrs/uuid v4.0.0+incompatible // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/google/go-containerregistry v0.6.0 // indirect
|
||||
github.com/google/go-github/v33 v33.0.0 // indirect
|
||||
github.com/google/go-querystring v1.0.0 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/google/wire v0.4.0 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.1.1 // indirect
|
||||
github.com/googleapis/google-cloud-go-testing v0.0.0-20210719221736-1c9a4c676720 // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/go-containerregistry v0.12.0 // indirect
|
||||
github.com/google/licenseclassifier/v2 v2.0.0-pre6 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
|
||||
github.com/gopherjs/gopherjs v1.17.2 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/grokify/html-strip-tags-go v0.0.1 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-getter v1.6.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
|
||||
github.com/hashicorp/go-safetemp v1.0.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/htcat/htcat v1.0.2 // indirect
|
||||
github.com/huandu/xstrings v1.3.2 // indirect
|
||||
github.com/imdario/mergo v0.3.12 // indirect
|
||||
github.com/inconshreveable/log15 v0.0.0-20201112154412-8562bdadbbac // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgconn v1.10.1 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
github.com/inconshreveable/log15 v2.16.0+incompatible // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.2.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
|
||||
github.com/jackc/pgtype v1.9.1 // indirect
|
||||
github.com/jackc/pgx/v4 v4.14.1 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
github.com/jackc/pgx/v5 v5.3.0 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.4 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/magiconair/properties v1.8.5 // indirect
|
||||
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/compress v1.15.11 // indirect
|
||||
github.com/liamg/jfather v0.0.7 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/masahiro331/go-mvn-version v0.0.0-20210429150710-d3157d602a08 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.9 // indirect
|
||||
github.com/mitchellh/copystructure v1.1.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.4.3 // indirect
|
||||
github.com/mitchellh/reflectwalk v1.0.1 // indirect
|
||||
github.com/masahiro331/go-xfs-filesystem v0.0.0-20221127135739-051c25f1becd // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.17 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/nsf/termbox-go v1.1.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
github.com/pelletier/go-toml/v2 v2.0.6 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/satori/go.uuid v1.2.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/samber/lo v1.33.0 // indirect
github.com/sergi/go-diff v1.3.1 // indirect
github.com/smartystreets/assertions v1.13.0 // indirect
github.com/spdx/tools-golang v0.3.0 // indirect
github.com/spf13/afero v1.9.3 // indirect
github.com/spf13/cast v1.5.0 // indirect
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/spf13/viper v1.10.0 // indirect
github.com/stretchr/objx v0.3.0 // indirect
github.com/stretchr/testify v1.7.0 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
github.com/ymomoi/goval-parser v0.0.0-20170813122243-0a0be1dd9d08 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
go.opencensus.io v0.23.0 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.20.0 // indirect
golang.org/x/sys v0.0.0-20211210111614-af8b64212486 // indirect
golang.org/x/term v0.0.0-20201210144234-2321bbc49cbf // indirect
google.golang.org/api v0.63.0 // indirect
github.com/spf13/viper v1.15.0 // indirect
github.com/stretchr/objx v0.5.0 // indirect
github.com/stretchr/testify v1.8.1 // indirect
github.com/subosito/gotenv v1.4.2 // indirect
github.com/ulikunitz/xz v0.5.10 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
go.opencensus.io v0.24.0 // indirect
go.uber.org/atomic v1.10.0 // indirect
go.uber.org/goleak v1.1.12 // indirect
go.uber.org/multierr v1.8.0 // indirect
go.uber.org/zap v1.23.0 // indirect
golang.org/x/crypto v0.6.0 // indirect
golang.org/x/mod v0.8.0 // indirect
golang.org/x/net v0.7.0 // indirect
golang.org/x/sys v0.5.0 // indirect
golang.org/x/term v0.5.0 // indirect
golang.org/x/text v0.7.0 // indirect
golang.org/x/tools v0.6.0 // indirect
google.golang.org/api v0.107.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect
google.golang.org/grpc v1.43.0 // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
gorm.io/gorm v1.22.4 // indirect
google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect
google.golang.org/grpc v1.52.0 // indirect
google.golang.org/protobuf v1.28.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
gorm.io/driver/mysql v1.4.7 // indirect
gorm.io/driver/postgres v1.4.8 // indirect
gorm.io/driver/sqlite v1.4.4 // indirect
gorm.io/gorm v1.24.5 // indirect
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
moul.io/http2curl v1.0.0 // indirect
)

// See https://github.com/moby/moby/issues/42939#issuecomment-1114255529
replace github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible

@@ -6,12 +6,13 @@ package gost
|
||||
import (
|
||||
"encoding/json"
|
||||
|
||||
debver "github.com/knqyf263/go-deb-version"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
debver "github.com/knqyf263/go-deb-version"
|
||||
gostmodels "github.com/vulsio/gost/models"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Debian is Gost client for Debian GNU/Linux
|
||||
@@ -46,27 +47,36 @@ func (deb Debian) DetectCVEs(r *models.ScanResult, _ bool) (nCVEs int, err error
|
||||
|
||||
// Add linux and set the version of running kernel to search Gost.
|
||||
if r.Container.ContainerID == "" {
|
||||
newVer := ""
|
||||
if p, ok := r.Packages["linux-image-"+r.RunningKernel.Release]; ok {
|
||||
newVer = p.NewVersion
|
||||
}
|
||||
r.Packages["linux"] = models.Package{
|
||||
Name: "linux",
|
||||
Version: r.RunningKernel.Version,
|
||||
NewVersion: newVer,
|
||||
if r.RunningKernel.Version != "" {
|
||||
newVer := ""
|
||||
if p, ok := r.Packages["linux-image-"+r.RunningKernel.Release]; ok {
|
||||
newVer = p.NewVersion
|
||||
}
|
||||
r.Packages["linux"] = models.Package{
|
||||
Name: "linux",
|
||||
Version: r.RunningKernel.Version,
|
||||
NewVersion: newVer,
|
||||
}
|
||||
} else {
|
||||
logging.Log.Warnf("Since the exact kernel version is not available, the vulnerability in the linux package is not detected.")
|
||||
}
|
||||
}
|
||||
|
||||
stashLinuxPackage := r.Packages["linux"]
|
||||
var stashLinuxPackage models.Package
|
||||
if linux, ok := r.Packages["linux"]; ok {
|
||||
stashLinuxPackage = linux
|
||||
}
|
||||
nFixedCVEs, err := deb.detectCVEsWithFixState(r, "resolved")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to detect fixed CVEs. err: %w", err)
|
||||
}
|
||||
|
||||
r.Packages["linux"] = stashLinuxPackage
|
||||
if stashLinuxPackage.Name != "" {
|
||||
r.Packages["linux"] = stashLinuxPackage
|
||||
}
|
||||
nUnfixedCVEs, err := deb.detectCVEsWithFixState(r, "open")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to detect unfixed CVEs. err: %w", err)
|
||||
}
|
||||
|
||||
return (nFixedCVEs + nUnfixedCVEs), nil
|
||||
@@ -78,22 +88,25 @@ func (deb Debian) detectCVEsWithFixState(r *models.ScanResult, fixStatus string)
|
||||
}
|
||||
|
||||
packCvesList := []packCves{}
|
||||
if deb.DBDriver.Cnf.IsFetchViaHTTP() {
|
||||
url, _ := util.URLPathJoin(deb.DBDriver.Cnf.GetURL(), "debian", major(r.Release), "pkgs")
|
||||
if deb.driver == nil {
|
||||
url, err := util.URLPathJoin(deb.baseURL, "debian", major(r.Release), "pkgs")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
|
||||
s := "unfixed-cves"
|
||||
if s == "resolved" {
|
||||
s = "fixed-cves"
|
||||
}
|
||||
|
||||
responses, err := getCvesWithFixStateViaHTTP(r, url, s)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to get CVEs via HTTP. err: %w", err)
|
||||
}
|
||||
|
||||
for _, res := range responses {
|
||||
debCves := map[string]gostmodels.DebianCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &debCves); err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
}
|
||||
cves := []models.CveContent{}
|
||||
fixes := []models.PackageFixStatus{}
|
||||
@@ -109,13 +122,10 @@ func (deb Debian) detectCVEsWithFixState(r *models.ScanResult, fixStatus string)
|
||||
})
|
||||
}
|
||||
} else {
|
||||
if deb.DBDriver.DB == nil {
|
||||
return 0, nil
|
||||
}
|
||||
for _, pack := range r.Packages {
|
||||
cves, fixes, err := deb.getCvesDebianWithfixStatus(fixStatus, major(r.Release), pack.Name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to get CVEs for Package. err: %w", err)
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: pack.Name,
|
||||
@@ -129,7 +139,7 @@ func (deb Debian) detectCVEsWithFixState(r *models.ScanResult, fixStatus string)
|
||||
for _, pack := range r.SrcPackages {
|
||||
cves, fixes, err := deb.getCvesDebianWithfixStatus(fixStatus, major(r.Release), pack.Name)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to get CVEs for SrcPackage. err: %w", err)
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: pack.Name,
|
||||
@@ -230,11 +240,11 @@ func (deb Debian) detectCVEsWithFixState(r *models.ScanResult, fixStatus string)
|
||||
func isGostDefAffected(versionRelease, gostVersion string) (affected bool, err error) {
|
||||
vera, err := debver.NewVersion(versionRelease)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", versionRelease, err)
|
||||
}
|
||||
verb, err := debver.NewVersion(gostVersion)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", gostVersion, err)
|
||||
}
|
||||
return vera.LessThan(verb), nil
|
||||
}
|
||||
@@ -242,13 +252,13 @@ func isGostDefAffected(versionRelease, gostVersion string) (affected bool, err e
|
||||
func (deb Debian) getCvesDebianWithfixStatus(fixStatus, release, pkgName string) ([]models.CveContent, []models.PackageFixStatus, error) {
|
||||
var f func(string, string) (map[string]gostmodels.DebianCVE, error)
|
||||
if fixStatus == "resolved" {
|
||||
f = deb.DBDriver.DB.GetFixedCvesDebian
|
||||
f = deb.driver.GetFixedCvesDebian
|
||||
} else {
|
||||
f = deb.DBDriver.DB.GetUnfixedCvesDebian
|
||||
f = deb.driver.GetUnfixedCvesDebian
|
||||
}
|
||||
debCves, err := f(release, pkgName)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, nil, xerrors.Errorf("Failed to get CVEs. fixStatus: %s, release: %s, src package: %s, err: %w", fixStatus, release, pkgName, err)
|
||||
}
|
||||
|
||||
cves := []models.CveContent{}
|
||||
|
||||
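For reference, the Debian client above defers version comparison to isGostDefAffected, which wraps the knqyf263/go-deb-version package imported in this file. A minimal, self-contained sketch of that comparison (the version strings are illustrative, not taken from the diff):

package main

import (
	"fmt"

	debver "github.com/knqyf263/go-deb-version"
)

func main() {
	// Ignoring parse errors for brevity; isGostDefAffected wraps them with xerrors.
	installed, _ := debver.NewVersion("1.19.2-2+deb11u1")
	fixed, _ := debver.NewVersion("1.19.2-2+deb11u3")
	// A package is treated as affected while its installed version
	// sorts before the version that ships the fix.
	fmt.Println(installed.LessThan(fixed)) // true
}
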
82 gost/gost.go
@@ -4,22 +4,17 @@
|
||||
package gost
|
||||
|
||||
import (
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/vulsio/gost/db"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
gostdb "github.com/vulsio/gost/db"
|
||||
gostlog "github.com/vulsio/gost/util"
|
||||
)
|
||||
|
||||
// DBDriver is a DB Driver
|
||||
type DBDriver struct {
|
||||
DB db.DB
|
||||
Cnf config.VulnDictInterface
|
||||
}
|
||||
|
||||
// Client is the interface of OVAL client.
|
||||
// Client is the interface of Gost client.
|
||||
type Client interface {
|
||||
DetectCVEs(*models.ScanResult, bool) (int, error)
|
||||
CloseDB() error
|
||||
@@ -27,72 +22,79 @@ type Client interface {
|
||||
|
||||
// Base is a base struct
|
||||
type Base struct {
|
||||
DBDriver DBDriver
|
||||
driver gostdb.DB
|
||||
baseURL string
|
||||
}
|
||||
|
||||
// CloseDB close a DB connection
|
||||
func (b Base) CloseDB() error {
|
||||
if b.DBDriver.DB == nil {
|
||||
if b.driver == nil {
|
||||
return nil
|
||||
}
|
||||
return b.DBDriver.DB.CloseDB()
|
||||
return b.driver.CloseDB()
|
||||
}
|
||||
|
||||
// FillCVEsWithRedHat fills CVE detailed with Red Hat Security
|
||||
func FillCVEsWithRedHat(r *models.ScanResult, cnf config.GostConf) error {
|
||||
db, locked, err := newGostDB(cnf)
|
||||
if locked {
|
||||
return xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
func FillCVEsWithRedHat(r *models.ScanResult, cnf config.GostConf, o logging.LogOpts) error {
|
||||
if err := gostlog.SetLogger(o.LogToFile, o.LogDir, o.Debug, o.LogJSON); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
db, err := newGostDB(&cnf)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to newGostDB. err: %w", err)
|
||||
}
|
||||
|
||||
client := RedHat{Base{driver: db, baseURL: cnf.GetURL()}}
|
||||
defer func() {
|
||||
if err := db.CloseDB(); err != nil {
|
||||
if err := client.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
return RedHat{Base{DBDriver{DB: db, Cnf: &cnf}}}.fillCvesWithRedHatAPI(r)
|
||||
return client.fillCvesWithRedHatAPI(r)
|
||||
}
|
||||
|
||||
// NewClient make Client by family
|
||||
func NewClient(cnf config.GostConf, family string) (Client, error) {
|
||||
db, locked, err := newGostDB(cnf)
|
||||
if locked {
|
||||
return nil, xerrors.Errorf("SQLite3 is locked: %s", cnf.GetSQLite3Path())
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
// NewGostClient make Client by family
|
||||
func NewGostClient(cnf config.GostConf, family string, o logging.LogOpts) (Client, error) {
|
||||
if err := gostlog.SetLogger(o.LogToFile, o.LogDir, o.Debug, o.LogJSON); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to set gost logger. err: %w", err)
|
||||
}
|
||||
|
||||
driver := DBDriver{DB: db, Cnf: &cnf}
|
||||
db, err := newGostDB(&cnf)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to newGostDB. err: %w", err)
|
||||
}
|
||||
|
||||
base := Base{driver: db, baseURL: cnf.GetURL()}
|
||||
switch family {
|
||||
case constant.RedHat, constant.CentOS, constant.Rocky, constant.Alma:
|
||||
return RedHat{Base{DBDriver: driver}}, nil
|
||||
return RedHat{base}, nil
|
||||
case constant.Debian, constant.Raspbian:
|
||||
return Debian{Base{DBDriver: driver}}, nil
|
||||
return Debian{base}, nil
|
||||
case constant.Ubuntu:
|
||||
return Ubuntu{Base{DBDriver: driver}}, nil
|
||||
return Ubuntu{base}, nil
|
||||
case constant.Windows:
|
||||
return Microsoft{Base{DBDriver: driver}}, nil
|
||||
return Microsoft{base}, nil
|
||||
default:
|
||||
return Pseudo{Base{DBDriver: driver}}, nil
|
||||
return Pseudo{base}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// NewGostDB returns db client for Gost
|
||||
func newGostDB(cnf config.GostConf) (driver db.DB, locked bool, err error) {
|
||||
func newGostDB(cnf config.VulnDictInterface) (gostdb.DB, error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, false, nil
|
||||
return nil, nil
|
||||
}
|
||||
path := cnf.GetURL()
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
if driver, locked, err = db.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), db.Option{}); err != nil {
|
||||
driver, locked, err := gostdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), gostdb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
return nil, true, xerrors.Errorf("gostDB is locked. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to init gost DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, false, err
|
||||
return nil, xerrors.Errorf("Failed to init gost DB. DB Path: %s, err: %w", path, err)
|
||||
}
|
||||
return driver, false, nil
|
||||
return driver, nil
|
||||
}
|
||||
|
||||
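A sketch of how a caller might wire up the refactored gost client shown above. Only the call shapes (NewGostClient, CloseDB, DetectCVEs) are taken from this diff; the surrounding detector function, its package, and the already-populated config are assumptions:

package detector

import (
	"github.com/future-architect/vuls/config"
	"github.com/future-architect/vuls/constant"
	"github.com/future-architect/vuls/gost"
	"github.com/future-architect/vuls/logging"
	"github.com/future-architect/vuls/models"
	"golang.org/x/xerrors"
)

func detectWithGost(cnf config.GostConf, r *models.ScanResult, logOpts logging.LogOpts) (int, error) {
	// Family selects the concrete client (RedHat, Debian, Ubuntu, Microsoft, Pseudo).
	client, err := gost.NewGostClient(cnf, constant.Debian, logOpts)
	if err != nil {
		return 0, xerrors.Errorf("Failed to create gost client. err: %w", err)
	}
	defer func() {
		if err := client.CloseDB(); err != nil {
			logging.Log.Errorf("Failed to close gost DB. err: %+v", err)
		}
	}()
	// Second argument is ignoreWillNotFix; the Debian client ignores it.
	return client.DetectCVEs(r, false)
}
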
@@ -4,9 +4,16 @@
|
||||
package gost
|
||||
|
||||
import (
|
||||
"sort"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
gostmodels "github.com/vulsio/gost/models"
|
||||
)
|
||||
@@ -16,64 +23,164 @@ type Microsoft struct {
|
||||
Base
|
||||
}
|
||||
|
||||
var kbIDPattern = regexp.MustCompile(`KB(\d{6,7})`)
|
||||
|
||||
// DetectCVEs fills cve information that has in Gost
|
||||
func (ms Microsoft) DetectCVEs(r *models.ScanResult, _ bool) (nCVEs int, err error) {
|
||||
if ms.DBDriver.DB == nil {
|
||||
if ms.driver == nil {
|
||||
return 0, nil
|
||||
}
|
||||
cveIDs := []string{}
|
||||
for cveID := range r.ScannedCves {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
|
||||
var osName string
|
||||
osName, ok := r.Optional["OSName"].(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("This Windows has wrong type option(OSName). UUID: %s", r.ServerUUID)
|
||||
}
|
||||
msCves, err := ms.DBDriver.DB.GetMicrosoftMulti(cveIDs)
|
||||
|
||||
var products []string
|
||||
if _, ok := r.Optional["InstalledProducts"]; ok {
|
||||
switch ps := r.Optional["InstalledProducts"].(type) {
|
||||
case []interface{}:
|
||||
for _, p := range ps {
|
||||
pname, ok := p.(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("skip products: %v", p)
|
||||
continue
|
||||
}
|
||||
products = append(products, pname)
|
||||
}
|
||||
case []string:
|
||||
for _, p := range ps {
|
||||
products = append(products, p)
|
||||
}
|
||||
case nil:
|
||||
logging.Log.Warnf("This Windows has no option(InstalledProducts). UUID: %s", r.ServerUUID)
|
||||
}
|
||||
}
|
||||
|
||||
applied, unapplied := map[string]struct{}{}, map[string]struct{}{}
|
||||
if _, ok := r.Optional["KBID"]; ok {
|
||||
switch kbIDs := r.Optional["KBID"].(type) {
|
||||
case []interface{}:
|
||||
for _, kbID := range kbIDs {
|
||||
s, ok := kbID.(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("skip KBID: %v", kbID)
|
||||
continue
|
||||
}
|
||||
unapplied[strings.TrimPrefix(s, "KB")] = struct{}{}
|
||||
}
|
||||
case []string:
|
||||
for _, kbID := range kbIDs {
|
||||
unapplied[strings.TrimPrefix(kbID, "KB")] = struct{}{}
|
||||
}
|
||||
case nil:
|
||||
logging.Log.Warnf("This Windows has no option(KBID). UUID: %s", r.ServerUUID)
|
||||
}
|
||||
|
||||
for _, pkg := range r.Packages {
|
||||
matches := kbIDPattern.FindAllStringSubmatch(pkg.Name, -1)
|
||||
for _, match := range matches {
|
||||
applied[match[1]] = struct{}{}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
switch kbIDs := r.Optional["AppliedKBID"].(type) {
|
||||
case []interface{}:
|
||||
for _, kbID := range kbIDs {
|
||||
s, ok := kbID.(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("skip KBID: %v", kbID)
|
||||
continue
|
||||
}
|
||||
applied[strings.TrimPrefix(s, "KB")] = struct{}{}
|
||||
}
|
||||
case []string:
|
||||
for _, kbID := range kbIDs {
|
||||
applied[strings.TrimPrefix(kbID, "KB")] = struct{}{}
|
||||
}
|
||||
case nil:
|
||||
logging.Log.Warnf("This Windows has no option(AppliedKBID). UUID: %s", r.ServerUUID)
|
||||
}
|
||||
|
||||
switch kbIDs := r.Optional["UnappliedKBID"].(type) {
|
||||
case []interface{}:
|
||||
for _, kbID := range kbIDs {
|
||||
s, ok := kbID.(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("skip KBID: %v", kbID)
|
||||
continue
|
||||
}
|
||||
unapplied[strings.TrimPrefix(s, "KB")] = struct{}{}
|
||||
}
|
||||
case []string:
|
||||
for _, kbID := range kbIDs {
|
||||
unapplied[strings.TrimPrefix(kbID, "KB")] = struct{}{}
|
||||
}
|
||||
case nil:
|
||||
logging.Log.Warnf("This Windows has no option(UnappliedKBID). UUID: %s", r.ServerUUID)
|
||||
}
|
||||
}
|
||||
|
||||
logging.Log.Debugf(`GetCvesByMicrosoftKBID query body {"osName": %s, "installedProducts": %q, "applied": %q, "unapplied: %q"}`, osName, products, maps.Keys(applied), maps.Keys(unapplied))
|
||||
cves, err := ms.driver.GetCvesByMicrosoftKBID(osName, products, maps.Keys(applied), maps.Keys(unapplied))
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
return 0, xerrors.Errorf("Failed to detect CVEs. err: %w", err)
|
||||
}
|
||||
for cveID, msCve := range msCves {
|
||||
if _, ok := r.ScannedCves[cveID]; !ok {
|
||||
continue
|
||||
|
||||
for cveID, cve := range cves {
|
||||
cveCont, mitigations := ms.ConvertToModel(&cve)
|
||||
uniqKB := map[string]struct{}{}
|
||||
for _, p := range cve.Products {
|
||||
for _, kb := range p.KBs {
|
||||
if _, err := strconv.Atoi(kb.Article); err == nil {
|
||||
uniqKB[fmt.Sprintf("KB%s", kb.Article)] = struct{}{}
|
||||
} else {
|
||||
uniqKB[kb.Article] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
cveCont, mitigations := ms.ConvertToModel(&msCve)
|
||||
v, _ := r.ScannedCves[cveID]
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.CveContents{}
|
||||
advisories := []models.DistroAdvisory{}
|
||||
for kb := range uniqKB {
|
||||
advisories = append(advisories, models.DistroAdvisory{
|
||||
AdvisoryID: kb,
|
||||
Description: "Microsoft Knowledge Base",
|
||||
})
|
||||
}
|
||||
|
||||
r.ScannedCves[cveID] = models.VulnInfo{
|
||||
CveID: cveID,
|
||||
Confidences: models.Confidences{models.WindowsUpdateSearch},
|
||||
DistroAdvisories: advisories,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Mitigations: mitigations,
|
||||
}
|
||||
v.CveContents[models.Microsoft] = []models.CveContent{*cveCont}
|
||||
v.Mitigations = append(v.Mitigations, mitigations...)
|
||||
r.ScannedCves[cveID] = v
|
||||
}
|
||||
return len(cveIDs), nil
|
||||
return len(cves), nil
|
||||
}
|
||||
|
||||
// ConvertToModel converts gost model to vuls model
|
||||
func (ms Microsoft) ConvertToModel(cve *gostmodels.MicrosoftCVE) (*models.CveContent, []models.Mitigation) {
|
||||
sort.Slice(cve.ScoreSets, func(i, j int) bool {
|
||||
return cve.ScoreSets[i].Vector < cve.ScoreSets[j].Vector
|
||||
slices.SortFunc(cve.Products, func(i, j gostmodels.MicrosoftProduct) bool {
|
||||
return i.ScoreSet.Vector < j.ScoreSet.Vector
|
||||
})
|
||||
|
||||
v3score := 0.0
|
||||
var v3Vector string
|
||||
for _, scoreSet := range cve.ScoreSets {
|
||||
if v3score < scoreSet.BaseScore {
|
||||
v3score = scoreSet.BaseScore
|
||||
v3Vector = scoreSet.Vector
|
||||
for _, p := range cve.Products {
|
||||
v, err := strconv.ParseFloat(p.ScoreSet.BaseScore, 64)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if v3score < v {
|
||||
v3score = v
|
||||
v3Vector = p.ScoreSet.Vector
|
||||
}
|
||||
}
|
||||
|
||||
var v3Severity string
|
||||
for _, s := range cve.Severity {
|
||||
v3Severity = s.Description
|
||||
}
|
||||
|
||||
var refs []models.Reference
|
||||
for _, r := range cve.References {
|
||||
if r.AttrType == "External" {
|
||||
refs = append(refs, models.Reference{Link: r.URL})
|
||||
}
|
||||
}
|
||||
|
||||
var cwe []string
|
||||
if 0 < len(cve.CWE) {
|
||||
cwe = []string{cve.CWE}
|
||||
for _, p := range cve.Products {
|
||||
v3Severity = p.Severity
|
||||
}
|
||||
|
||||
option := map[string]string{}
|
||||
@@ -82,28 +189,20 @@ func (ms Microsoft) ConvertToModel(cve *gostmodels.MicrosoftCVE) (*models.CveCon
|
||||
// "exploit_status": "Publicly Disclosed:No;Exploited:No;Latest Software Release:Exploitation Less Likely;Older Software Release:Exploitation Less Likely;DOS:N/A",
|
||||
option["exploit"] = cve.ExploitStatus
|
||||
}
|
||||
kbids := []string{}
|
||||
for _, kbid := range cve.KBIDs {
|
||||
kbids = append(kbids, kbid.KBID)
|
||||
}
|
||||
if 0 < len(kbids) {
|
||||
option["kbids"] = strings.Join(kbids, ",")
|
||||
}
|
||||
|
||||
vendorURL := "https://msrc.microsoft.com/update-guide/vulnerability/" + cve.CveID
|
||||
mitigations := []models.Mitigation{}
|
||||
if cve.Mitigation != "" {
|
||||
mitigations = append(mitigations, models.Mitigation{
|
||||
CveContentType: models.Microsoft,
|
||||
Mitigation: cve.Mitigation,
|
||||
URL: vendorURL,
|
||||
URL: cve.URL,
|
||||
})
|
||||
}
|
||||
if cve.Workaround != "" {
|
||||
mitigations = append(mitigations, models.Mitigation{
|
||||
CveContentType: models.Microsoft,
|
||||
Mitigation: cve.Workaround,
|
||||
URL: vendorURL,
|
||||
URL: cve.URL,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -115,11 +214,9 @@ func (ms Microsoft) ConvertToModel(cve *gostmodels.MicrosoftCVE) (*models.CveCon
|
||||
Cvss3Score: v3score,
|
||||
Cvss3Vector: v3Vector,
|
||||
Cvss3Severity: v3Severity,
|
||||
References: refs,
|
||||
CweIDs: cwe,
|
||||
Published: cve.PublishDate,
|
||||
LastModified: cve.LastUpdateDate,
|
||||
SourceLink: vendorURL,
|
||||
SourceLink: cve.URL,
|
||||
Optional: option,
|
||||
}, mitigations
|
||||
}
|
||||
|
||||
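As a quick illustration of the KB handling above: kbIDPattern pulls applied KB article numbers out of scanned Windows package names, and the capture group drops the "KB" prefix. The sample package names below are made up:

package main

import (
	"fmt"
	"regexp"
)

var kbIDPattern = regexp.MustCompile(`KB(\d{6,7})`)

func main() {
	applied := map[string]struct{}{}
	for _, name := range []string{
		"Update for Microsoft Windows (KB5022845)",
		"Security Intelligence Update", // no KB number, contributes nothing
	} {
		for _, m := range kbIDPattern.FindAllStringSubmatch(name, -1) {
			applied[m[1]] = struct{}{} // m[1] is the numeric part without "KB"
		}
	}
	fmt.Println(applied) // map[5022845:{}]
}
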
@@ -7,7 +7,7 @@ import (
"github.com/future-architect/vuls/models"
)

// Pseudo is Gost client except for RedHat family and Debian
// Pseudo is Gost client except for RedHat family, Debian, Ubuntu and Windows
type Pseudo struct {
Base
}

@@ -8,7 +8,9 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
gostmodels "github.com/vulsio/gost/models"
|
||||
@@ -21,17 +23,24 @@ type RedHat struct {
|
||||
|
||||
// DetectCVEs fills cve information that has in Gost
|
||||
func (red RedHat) DetectCVEs(r *models.ScanResult, ignoreWillNotFix bool) (nCVEs int, err error) {
|
||||
if red.DBDriver.Cnf.IsFetchViaHTTP() {
|
||||
prefix, _ := util.URLPathJoin(red.DBDriver.Cnf.GetURL(), "redhat", major(r.Release), "pkgs")
|
||||
responses, err := getAllUnfixedCvesViaHTTP(r, prefix)
|
||||
gostRelease := r.Release
|
||||
if r.Family == constant.CentOS {
|
||||
gostRelease = strings.TrimPrefix(r.Release, "stream")
|
||||
}
|
||||
if red.driver == nil {
|
||||
prefix, err := util.URLPathJoin(red.baseURL, "redhat", major(gostRelease), "pkgs")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
responses, err := getCvesWithFixStateViaHTTP(r, prefix, "unfixed-cves")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Unfixed CVEs via HTTP. err: %w", err)
|
||||
}
|
||||
for _, res := range responses {
|
||||
// CVE-ID: RedhatCVE
|
||||
cves := map[string]gostmodels.RedhatCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &cves); err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
}
|
||||
for _, cve := range cves {
|
||||
if newly := red.setUnfixedCveToScanResult(&cve, r); newly {
|
||||
@@ -40,14 +49,11 @@ func (red RedHat) DetectCVEs(r *models.ScanResult, ignoreWillNotFix bool) (nCVEs
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if red.DBDriver.DB == nil {
|
||||
return 0, nil
|
||||
}
|
||||
for _, pack := range r.Packages {
|
||||
// CVE-ID: RedhatCVE
|
||||
cves, err := red.DBDriver.DB.GetUnfixedCvesRedhat(major(r.Release), pack.Name, ignoreWillNotFix)
|
||||
cves, err := red.driver.GetUnfixedCvesRedhat(major(gostRelease), pack.Name, ignoreWillNotFix)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to get Unfixed CVEs. err: %w", err)
|
||||
}
|
||||
for _, cve := range cves {
|
||||
if newly := red.setUnfixedCveToScanResult(&cve, r); newly {
|
||||
@@ -68,8 +74,11 @@ func (red RedHat) fillCvesWithRedHatAPI(r *models.ScanResult) error {
|
||||
cveIDs = append(cveIDs, cveID)
|
||||
}
|
||||
|
||||
if red.DBDriver.Cnf.IsFetchViaHTTP() {
|
||||
prefix, _ := util.URLPathJoin(config.Conf.Gost.URL, "redhat", "cves")
|
||||
if red.driver == nil {
|
||||
prefix, err := util.URLPathJoin(red.baseURL, "redhat", "cves")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
responses, err := getCvesViaHTTP(cveIDs, prefix)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -85,10 +94,7 @@ func (red RedHat) fillCvesWithRedHatAPI(r *models.ScanResult) error {
|
||||
red.setFixedCveToScanResult(&redCve, r)
|
||||
}
|
||||
} else {
|
||||
if red.DBDriver.DB == nil {
|
||||
return nil
|
||||
}
|
||||
redCves, err := red.DBDriver.DB.GetRedhatMulti(cveIDs)
|
||||
redCves, err := red.driver.GetRedhatMulti(cveIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -141,8 +147,12 @@ func (red RedHat) setUnfixedCveToScanResult(cve *gostmodels.RedhatCVE, r *models
|
||||
newly = true
|
||||
}
|
||||
v.Mitigations = append(v.Mitigations, mitigations...)
|
||||
pkgStats := red.mergePackageStates(v,
|
||||
cve.PackageState, r.Packages, r.Release)
|
||||
|
||||
gostRelease := r.Release
|
||||
if r.Family == constant.CentOS {
|
||||
gostRelease = strings.TrimPrefix(r.Release, "stream")
|
||||
}
|
||||
pkgStats := red.mergePackageStates(v, cve.PackageState, r.Packages, gostRelease)
|
||||
if 0 < len(pkgStats) {
|
||||
v.AffectedPackages = pkgStats
|
||||
r.ScannedCves[cve.Name] = v
|
||||
|
||||
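A small sketch of the CentOS Stream normalization added above: gost stores CentOS data under the plain major release, so a "stream"-prefixed release string (the sample values are illustrative) is trimmed before the lookup, while a plain release passes through untouched:

package main

import (
	"fmt"
	"strings"
)

func main() {
	for _, release := range []string{"stream8", "stream9", "8"} {
		// Same normalization as in RedHat.DetectCVEs above.
		fmt.Println(strings.TrimPrefix(release, "stream")) // 8, 9, 8
	}
}
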
319 gost/ubuntu.go
@@ -5,8 +5,12 @@ package gost
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
@@ -20,16 +24,52 @@ type Ubuntu struct {
|
||||
|
||||
func (ubu Ubuntu) supported(version string) bool {
|
||||
_, ok := map[string]string{
|
||||
"606": "dapper",
|
||||
"610": "edgy",
|
||||
"704": "feisty",
|
||||
"710": "gutsy",
|
||||
"804": "hardy",
|
||||
"810": "intrepid",
|
||||
"904": "jaunty",
|
||||
"910": "karmic",
|
||||
"1004": "lucid",
|
||||
"1010": "maverick",
|
||||
"1104": "natty",
|
||||
"1110": "oneiric",
|
||||
"1204": "precise",
|
||||
"1210": "quantal",
|
||||
"1304": "raring",
|
||||
"1310": "saucy",
|
||||
"1404": "trusty",
|
||||
"1410": "utopic",
|
||||
"1504": "vivid",
|
||||
"1510": "wily",
|
||||
"1604": "xenial",
|
||||
"1610": "yakkety",
|
||||
"1704": "zesty",
|
||||
"1710": "artful",
|
||||
"1804": "bionic",
|
||||
"1810": "cosmic",
|
||||
"1904": "disco",
|
||||
"1910": "eoan",
|
||||
"2004": "focal",
|
||||
"2010": "groovy",
|
||||
"2104": "hirsute",
|
||||
"2110": "impish",
|
||||
"2204": "jammy",
|
||||
"2210": "kinetic",
|
||||
// "2304": "lunar",
|
||||
}[version]
|
||||
return ok
|
||||
}
|
||||
|
||||
type cveContent struct {
|
||||
cveContent models.CveContent
|
||||
fixStatuses models.PackageFixStatuses
|
||||
}
|
||||
|
||||
var kernelSourceNamePattern = regexp.MustCompile(`^linux((-(ti-omap4|armadaxp|mako|manta|flo|goldfish|joule|raspi2?|snapdragon|aws|azure|bluefield|dell300x|gcp|gke(op)?|ibm|intel|lowlatency|kvm|oem|oracle|euclid|lts-xenial|hwe|riscv))?(-(edge|fde|iotg|hwe|osp1))?(-[\d\.]+)?)?$`)
|
||||
|
||||
// DetectCVEs fills cve information that has in Gost
|
||||
func (ubu Ubuntu) DetectCVEs(r *models.ScanResult, _ bool) (nCVEs int, err error) {
|
||||
ubuReleaseVer := strings.Replace(r.Release, ".", "", 1)
|
||||
@@ -38,129 +78,222 @@ func (ubu Ubuntu) DetectCVEs(r *models.ScanResult, _ bool) (nCVEs int, err error
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
linuxImage := "linux-image-" + r.RunningKernel.Release
|
||||
// Add linux and set the version of running kernel to search Gost.
|
||||
if r.Container.ContainerID == "" {
|
||||
newVer := ""
|
||||
if p, ok := r.Packages[linuxImage]; ok {
|
||||
newVer = p.NewVersion
|
||||
}
|
||||
r.Packages["linux"] = models.Package{
|
||||
Name: "linux",
|
||||
Version: r.RunningKernel.Version,
|
||||
NewVersion: newVer,
|
||||
}
|
||||
}
|
||||
|
||||
packCvesList := []packCves{}
|
||||
if ubu.DBDriver.Cnf.IsFetchViaHTTP() {
|
||||
url, _ := util.URLPathJoin(ubu.DBDriver.Cnf.GetURL(), "ubuntu", ubuReleaseVer, "pkgs")
|
||||
responses, err := getAllUnfixedCvesViaHTTP(r, url)
|
||||
detects := map[string]cveContent{}
|
||||
if ubu.driver == nil {
|
||||
urlPrefix, err := util.URLPathJoin(ubu.baseURL, "ubuntu", ubuReleaseVer, "pkgs")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
responses, err := getCvesWithFixStateViaHTTP(r, urlPrefix, "fixed-cves")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get fixed CVEs via HTTP. err: %w", err)
|
||||
}
|
||||
|
||||
for _, res := range responses {
|
||||
ubuCves := map[string]gostmodels.UbuntuCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &ubuCves); err != nil {
|
||||
return 0, err
|
||||
if !res.request.isSrcPack {
|
||||
continue
|
||||
}
|
||||
cves := []models.CveContent{}
|
||||
for _, ubucve := range ubuCves {
|
||||
cves = append(cves, *ubu.ConvertToModel(&ubucve))
|
||||
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-meta", "linux").Replace(res.request.packName)
|
||||
|
||||
if kernelSourceNamePattern.MatchString(n) {
|
||||
isDetect := false
|
||||
for _, bn := range r.SrcPackages[res.request.packName].BinaryNames {
|
||||
if bn == fmt.Sprintf("linux-image-%s", r.RunningKernel.Release) {
|
||||
isDetect = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isDetect {
|
||||
continue
|
||||
}
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: res.request.packName,
|
||||
isSrcPack: res.request.isSrcPack,
|
||||
cves: cves,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
if ubu.DBDriver.DB == nil {
|
||||
return 0, nil
|
||||
}
|
||||
for _, pack := range r.Packages {
|
||||
ubuCves, err := ubu.DBDriver.DB.GetUnfixedCvesUbuntu(ubuReleaseVer, pack.Name)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
|
||||
fixeds := map[string]gostmodels.UbuntuCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &fixeds); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
}
|
||||
cves := []models.CveContent{}
|
||||
for _, ubucve := range ubuCves {
|
||||
cves = append(cves, *ubu.ConvertToModel(&ubucve))
|
||||
for _, content := range detect(fixeds, true, models.SrcPackage{Name: res.request.packName, Version: r.SrcPackages[res.request.packName].Version, BinaryNames: r.SrcPackages[res.request.packName].BinaryNames}, fmt.Sprintf("linux-image-%s", r.RunningKernel.Release)) {
|
||||
c, ok := detects[content.cveContent.CveID]
|
||||
if ok {
|
||||
content.fixStatuses = append(content.fixStatuses, c.fixStatuses...)
|
||||
}
|
||||
detects[content.cveContent.CveID] = content
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: pack.Name,
|
||||
isSrcPack: false,
|
||||
cves: cves,
|
||||
})
|
||||
}
|
||||
|
||||
// SrcPack
|
||||
responses, err = getCvesWithFixStateViaHTTP(r, urlPrefix, "unfixed-cves")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get unfixed CVEs via HTTP. err: %w", err)
|
||||
}
|
||||
for _, res := range responses {
|
||||
if !res.request.isSrcPack {
|
||||
continue
|
||||
}
|
||||
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-meta", "linux").Replace(res.request.packName)
|
||||
|
||||
if kernelSourceNamePattern.MatchString(n) {
|
||||
isDetect := false
|
||||
for _, bn := range r.SrcPackages[res.request.packName].BinaryNames {
|
||||
if bn == fmt.Sprintf("linux-image-%s", r.RunningKernel.Release) {
|
||||
isDetect = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isDetect {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
unfixeds := map[string]gostmodels.UbuntuCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &unfixeds); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
}
|
||||
for _, content := range detect(unfixeds, false, models.SrcPackage{Name: res.request.packName, Version: r.SrcPackages[res.request.packName].Version, BinaryNames: r.SrcPackages[res.request.packName].BinaryNames}, fmt.Sprintf("linux-image-%s", r.RunningKernel.Release)) {
|
||||
c, ok := detects[content.cveContent.CveID]
|
||||
if ok {
|
||||
content.fixStatuses = append(content.fixStatuses, c.fixStatuses...)
|
||||
}
|
||||
detects[content.cveContent.CveID] = content
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, pack := range r.SrcPackages {
|
||||
ubuCves, err := ubu.DBDriver.DB.GetUnfixedCvesUbuntu(ubuReleaseVer, pack.Name)
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-meta", "linux").Replace(pack.Name)
|
||||
|
||||
if kernelSourceNamePattern.MatchString(n) {
|
||||
isDetect := false
|
||||
for _, bn := range pack.BinaryNames {
|
||||
if bn == fmt.Sprintf("linux-image-%s", r.RunningKernel.Release) {
|
||||
isDetect = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !isDetect {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
fixeds, err := ubu.driver.GetFixedCvesUbuntu(ubuReleaseVer, n)
|
||||
if err != nil {
|
||||
return 0, nil
|
||||
return 0, xerrors.Errorf("Failed to get fixed CVEs for SrcPackage. err: %w", err)
|
||||
}
|
||||
cves := []models.CveContent{}
|
||||
for _, ubucve := range ubuCves {
|
||||
cves = append(cves, *ubu.ConvertToModel(&ubucve))
|
||||
for _, content := range detect(fixeds, true, pack, fmt.Sprintf("linux-image-%s", r.RunningKernel.Release)) {
|
||||
c, ok := detects[content.cveContent.CveID]
|
||||
if ok {
|
||||
content.fixStatuses = append(content.fixStatuses, c.fixStatuses...)
|
||||
}
|
||||
detects[content.cveContent.CveID] = content
|
||||
}
|
||||
|
||||
unfixeds, err := ubu.driver.GetUnfixedCvesUbuntu(ubuReleaseVer, n)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get unfixed CVEs for SrcPackage. err: %w", err)
|
||||
}
|
||||
for _, content := range detect(unfixeds, false, pack, fmt.Sprintf("linux-image-%s", r.RunningKernel.Release)) {
|
||||
c, ok := detects[content.cveContent.CveID]
|
||||
if ok {
|
||||
content.fixStatuses = append(content.fixStatuses, c.fixStatuses...)
|
||||
}
|
||||
detects[content.cveContent.CveID] = content
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: pack.Name,
|
||||
isSrcPack: true,
|
||||
cves: cves,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
delete(r.Packages, "linux")
|
||||
|
||||
for _, p := range packCvesList {
|
||||
for _, cve := range p.cves {
|
||||
v, ok := r.ScannedCves[cve.CveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(cve)
|
||||
} else {
|
||||
v.CveContents[models.UbuntuAPI] = []models.CveContent{cve}
|
||||
}
|
||||
for _, content := range detects {
|
||||
v, ok := r.ScannedCves[content.cveContent.CveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(content.cveContent)
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: cve.CveID,
|
||||
CveContents: models.NewCveContents(cve),
|
||||
Confidences: models.Confidences{models.UbuntuAPIMatch},
|
||||
}
|
||||
nCVEs++
|
||||
v.CveContents[models.UbuntuAPI] = []models.CveContent{content.cveContent}
|
||||
v.Confidences = models.Confidences{models.UbuntuAPIMatch}
|
||||
}
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: content.cveContent.CveID,
|
||||
CveContents: models.NewCveContents(content.cveContent),
|
||||
Confidences: models.Confidences{models.UbuntuAPIMatch},
|
||||
}
|
||||
}
|
||||
|
||||
names := []string{}
|
||||
if p.isSrcPack {
|
||||
if srcPack, ok := r.SrcPackages[p.packName]; ok {
|
||||
for _, binName := range srcPack.BinaryNames {
|
||||
if _, ok := r.Packages[binName]; ok {
|
||||
names = append(names, binName)
|
||||
for _, s := range content.fixStatuses {
|
||||
v.AffectedPackages = v.AffectedPackages.Store(s)
|
||||
}
|
||||
r.ScannedCves[content.cveContent.CveID] = v
|
||||
}
|
||||
|
||||
return len(detects), nil
|
||||
}
|
||||
|
||||
func detect(cves map[string]gostmodels.UbuntuCVE, fixed bool, srcPkg models.SrcPackage, runningKernelBinaryPkgName string) []cveContent {
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-meta", "linux").Replace(srcPkg.Name)
|
||||
|
||||
var contents []cveContent
|
||||
for _, cve := range cves {
|
||||
c := cveContent{
|
||||
cveContent: *(Ubuntu{}).ConvertToModel(&cve),
|
||||
}
|
||||
|
||||
if fixed {
|
||||
for _, p := range cve.Patches {
|
||||
for _, rp := range p.ReleasePatches {
|
||||
installedVersion := srcPkg.Version
|
||||
patchedVersion := rp.Note
|
||||
|
||||
// https://git.launchpad.net/ubuntu-cve-tracker/tree/scripts/generate-oval#n384
|
||||
if kernelSourceNamePattern.MatchString(n) && strings.HasPrefix(srcPkg.Name, "linux-meta") {
|
||||
// 5.15.0.1026.30~20.04.16 -> 5.15.0.1026
|
||||
ss := strings.Split(installedVersion, ".")
|
||||
if len(ss) >= 4 {
|
||||
installedVersion = strings.Join(ss[:4], ".")
|
||||
}
|
||||
|
||||
// 5.15.0-1026.30~20.04.16 -> 5.15.0.1026
|
||||
lhs, rhs, ok := strings.Cut(patchedVersion, "-")
|
||||
if ok {
|
||||
patchedVersion = fmt.Sprintf("%s.%s", lhs, strings.Split(rhs, ".")[0])
|
||||
}
|
||||
}
|
||||
|
||||
affected, err := isGostDefAffected(installedVersion, patchedVersion)
|
||||
if err != nil {
|
||||
logging.Log.Debugf("Failed to parse versions: %s, Ver: %s, Gost: %s", err, installedVersion, patchedVersion)
|
||||
continue
|
||||
}
|
||||
|
||||
if affected {
|
||||
for _, bn := range srcPkg.BinaryNames {
|
||||
if kernelSourceNamePattern.MatchString(n) && bn != runningKernelBinaryPkgName {
|
||||
continue
|
||||
}
|
||||
c.fixStatuses = append(c.fixStatuses, models.PackageFixStatus{
|
||||
Name: bn,
|
||||
FixedIn: patchedVersion,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if p.packName == "linux" {
|
||||
names = append(names, linuxImage)
|
||||
} else {
|
||||
names = append(names, p.packName)
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
v.AffectedPackages = v.AffectedPackages.Store(models.PackageFixStatus{
|
||||
Name: name,
|
||||
} else {
|
||||
for _, bn := range srcPkg.BinaryNames {
|
||||
if kernelSourceNamePattern.MatchString(n) && bn != runningKernelBinaryPkgName {
|
||||
continue
|
||||
}
|
||||
c.fixStatuses = append(c.fixStatuses, models.PackageFixStatus{
|
||||
Name: bn,
|
||||
FixState: "open",
|
||||
NotFixedYet: true,
|
||||
})
|
||||
}
|
||||
r.ScannedCves[cve.CveID] = v
|
||||
}
|
||||
|
||||
if len(c.fixStatuses) > 0 {
|
||||
contents = append(contents, c)
|
||||
}
|
||||
}
|
||||
return nCVEs, nil
|
||||
return contents
|
||||
}
|
||||
|
||||
// ConvertToModel converts gost model to vuls model
|
||||
|
||||
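A stand-alone sketch of the linux-meta version normalization performed in detect() above (see the ubuntu-cve-tracker generate-oval link in the diff). The sample versions come straight from the in-code comments:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Installed linux-meta source version: keep only the first four dotted components.
	installed := "5.15.0.1026.30~20.04.16"
	if ss := strings.Split(installed, "."); len(ss) >= 4 {
		installed = strings.Join(ss[:4], ".")
	}
	fmt.Println(installed) // 5.15.0.1026

	// Patched version from the CVE tracker: fold the "-<abi>" part back into
	// the meta-package style numbering so the two can be compared.
	patched := "5.15.0-1026.30~20.04.16"
	if lhs, rhs, ok := strings.Cut(patched, "-"); ok {
		patched = fmt.Sprintf("%s.%s", lhs, strings.Split(rhs, ".")[0])
	}
	fmt.Println(patched) // 5.15.0.1026
}
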
@@ -127,11 +127,171 @@ func TestUbuntuConvertToModel(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ubu := Ubuntu{}
|
||||
got := ubu.ConvertToModel(&tt.input)
|
||||
if !reflect.DeepEqual(got, &tt.expected) {
|
||||
if got := (Ubuntu{}).ConvertToModel(&tt.input); !reflect.DeepEqual(got, &tt.expected) {
|
||||
t.Errorf("Ubuntu.ConvertToModel() = %#v, want %#v", got, &tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_detect(t *testing.T) {
|
||||
type args struct {
|
||||
cves map[string]gostmodels.UbuntuCVE
|
||||
fixed bool
|
||||
srcPkg models.SrcPackage
|
||||
runningKernelBinaryPkgName string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []cveContent
|
||||
}{
|
||||
{
|
||||
name: "fixed",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.UbuntuCVE{
|
||||
"CVE-0000-0000": {
|
||||
Candidate: "CVE-0000-0000",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-0"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
Candidate: "CVE-0000-0001",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fixed: true,
|
||||
srcPkg: models.SrcPackage{Name: "pkg", Version: "0.0.0-1", BinaryNames: []string{"pkg"}},
|
||||
runningKernelBinaryPkgName: "",
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.UbuntuAPI, CveID: "CVE-0000-0001", SourceLink: "https://ubuntu.com/security/CVE-0000-0001", References: []models.Reference{}},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "pkg",
|
||||
FixedIn: "0.0.0-2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unfixed",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.UbuntuCVE{
|
||||
"CVE-0000-0000": {
|
||||
Candidate: "CVE-0000-0000",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "open"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fixed: false,
|
||||
srcPkg: models.SrcPackage{Name: "pkg", Version: "0.0.0-1", BinaryNames: []string{"pkg"}},
|
||||
runningKernelBinaryPkgName: "",
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.UbuntuAPI, CveID: "CVE-0000-0000", SourceLink: "https://ubuntu.com/security/CVE-0000-0000", References: []models.Reference{}},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "pkg",
|
||||
FixState: "open",
|
||||
NotFixedYet: true,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "linux-signed",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.UbuntuCVE{
|
||||
"CVE-0000-0000": {
|
||||
Candidate: "CVE-0000-0000",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "linux",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-0"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
Candidate: "CVE-0000-0001",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "linux",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fixed: true,
|
||||
srcPkg: models.SrcPackage{Name: "linux-signed", Version: "0.0.0-1", BinaryNames: []string{"linux-image-generic", "linux-headers-generic"}},
|
||||
runningKernelBinaryPkgName: "linux-image-generic",
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.UbuntuAPI, CveID: "CVE-0000-0001", SourceLink: "https://ubuntu.com/security/CVE-0000-0001", References: []models.Reference{}},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "linux-image-generic",
|
||||
FixedIn: "0.0.0-2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "linux-meta",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.UbuntuCVE{
|
||||
"CVE-0000-0000": {
|
||||
Candidate: "CVE-0000-0000",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "linux",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-0"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
Candidate: "CVE-0000-0001",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "linux",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fixed: true,
|
||||
srcPkg: models.SrcPackage{Name: "linux-meta", Version: "0.0.0.1", BinaryNames: []string{"linux-image-generic", "linux-headers-generic"}},
|
||||
runningKernelBinaryPkgName: "linux-image-generic",
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.UbuntuAPI, CveID: "CVE-0000-0001", SourceLink: "https://ubuntu.com/security/CVE-0000-0001", References: []models.Reference{}},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "linux-image-generic",
|
||||
FixedIn: "0.0.0.2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := detect(tt.args.cves, tt.args.fixed, tt.args.srcPkg, tt.args.runningKernelBinaryPkgName); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("detect() = %#v, want %#v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
10 gost/util.go
@@ -9,11 +9,12 @@ import (
"time"

"github.com/cenkalti/backoff"
"github.com/parnurzeal/gorequest"
"golang.org/x/xerrors"

"github.com/future-architect/vuls/logging"
"github.com/future-architect/vuls/models"
"github.com/future-architect/vuls/util"
"github.com/parnurzeal/gorequest"
"golang.org/x/xerrors"
)

type response struct {
@@ -84,11 +85,6 @@ type request struct {
cveID string
}

func getAllUnfixedCvesViaHTTP(r *models.ScanResult, urlPrefix string) (
responses []response, err error) {
return getCvesWithFixStateViaHTTP(r, urlPrefix, "unfixed-cves")
}

func getCvesWithFixStateViaHTTP(r *models.ScanResult, urlPrefix, fixState string) (responses []response, err error) {
nReq := len(r.Packages) + len(r.SrcPackages)
reqChan := make(chan request, nReq)

Submodule integration updated: 3d05674df7...a36b4595ee
@@ -4,7 +4,6 @@ import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
@@ -16,7 +15,7 @@ import (
|
||||
formatter "github.com/kotakanbe/logrus-prefixed-formatter"
|
||||
)
|
||||
|
||||
//LogOpts has options for logging
|
||||
// LogOpts has options for logging
|
||||
type LogOpts struct {
|
||||
Debug bool `json:"debug,omitempty"`
|
||||
DebugSQL bool `json:"debugSQL,omitempty"`
|
||||
@@ -36,7 +35,7 @@ type Logger struct {
|
||||
|
||||
func init() {
|
||||
log := logrus.New()
|
||||
log.Out = ioutil.Discard
|
||||
log.Out = io.Discard
|
||||
fields := logrus.Fields{"prefix": ""}
|
||||
Log = Logger{Entry: *log.WithFields(fields)}
|
||||
}
|
||||
@@ -46,6 +45,13 @@ func NewNormalLogger() Logger {
|
||||
return Logger{Entry: logrus.Entry{Logger: logrus.New()}}
|
||||
}
|
||||
|
||||
// NewIODiscardLogger creates discard logger
|
||||
func NewIODiscardLogger() Logger {
|
||||
l := logrus.New()
|
||||
l.Out = io.Discard
|
||||
return Logger{Entry: logrus.Entry{Logger: l}}
|
||||
}
|
||||
|
||||
// NewCustomLogger creates logrus
|
||||
func NewCustomLogger(debug, quiet, logToFile bool, logDir, logMsgAnsiColor, serverName string) Logger {
|
||||
log := logrus.New()
|
||||
@@ -101,7 +107,7 @@ func NewCustomLogger(debug, quiet, logToFile bool, logDir, logMsgAnsiColor, serv
|
||||
}
|
||||
}
|
||||
} else if quiet {
|
||||
log.Out = ioutil.Discard
|
||||
log.Out = io.Discard
|
||||
} else {
|
||||
log.Out = os.Stderr
|
||||
}
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/aquasecurity/trivy-db/pkg/vulnsrc/vulnerability"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
)
|
||||
|
||||
// CveContents has CveContent
|
||||
@@ -75,7 +75,7 @@ func (v CveContents) PrimarySrcURLs(lang, myFamily, cveID string, confidences Co
|
||||
}
|
||||
}
|
||||
|
||||
order := CveContentTypes{Nvd, NewCveContentType(myFamily), GitHub}
|
||||
order := append(append(CveContentTypes{Nvd}, GetCveContentTypes(myFamily)...), GitHub)
|
||||
for _, ctype := range order {
|
||||
if conts, found := v[ctype]; found {
|
||||
for _, cont := range conts {
|
||||
@@ -133,24 +133,6 @@ func (v CveContents) PatchURLs() (urls []string) {
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
// Severities returns Severities
|
||||
func (v CveContents) Severities(myFamily string) (values []CveContentStr) {
|
||||
order := CveContentTypes{NVD, NewCveContentType(myFamily)}
|
||||
order = append(order, AllCveContetTypes.Except(append(order)...)...)
|
||||
|
||||
for _, ctype := range order {
|
||||
if cont, found := v[ctype]; found && 0 < len(cont.Severity) {
|
||||
values = append(values, CveContentStr{
|
||||
Type: ctype,
|
||||
Value: cont.Severity,
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
*/
|
||||
|
||||
// CveContentCpes has CveContentType and Value
|
||||
type CveContentCpes struct {
|
||||
Type CveContentType
|
||||
@@ -159,7 +141,7 @@ type CveContentCpes struct {
|
||||
|
||||
// Cpes returns affected CPEs of this Vulnerability
|
||||
func (v CveContents) Cpes(myFamily string) (values []CveContentCpes) {
|
||||
order := CveContentTypes{NewCveContentType(myFamily)}
|
||||
order := GetCveContentTypes(myFamily)
|
||||
order = append(order, AllCveContetTypes.Except(order...)...)
|
||||
|
||||
for _, ctype := range order {
|
||||
@@ -185,7 +167,7 @@ type CveContentRefs struct {
|
||||
|
||||
// References returns References
|
||||
func (v CveContents) References(myFamily string) (values []CveContentRefs) {
|
||||
order := CveContentTypes{NewCveContentType(myFamily)}
|
||||
order := GetCveContentTypes(myFamily)
|
||||
order = append(order, AllCveContetTypes.Except(order...)...)
|
||||
|
||||
for _, ctype := range order {
|
||||
@@ -206,7 +188,7 @@ func (v CveContents) References(myFamily string) (values []CveContentRefs) {
|
||||
|
||||
// CweIDs returns related CweIDs of the vulnerability
|
||||
func (v CveContents) CweIDs(myFamily string) (values []CveContentStr) {
|
||||
order := CveContentTypes{NewCveContentType(myFamily)}
|
||||
order := GetCveContentTypes(myFamily)
|
||||
order = append(order, AllCveContetTypes.Except(order...)...)
|
||||
for _, ctype := range order {
|
||||
if conts, found := v[ctype]; found {
|
||||
@@ -321,11 +303,13 @@ func NewCveContentType(name string) CveContentType {
|
||||
return Jvn
|
||||
case "redhat", "centos", "alma", "rocky":
|
||||
return RedHat
|
||||
case "fedora":
|
||||
return Fedora
|
||||
case "oracle":
|
||||
return Oracle
|
||||
case "ubuntu":
|
||||
return Ubuntu
|
||||
case "debian", vulnerability.DebianOVAL:
|
||||
case "debian", "debian-oval":
|
||||
return Debian
|
||||
case "redhat_api":
|
||||
return RedHatAPI
|
||||
@@ -333,6 +317,8 @@ func NewCveContentType(name string) CveContentType {
|
||||
return DebianSecurityTracker
|
||||
case "ubuntu_api":
|
||||
return UbuntuAPI
|
||||
case constant.OpenSUSE, constant.OpenSUSELeap, constant.SUSEEnterpriseServer, constant.SUSEEnterpriseDesktop:
|
||||
return SUSE
|
||||
case "microsoft":
|
||||
return Microsoft
|
||||
case "wordpress":
|
||||
@@ -348,6 +334,30 @@ func NewCveContentType(name string) CveContentType {
|
||||
}
|
||||
}
|
||||
|
||||
// GetCveContentTypes return CveContentTypes
|
||||
func GetCveContentTypes(family string) []CveContentType {
|
||||
switch family {
|
||||
case constant.RedHat, constant.CentOS, constant.Alma, constant.Rocky:
|
||||
return []CveContentType{RedHat, RedHatAPI}
|
||||
case constant.Fedora:
|
||||
return []CveContentType{Fedora}
|
||||
case constant.Oracle:
|
||||
return []CveContentType{Oracle}
|
||||
case constant.Amazon:
|
||||
return []CveContentType{Amazon}
|
||||
case constant.Debian, constant.Raspbian:
|
||||
return []CveContentType{Debian, DebianSecurityTracker}
|
||||
case constant.Ubuntu:
|
||||
return []CveContentType{Ubuntu, UbuntuAPI}
|
||||
case constant.OpenSUSE, constant.OpenSUSELeap, constant.SUSEEnterpriseServer, constant.SUSEEnterpriseDesktop:
|
||||
return []CveContentType{SUSE}
|
||||
case constant.Windows:
|
||||
return []CveContentType{Microsoft}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// Nvd is Nvd JSON
|
||||
Nvd CveContentType = "nvd"
|
||||
@@ -379,6 +389,9 @@ const (
|
||||
// Amazon is Amazon Linux
|
||||
Amazon CveContentType = "amazon"
|
||||
|
||||
// Fedora is Fedora Linux
|
||||
Fedora CveContentType = "fedora"
|
||||
|
||||
// SUSE is SUSE Linux
|
||||
SUSE CveContentType = "suse"
|
||||
|
||||
@@ -412,6 +425,7 @@ var AllCveContetTypes = CveContentTypes{
|
||||
Ubuntu,
|
||||
UbuntuAPI,
|
||||
Amazon,
|
||||
Fedora,
|
||||
SUSE,
|
||||
WpScan,
|
||||
Trivy,
|
||||
|
||||
@@ -3,6 +3,8 @@ package models
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
)
|
||||
|
||||
func TestExcept(t *testing.T) {
|
||||
@@ -249,3 +251,61 @@ func TestCveContents_Sort(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewCveContentType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want CveContentType
|
||||
}{
|
||||
{
|
||||
name: "redhat",
|
||||
want: RedHat,
|
||||
},
|
||||
{
|
||||
name: "centos",
|
||||
want: RedHat,
|
||||
},
|
||||
{
|
||||
name: "unknown",
|
||||
want: Unknown,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := NewCveContentType(tt.name); got != tt.want {
|
||||
t.Errorf("NewCveContentType() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCveContentTypes(t *testing.T) {
|
||||
tests := []struct {
|
||||
family string
|
||||
want []CveContentType
|
||||
}{
|
||||
{
|
||||
family: constant.RedHat,
|
||||
want: []CveContentType{RedHat, RedHatAPI},
|
||||
},
|
||||
{
|
||||
family: constant.Debian,
|
||||
want: []CveContentType{Debian, DebianSecurityTracker},
|
||||
},
|
||||
{
|
||||
family: constant.Ubuntu,
|
||||
want: []CveContentType{Ubuntu, UbuntuAPI},
|
||||
},
|
||||
{
|
||||
family: constant.FreeBSD,
|
||||
want: nil,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.family, func(t *testing.T) {
|
||||
if got := GetCveContentTypes(tt.family); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("GetCveContentTypes() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
96 models/github.go (new file)
@@ -0,0 +1,96 @@
package models

import (
    "fmt"
    "strings"

    ftypes "github.com/aquasecurity/trivy/pkg/fanal/types"
)

// DependencyGraphManifests has a map of DependencyGraphManifest
// key: BlobPath
type DependencyGraphManifests map[string]DependencyGraphManifest

// DependencyGraphManifest has filename, repository, dependencies
type DependencyGraphManifest struct {
    BlobPath     string       `json:"blobPath"`
    Filename     string       `json:"filename"`
    Repository   string       `json:"repository"`
    Dependencies []Dependency `json:"dependencies"`
}

// RepoURLFilename should be same format with GitHubSecurityAlert.RepoURLManifestPath()
func (m DependencyGraphManifest) RepoURLFilename() string {
    return fmt.Sprintf("%s/%s", m.Repository, m.Filename)
}

// Ecosystem returns a name of ecosystem(or package manager) of manifest(lock) file in trivy way
// https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-the-dependency-graph#supported-package-ecosystems
func (m DependencyGraphManifest) Ecosystem() string {
    switch {
    case strings.HasSuffix(m.Filename, "Cargo.lock"),
        strings.HasSuffix(m.Filename, "Cargo.toml"):
        return ftypes.Cargo // Rust
    case strings.HasSuffix(m.Filename, "composer.lock"),
        strings.HasSuffix(m.Filename, "composer.json"):
        return ftypes.Composer // PHP
    case strings.HasSuffix(m.Filename, ".csproj"),
        strings.HasSuffix(m.Filename, ".vbproj"),
        strings.HasSuffix(m.Filename, ".nuspec"),
        strings.HasSuffix(m.Filename, ".vcxproj"),
        strings.HasSuffix(m.Filename, ".fsproj"),
        strings.HasSuffix(m.Filename, "packages.config"):
        return ftypes.NuGet // .NET languages (C#, F#, VB), C++
    case strings.HasSuffix(m.Filename, "go.sum"),
        strings.HasSuffix(m.Filename, "go.mod"):
        return ftypes.GoModule // Go
    case strings.HasSuffix(m.Filename, "pom.xml"):
        return ftypes.Pom // Java, Scala
    case strings.HasSuffix(m.Filename, "package-lock.json"),
        strings.HasSuffix(m.Filename, "package.json"):
        return ftypes.Npm // JavaScript
    case strings.HasSuffix(m.Filename, "yarn.lock"):
        return ftypes.Yarn // JavaScript
    case strings.HasSuffix(m.Filename, "requirements.txt"),
        strings.HasSuffix(m.Filename, "requirements-dev.txt"),
        strings.HasSuffix(m.Filename, "setup.py"):
        return ftypes.Pip // Python
    case strings.HasSuffix(m.Filename, "Pipfile.lock"),
        strings.HasSuffix(m.Filename, "Pipfile"):
        return ftypes.Pipenv // Python
    case strings.HasSuffix(m.Filename, "poetry.lock"),
        strings.HasSuffix(m.Filename, "pyproject.toml"):
        return ftypes.Poetry // Python
    case strings.HasSuffix(m.Filename, "Gemfile.lock"),
        strings.HasSuffix(m.Filename, "Gemfile"):
        return ftypes.Bundler // Ruby
    case strings.HasSuffix(m.Filename, ".gemspec"):
        return ftypes.GemSpec // Ruby
    case strings.HasSuffix(m.Filename, "pubspec.lock"),
        strings.HasSuffix(m.Filename, "pubspec.yaml"):
        return "pub" // Dart
    case strings.HasSuffix(m.Filename, ".yml"),
        strings.HasSuffix(m.Filename, ".yaml"):
        return "actions" // GitHub Actions workflows
    default:
        return "unknown"
    }
}

// Dependency has dependency package information
type Dependency struct {
    PackageName    string `json:"packageName"`
    PackageManager string `json:"packageManager"`
    Repository     string `json:"repository"`
    Requirements   string `json:"requirements"`
}

// Version returns version
func (d Dependency) Version() string {
    s := strings.Split(d.Requirements, " ")
    if len(s) == 2 && s[0] == "=" {
        return s[1]
    }
    // in case of ranged version
    return ""
}
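The `Dependency.Version()` convention above only yields a concrete version for an exactly pinned requirement. A minimal standalone sketch (sample requirement strings are hypothetical):

```go
package main

import (
	"fmt"
	"strings"
)

// version mirrors Dependency.Version() above: only an exact "= x.y.z"
// requirement yields a concrete version; ranged requirements return "".
func version(requirements string) string {
	s := strings.Split(requirements, " ")
	if len(s) == 2 && s[0] == "=" {
		return s[1]
	}
	return ""
}

func main() {
	fmt.Println(version("= 4.17.21"))         // "4.17.21"
	fmt.Println(version(">= 4.0.0, < 5.0.0")) // "" (ranged)
}
```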
@@ -1,15 +1,14 @@
package models

import (
    "path/filepath"

    "github.com/aquasecurity/trivy-db/pkg/db"
    trivyDBTypes "github.com/aquasecurity/trivy-db/pkg/types"
    "github.com/aquasecurity/trivy/pkg/detector/library"
    "github.com/future-architect/vuls/logging"

    ftypes "github.com/aquasecurity/trivy/pkg/fanal/types"
    "github.com/aquasecurity/trivy/pkg/types"
    "golang.org/x/xerrors"

    "github.com/future-architect/vuls/logging"
)

// LibraryScanners is an array of LibraryScanner
@@ -60,11 +59,11 @@ type Library struct {
func (s LibraryScanner) Scan() ([]VulnInfo, error) {
    scanner, err := library.NewDriver(s.Type)
    if err != nil {
        return nil, xerrors.Errorf("Failed to new a library driver: %w", err)
        return nil, xerrors.Errorf("Failed to new a library driver %s: %w", s.Type, err)
    }
    var vulnerabilities = []VulnInfo{}
    for _, pkg := range s.Libs {
        tvulns, err := scanner.Detect(pkg.Name, pkg.Version)
        tvulns, err := scanner.DetectVulnerabilities("", pkg.Name, pkg.Version)
        if err != nil {
            return nil, xerrors.Errorf("failed to detect %s vulnerabilities: %w", scanner.Type(), err)
        }
@@ -130,27 +129,52 @@ func getCveContents(cveID string, vul trivyDBTypes.Vulnerability) (contents map[
    return contents
}

// LibraryMap is filename and library type
var LibraryMap = map[string]string{
    "package-lock.json":  "node",
    "yarn.lock":          "node",
    "Gemfile.lock":       "ruby",
    "Cargo.lock":         "rust",
    "composer.lock":      "php",
    "Pipfile.lock":       "python",
    "poetry.lock":        "python",
    "packages.lock.json": ".net",
    "go.sum":             "gomod",
// FindLockFiles is a list of filenames that is the target of findLock
var FindLockFiles = []string{
    // node
    ftypes.NpmPkgLock, ftypes.YarnLock, ftypes.PnpmLock,
    // ruby
    ftypes.GemfileLock,
    // rust
    ftypes.CargoLock,
    // php
    ftypes.ComposerLock,
    // python
    ftypes.PipRequirements, ftypes.PipfileLock, ftypes.PoetryLock,
    // .net
    ftypes.NuGetPkgsLock, ftypes.NuGetPkgsConfig, "*.deps.json",
    // gomod
    ftypes.GoMod, ftypes.GoSum,
    // java
    ftypes.MavenPom, "*.jar", "*.war", "*.ear", "*.par", "*gradle.lockfile",
    // C / C++
    ftypes.ConanLock,
}

// GetLibraryKey returns target library key
func (s LibraryScanner) GetLibraryKey() string {
    fileName := filepath.Base(s.LockfilePath)
    switch s.Type {
    case "jar", "war", "ear":
    case ftypes.Bundler, ftypes.GemSpec:
        return "ruby"
    case ftypes.Cargo:
        return "rust"
    case ftypes.Composer:
        return "php"
    case ftypes.GoBinary, ftypes.GoModule:
        return "gomod"
    case ftypes.Jar, ftypes.Pom, ftypes.Gradle:
        return "java"
    case ftypes.Npm, ftypes.Yarn, ftypes.Pnpm, ftypes.NodePkg, ftypes.JavaScript:
        return "node"
    case ftypes.NuGet, ftypes.DotNetCore:
        return ".net"
    case ftypes.Pipenv, ftypes.Poetry, ftypes.Pip, ftypes.PythonPkg:
        return "python"
    case ftypes.ConanLock:
        return "c"
    default:
        return ""
    }
    return LibraryMap[fileName]
}

// LibraryFixedIn has library fixed information
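Several `FindLockFiles` entries above are glob patterns ("*.jar", "*gradle.lockfile") rather than literal filenames. The `findLock` implementation itself is not part of this diff; the following is a standalone sketch of how such patterns could be matched, with a hypothetical subset of the list:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// A hypothetical subset of FindLockFiles for illustration only.
var findLockFiles = []string{"package-lock.json", "go.sum", "*.jar", "*gradle.lockfile"}

// isTarget reports whether a scanned path's base name matches any pattern.
func isTarget(path string) bool {
	base := filepath.Base(path)
	for _, p := range findLockFiles {
		if ok, _ := filepath.Match(p, base); ok {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(isTarget("app/libs/guava-31.1.jar")) // true
	fmt.Println(isTarget("docs/README.md"))          // false
}
```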
@@ -45,15 +45,16 @@ type ScanResult struct {
    Errors   []string `json:"errors"`
    Warnings []string `json:"warnings"`

    ScannedCves       VulnInfos              `json:"scannedCves"`
    RunningKernel     Kernel                 `json:"runningKernel"`
    Packages          Packages               `json:"packages"`
    SrcPackages       SrcPackages            `json:",omitempty"`
    EnabledDnfModules []string               `json:"enabledDnfModules,omitempty"` // for dnf modules
    WordPressPackages WordPressPackages      `json:",omitempty"`
    LibraryScanners   LibraryScanners        `json:"libraries,omitempty"`
    CweDict           CweDict                `json:"cweDict,omitempty"`
    Optional          map[string]interface{} `json:",omitempty"`
    ScannedCves       VulnInfos                `json:"scannedCves"`
    RunningKernel     Kernel                   `json:"runningKernel"`
    Packages          Packages                 `json:"packages"`
    SrcPackages       SrcPackages              `json:",omitempty"`
    EnabledDnfModules []string                 `json:"enabledDnfModules,omitempty"` // for dnf modules
    WordPressPackages WordPressPackages        `json:",omitempty"`
    GitHubManifests   DependencyGraphManifests `json:"gitHubManifests,omitempty"`
    LibraryScanners   LibraryScanners          `json:"libraries,omitempty"`
    CweDict           CweDict                  `json:"cweDict,omitempty"`
    Optional          map[string]interface{}   `json:",omitempty"`
    Config struct {
        Scan   config.Config `json:"scan"`
        Report config.Config `json:"report"`
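The new `GitHubManifests` field is a map keyed by BlobPath. A rough illustration of how it would appear in the scan-result JSON, using stand-in types and invented values:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Stand-ins for the models types; fields trimmed for brevity.
type manifest struct {
	Filename   string `json:"filename"`
	Repository string `json:"repository"`
}

type scanResult struct {
	GitHubManifests map[string]manifest `json:"gitHubManifests,omitempty"`
}

func main() {
	r := scanResult{GitHubManifests: map[string]manifest{
		"blob/abc123": {Filename: "go.mod", Repository: "github.com/example/repo"},
	}}
	b, _ := json.MarshalIndent(r, "", "  ")
	fmt.Println(string(b)) // {"gitHubManifests":{"blob/abc123":{...}}}
}
```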
@@ -436,23 +437,23 @@ func (r *ScanResult) SortForJSONOutput() {
|
||||
// CweDict is a dictionary for CWE
|
||||
type CweDict map[string]CweDictEntry
|
||||
|
||||
// AttentionCWE has OWASP TOP10, CWE TOP25, CWE/SANS TOP25 rank and url
|
||||
type AttentionCWE struct {
|
||||
Rank string
|
||||
URL string
|
||||
}
|
||||
|
||||
// Get the name, url, top10URL for the specified cweID, lang
|
||||
func (c CweDict) Get(cweID, lang string) (name, url, top10Rank, top10URL, cweTop25Rank, cweTop25URL, sansTop25Rank, sansTop25URL string) {
|
||||
func (c CweDict) Get(cweID, lang string) (name, url string, owasp, cwe25, sans map[string]AttentionCWE) {
|
||||
cweNum := strings.TrimPrefix(cweID, "CWE-")
|
||||
dict, ok := c[cweNum]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
owasp, cwe25, sans = fillAttentionCwe(dict, lang)
|
||||
switch lang {
|
||||
case "ja":
|
||||
if dict, ok := c[cweNum]; ok && dict.OwaspTopTen2017 != "" {
|
||||
top10Rank = dict.OwaspTopTen2017
|
||||
top10URL = cwe.OwaspTopTen2017GitHubURLJa[dict.OwaspTopTen2017]
|
||||
}
|
||||
if dict, ok := c[cweNum]; ok && dict.CweTopTwentyfive2019 != "" {
|
||||
cweTop25Rank = dict.CweTopTwentyfive2019
|
||||
cweTop25URL = cwe.CweTopTwentyfive2019URL
|
||||
}
|
||||
if dict, ok := c[cweNum]; ok && dict.SansTopTwentyfive != "" {
|
||||
sansTop25Rank = dict.SansTopTwentyfive
|
||||
sansTop25URL = cwe.SansTopTwentyfiveURL
|
||||
}
|
||||
if dict, ok := cwe.CweDictJa[cweNum]; ok {
|
||||
name = dict.Name
|
||||
url = fmt.Sprintf("http://jvndb.jvn.jp/ja/cwe/%s.html", cweID)
|
||||
@@ -463,18 +464,6 @@ func (c CweDict) Get(cweID, lang string) (name, url, top10Rank, top10URL, cweTop
|
||||
url = fmt.Sprintf("https://cwe.mitre.org/data/definitions/%s.html", cweID)
|
||||
}
|
||||
default:
|
||||
if dict, ok := c[cweNum]; ok && dict.OwaspTopTen2017 != "" {
|
||||
top10Rank = dict.OwaspTopTen2017
|
||||
top10URL = cwe.OwaspTopTen2017GitHubURLEn[dict.OwaspTopTen2017]
|
||||
}
|
||||
if dict, ok := c[cweNum]; ok && dict.CweTopTwentyfive2019 != "" {
|
||||
cweTop25Rank = dict.CweTopTwentyfive2019
|
||||
cweTop25URL = cwe.CweTopTwentyfive2019URL
|
||||
}
|
||||
if dict, ok := c[cweNum]; ok && dict.SansTopTwentyfive != "" {
|
||||
sansTop25Rank = dict.SansTopTwentyfive
|
||||
sansTop25URL = cwe.SansTopTwentyfiveURL
|
||||
}
|
||||
url = fmt.Sprintf("https://cwe.mitre.org/data/definitions/%s.html", cweID)
|
||||
if dict, ok := cwe.CweDictEn[cweNum]; ok {
|
||||
name = dict.Name
|
||||
@@ -483,11 +472,47 @@ func (c CweDict) Get(cweID, lang string) (name, url, top10Rank, top10URL, cweTop
|
||||
return
|
||||
}
|
||||
|
||||
func fillAttentionCwe(dict CweDictEntry, lang string) (owasp, cwe25, sans map[string]AttentionCWE) {
|
||||
owasp, cwe25, sans = map[string]AttentionCWE{}, map[string]AttentionCWE{}, map[string]AttentionCWE{}
|
||||
switch lang {
|
||||
case "ja":
|
||||
for year, rank := range dict.OwaspTopTens {
|
||||
owasp[year] = AttentionCWE{
|
||||
Rank: rank,
|
||||
URL: cwe.OwaspTopTenURLsJa[year][rank],
|
||||
}
|
||||
}
|
||||
default:
|
||||
for year, rank := range dict.OwaspTopTens {
|
||||
owasp[year] = AttentionCWE{
|
||||
Rank: rank,
|
||||
URL: cwe.OwaspTopTenURLsEn[year][rank],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for year, rank := range dict.CweTopTwentyfives {
|
||||
cwe25[year] = AttentionCWE{
|
||||
Rank: rank,
|
||||
URL: cwe.CweTopTwentyfiveURLs[year],
|
||||
}
|
||||
}
|
||||
|
||||
for year, rank := range dict.SansTopTwentyfives {
|
||||
sans[year] = AttentionCWE{
|
||||
Rank: rank,
|
||||
URL: cwe.SansTopTwentyfiveURLs[year],
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// CweDictEntry is a entry of CWE
|
||||
type CweDictEntry struct {
|
||||
En *cwe.Cwe `json:"en,omitempty"`
|
||||
Ja *cwe.Cwe `json:"ja,omitempty"`
|
||||
OwaspTopTen2017 string `json:"owaspTopTen2017"`
|
||||
CweTopTwentyfive2019 string `json:"cweTopTwentyfive2019"`
|
||||
SansTopTwentyfive string `json:"sansTopTwentyfive"`
|
||||
En *cwe.Cwe `json:"en,omitempty"`
|
||||
Ja *cwe.Cwe `json:"ja,omitempty"`
|
||||
OwaspTopTens map[string]string `json:"owaspTopTens"`
|
||||
CweTopTwentyfives map[string]string `json:"cweTopTwentyfives"`
|
||||
SansTopTwentyfives map[string]string `json:"sansTopTwentyfives"`
|
||||
}
@@ -86,6 +86,11 @@ func TestIsDisplayUpdatableNum(t *testing.T) {
            family:   constant.Alpine,
            expected: true,
        },
        {
            mode:     []byte{config.Fast},
            family:   constant.Fedora,
            expected: true,
        },
    }

    for i, tt := range tests {
@@ -256,11 +256,12 @@ type VulnInfo struct {
    CveID                string               `json:"cveID,omitempty"`
    Confidences          Confidences          `json:"confidences,omitempty"`
    AffectedPackages     PackageFixStatuses   `json:"affectedPackages,omitempty"`
    DistroAdvisories     DistroAdvisories     `json:"distroAdvisories,omitempty"` // for Amazon, RHEL, FreeBSD
    DistroAdvisories     DistroAdvisories     `json:"distroAdvisories,omitempty"` // for Amazon, RHEL, Fedora, FreeBSD, Microsoft
    CveContents          CveContents          `json:"cveContents,omitempty"`
    Exploits             []Exploit            `json:"exploits,omitempty"`
    Metasploits          []Metasploit         `json:"metasploits,omitempty"`
    Mitigations          []Mitigation         `json:"mitigations,omitempty"`
    Ctis                 []string             `json:"ctis,omitempty"`
    AlertDict            AlertDict            `json:"alertDict,omitempty"`
    CpeURIs              []string             `json:"cpeURIs,omitempty"` // CpeURIs related to this CVE defined in config.toml
    GitHubSecurityAlerts GitHubSecurityAlerts `json:"gitHubSecurityAlerts,omitempty"`
@@ -283,7 +284,7 @@ type GitHubSecurityAlerts []GitHubSecurityAlert
// Add adds given arg to the slice and return the slice (immutable)
func (g GitHubSecurityAlerts) Add(alert GitHubSecurityAlert) GitHubSecurityAlerts {
    for _, a := range g {
        if a.PackageName == alert.PackageName {
        if a.RepoURLPackageName() == alert.RepoURLPackageName() {
            return g
        }
    }
@@ -293,19 +294,39 @@ func (g GitHubSecurityAlerts) Add(alert GitHubSecurityAlert) GitHubSecurityAlert
// Names return a slice of lib names
func (g GitHubSecurityAlerts) Names() (names []string) {
    for _, a := range g {
        names = append(names, a.PackageName)
        names = append(names, a.RepoURLPackageName())
    }
    return names
}

// GitHubSecurityAlert has detected CVE-ID, PackageName, Status fetched via GitHub API
// GitHubSecurityAlert has detected CVE-ID, GSAVulnerablePackage, Status fetched via GitHub API
type GitHubSecurityAlert struct {
    PackageName   string    `json:"packageName"`
    FixedIn       string    `json:"fixedIn"`
    AffectedRange string    `json:"affectedRange"`
    Dismissed     bool      `json:"dismissed"`
    DismissedAt   time.Time `json:"dismissedAt"`
    DismissReason string    `json:"dismissReason"`
    Repository    string               `json:"repository"`
    Package       GSAVulnerablePackage `json:"package,omitempty"`
    FixedIn       string               `json:"fixedIn"`
    AffectedRange string               `json:"affectedRange"`
    Dismissed     bool                 `json:"dismissed"`
    DismissedAt   time.Time            `json:"dismissedAt"`
    DismissReason string               `json:"dismissReason"`
}

// RepoURLPackageName returns a string connecting the repository and package name
func (a GitHubSecurityAlert) RepoURLPackageName() string {
    return fmt.Sprintf("%s %s", a.Repository, a.Package.Name)
}

// RepoURLManifestPath should be same format with DependencyGraphManifest.RepoURLFilename()
func (a GitHubSecurityAlert) RepoURLManifestPath() string {
    return fmt.Sprintf("%s/%s", a.Repository, a.Package.ManifestPath)
}

// GSAVulnerablePackage has vulnerable package information
type GSAVulnerablePackage struct {
    Name             string `json:"name"`
    Ecosystem        string `json:"ecosystem"`
    ManifestFilename string `json:"manifestFilename"`
    ManifestPath     string `json:"manifestPath"`
    Requirements     string `json:"requirements"`
}

// LibraryFixedIns is a list of Library's FixedIn
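`GitHubSecurityAlert.RepoURLManifestPath()` and `DependencyGraphManifest.RepoURLFilename()` deliberately produce the same key format, which is what lets alerts be joined to dependency-graph manifests. A self-contained sketch with minimal stand-in types and invented repository/package values:

```go
package main

import "fmt"

// Minimal stand-ins for the models types above; field sets trimmed.
type gsaPackage struct{ Name, ManifestPath string }
type alert struct {
	Repository string
	Package    gsaPackage
}
type manifest struct{ Repository, Filename string }

func (a alert) repoURLManifestPath() string {
	return fmt.Sprintf("%s/%s", a.Repository, a.Package.ManifestPath)
}
func (m manifest) repoURLFilename() string {
	return fmt.Sprintf("%s/%s", m.Repository, m.Filename)
}

func main() {
	a := alert{
		Repository: "github.com/example/repo",
		Package:    gsaPackage{Name: "lodash", ManifestPath: "web/package-lock.json"},
	}
	m := manifest{Repository: "github.com/example/repo", Filename: "web/package-lock.json"}
	// Both keys render as "github.com/example/repo/web/package-lock.json".
	fmt.Println(a.repoURLManifestPath() == m.repoURLFilename()) // true
	fmt.Println(a.repoURLManifestPath()[:20], "...")            // joined lookup key
}
```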
@@ -392,7 +413,7 @@ func (v VulnInfo) Titles(lang, myFamily string) (values []CveContentStr) {
|
||||
}
|
||||
}
|
||||
|
||||
order := CveContentTypes{Trivy, Nvd, NewCveContentType(myFamily)}
|
||||
order := append(CveContentTypes{Trivy, Nvd}, GetCveContentTypes(myFamily)...)
|
||||
order = append(order, AllCveContetTypes.Except(append(order, Jvn)...)...)
|
||||
for _, ctype := range order {
|
||||
if conts, found := v.CveContents[ctype]; found {
|
||||
@@ -439,7 +460,7 @@ func (v VulnInfo) Summaries(lang, myFamily string) (values []CveContentStr) {
|
||||
}
|
||||
}
|
||||
|
||||
order := CveContentTypes{Trivy, NewCveContentType(myFamily), Nvd, GitHub}
|
||||
order := append(append(CveContentTypes{Trivy}, GetCveContentTypes(myFamily)...), Nvd, GitHub)
|
||||
order = append(order, AllCveContetTypes.Except(append(order, Jvn)...)...)
|
||||
for _, ctype := range order {
|
||||
if conts, found := v.CveContents[ctype]; found {
|
||||
@@ -510,7 +531,7 @@ func (v VulnInfo) Cvss2Scores() (values []CveContentCvss) {
|
||||
|
||||
// Cvss3Scores returns CVSS V3 Score
|
||||
func (v VulnInfo) Cvss3Scores() (values []CveContentCvss) {
|
||||
order := []CveContentType{RedHatAPI, RedHat, Nvd, Jvn}
|
||||
order := []CveContentType{RedHatAPI, RedHat, SUSE, Nvd, Jvn}
|
||||
for _, ctype := range order {
|
||||
if conts, found := v.CveContents[ctype]; found {
|
||||
for _, cont := range conts {
|
||||
@@ -531,7 +552,7 @@ func (v VulnInfo) Cvss3Scores() (values []CveContentCvss) {
|
||||
}
|
||||
}
|
||||
|
||||
for _, ctype := range []CveContentType{Debian, DebianSecurityTracker, Ubuntu, Amazon, Trivy, GitHub, WpScan} {
|
||||
for _, ctype := range []CveContentType{Debian, DebianSecurityTracker, Ubuntu, UbuntuAPI, Amazon, Trivy, GitHub, WpScan} {
|
||||
if conts, found := v.CveContents[ctype]; found {
|
||||
for _, cont := range conts {
|
||||
if cont.Cvss3Severity != "" {
|
||||
@@ -549,7 +570,7 @@ func (v VulnInfo) Cvss3Scores() (values []CveContentCvss) {
|
||||
}
|
||||
}
|
||||
|
||||
// Memo: Only RedHat, Oracle and Amazon has severity data in advisory.
|
||||
// Memo: Only RedHat, SUSE, Oracle and Amazon has severity data in advisory.
|
||||
for _, adv := range v.DistroAdvisories {
|
||||
if adv.Severity != "" {
|
||||
score := severityToCvssScoreRoughly(adv.Severity)
|
||||
@@ -709,7 +730,7 @@ func severityToCvssScoreRange(severity string) string {
|
||||
return "7.0-8.9"
|
||||
case "MODERATE", "MEDIUM":
|
||||
return "4.0-6.9"
|
||||
case "LOW":
|
||||
case "LOW", "NEGLIGIBLE":
|
||||
return "0.1-3.9"
|
||||
}
|
||||
return "None"
|
||||
@@ -727,6 +748,10 @@ func severityToCvssScoreRange(severity string) string {
|
||||
// Critical, High, Medium, Low
|
||||
// https://wiki.ubuntu.com/Bugs/Importance
|
||||
// https://people.canonical.com/~ubuntu-security/cve/priority.html
|
||||
//
|
||||
// Ubuntu CVE Tracker
|
||||
// Critical, High, Medium, Low, Negligible
|
||||
// https://people.canonical.com/~ubuntu-security/priority.html
|
||||
func severityToCvssScoreRoughly(severity string) float64 {
|
||||
switch strings.ToUpper(severity) {
|
||||
case "CRITICAL":
|
||||
@@ -735,7 +760,7 @@ func severityToCvssScoreRoughly(severity string) float64 {
|
||||
return 8.9
|
||||
case "MODERATE", "MEDIUM":
|
||||
return 6.9
|
||||
case "LOW":
|
||||
case "LOW", "NEGLIGIBLE":
|
||||
return 3.9
|
||||
}
|
||||
return 0
|
||||
@@ -796,6 +821,8 @@ type Exploit struct {
|
||||
DocumentURL *string `json:"documentURL,omitempty"`
|
||||
ShellCodeURL *string `json:"shellCodeURL,omitempty"`
|
||||
BinaryURL *string `json:"binaryURL,omitempty"`
|
||||
PaperURL *string `json:"paperURL,omitempty"`
|
||||
GHDBURL *string `json:"ghdbURL,omitempty"`
|
||||
}
|
||||
|
||||
// Metasploit :
|
||||
@@ -903,6 +930,9 @@ const (
|
||||
// UbuntuAPIMatchStr :
|
||||
UbuntuAPIMatchStr = "UbuntuAPIMatch"
|
||||
|
||||
// WindowsUpdateSearchStr :
|
||||
WindowsUpdateSearchStr = "WindowsUpdateSearch"
|
||||
|
||||
// TrivyMatchStr :
|
||||
TrivyMatchStr = "TrivyMatch"
|
||||
|
||||
@@ -941,6 +971,9 @@ var (
|
||||
// UbuntuAPIMatch ranking how confident the CVE-ID was detected correctly
|
||||
UbuntuAPIMatch = Confidence{100, UbuntuAPIMatchStr, 0}
|
||||
|
||||
// WindowsUpdateSearch ranking how confident the CVE-ID was detected correctly
|
||||
WindowsUpdateSearch = Confidence{100, WindowsUpdateSearchStr, 0}
|
||||
|
||||
// TrivyMatch ranking how confident the CVE-ID was detected correctly
|
||||
TrivyMatch = Confidence{100, TrivyMatchStr, 0}
|
||||
|
||||
|
||||
@@ -4,10 +4,12 @@
package oval

import (
    "github.com/future-architect/vuls/config"
    "golang.org/x/xerrors"

    "github.com/future-architect/vuls/constant"
    "github.com/future-architect/vuls/logging"
    "github.com/future-architect/vuls/models"
    ovaldb "github.com/vulsio/goval-dictionary/db"
)

// Alpine is the struct of Alpine Linux
@@ -16,11 +18,12 @@ type Alpine struct {
}

// NewAlpine creates OVAL client for SUSE
func NewAlpine(cnf config.VulnDictInterface) Alpine {
func NewAlpine(driver ovaldb.DB, baseURL string) Alpine {
    return Alpine{
        Base{
            family: constant.Alpine,
            Cnf:    cnf,
            driver:  driver,
            baseURL: baseURL,
            family:  constant.Alpine,
        },
    }
}
@@ -28,23 +31,13 @@ func NewAlpine(cnf config.VulnDictInterface) Alpine {
// FillWithOval returns scan result after updating CVE info by OVAL
func (o Alpine) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
    var relatedDefs ovalResult
    if o.Cnf.IsFetchViaHTTP() {
        if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
            return 0, err
    if o.driver == nil {
        if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.baseURL); err != nil {
            return 0, xerrors.Errorf("Failed to get Definitions via HTTP. err: %w", err)
        }
    } else {
        driver, err := newOvalDB(o.Cnf)
        if err != nil {
            return 0, err
        }
        defer func() {
            if err := driver.CloseDB(); err != nil {
                logging.Log.Errorf("Failed to close DB. err: %+v", err)
            }
        }()

        if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
            return 0, err
        if relatedDefs, err = getDefsByPackNameFromOvalDB(r, o.driver); err != nil {
            return 0, xerrors.Errorf("Failed to get Definitions from DB. err: %w", err)
        }
    }
    for _, defPacks := range relatedDefs.entries {
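The same refactor pattern repeats across every OVAL client in this diff: the client no longer opens its own goval-dictionary DB from config, but receives an injected `driver` and `baseURL`, and a nil driver means "query goval-dictionary over HTTP". A simplified standalone sketch of that pattern (the interface and names below are stand-ins, not the exact vuls/goval-dictionary APIs):

```go
package main

import "fmt"

// ovalDB is a stand-in for the injected goval-dictionary driver.
type ovalDB interface {
	CountDefs(family, release string) (int, error)
}

type client struct {
	driver  ovalDB // nil when goval-dictionary is reached over HTTP
	baseURL string
}

func (c client) countDefs(family, release string) (int, error) {
	if c.driver == nil {
		// In the real code this would issue: GET {baseURL}/count/{family}/{release}
		return 0, fmt.Errorf("HTTP mode not wired in this sketch (baseURL=%s)", c.baseURL)
	}
	return c.driver.CountDefs(family, release)
}

func main() {
	c := client{driver: nil, baseURL: "http://127.0.0.1:1324"}
	if _, err := c.countDefs("alpine", "3.16"); err != nil {
		fmt.Println(err)
	}
}
```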
361  oval/debian.go
@@ -4,14 +4,12 @@
|
||||
package oval
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
ovaldb "github.com/vulsio/goval-dictionary/db"
|
||||
ovalmodels "github.com/vulsio/goval-dictionary/models"
|
||||
)
|
||||
|
||||
@@ -122,12 +120,13 @@ type Debian struct {
|
||||
}
|
||||
|
||||
// NewDebian creates OVAL client for Debian
|
||||
func NewDebian(cnf config.VulnDictInterface) Debian {
|
||||
func NewDebian(driver ovaldb.DB, baseURL string) Debian {
|
||||
return Debian{
|
||||
DebianBase{
|
||||
Base{
|
||||
family: constant.Debian,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.Debian,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -141,35 +140,29 @@ func (o Debian) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
|
||||
// Add linux and set the version of running kernel to search OVAL.
|
||||
if r.Container.ContainerID == "" {
|
||||
newVer := ""
|
||||
if p, ok := r.Packages[linuxImage]; ok {
|
||||
newVer = p.NewVersion
|
||||
}
|
||||
r.Packages["linux"] = models.Package{
|
||||
Name: "linux",
|
||||
Version: r.RunningKernel.Version,
|
||||
NewVersion: newVer,
|
||||
if r.RunningKernel.Version != "" {
|
||||
newVer := ""
|
||||
if p, ok := r.Packages[linuxImage]; ok {
|
||||
newVer = p.NewVersion
|
||||
}
|
||||
r.Packages["linux"] = models.Package{
|
||||
Name: "linux",
|
||||
Version: r.RunningKernel.Version,
|
||||
NewVersion: newVer,
|
||||
}
|
||||
} else {
|
||||
logging.Log.Warnf("Since the exact kernel version is not available, the vulnerability in the linux package is not detected.")
|
||||
}
|
||||
}
|
||||
|
||||
var relatedDefs ovalResult
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
if o.driver == nil {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.baseURL); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions via HTTP. err: %w", err)
|
||||
}
|
||||
} else {
|
||||
driver, err := newOvalDB(o.Cnf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(r, o.driver); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions from DB. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -209,315 +202,19 @@ type Ubuntu struct {
|
||||
}
|
||||
|
||||
// NewUbuntu creates OVAL client for Debian
|
||||
func NewUbuntu(cnf config.VulnDictInterface) Ubuntu {
|
||||
func NewUbuntu(driver ovaldb.DB, baseURL string) Ubuntu {
|
||||
return Ubuntu{
|
||||
DebianBase{
|
||||
Base{
|
||||
family: constant.Ubuntu,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.Ubuntu,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o Ubuntu) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
switch util.Major(r.Release) {
|
||||
case "14":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-azure",
|
||||
"linux-lts-xenial",
|
||||
"linux-meta",
|
||||
"linux-meta-aws",
|
||||
"linux-meta-azure",
|
||||
"linux-meta-lts-xenial",
|
||||
"linux-signed",
|
||||
"linux-signed-azure",
|
||||
"linux-signed-lts-xenial",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "16":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-aws-hwe",
|
||||
"linux-azure",
|
||||
"linux-euclid",
|
||||
"linux-flo",
|
||||
"linux-gcp",
|
||||
"linux-gke",
|
||||
"linux-goldfish",
|
||||
"linux-hwe",
|
||||
"linux-kvm",
|
||||
"linux-mako",
|
||||
"linux-meta",
|
||||
"linux-meta-aws",
|
||||
"linux-meta-aws-hwe",
|
||||
"linux-meta-azure",
|
||||
"linux-meta-gcp",
|
||||
"linux-meta-hwe",
|
||||
"linux-meta-kvm",
|
||||
"linux-meta-oracle",
|
||||
"linux-meta-raspi2",
|
||||
"linux-meta-snapdragon",
|
||||
"linux-oem",
|
||||
"linux-oracle",
|
||||
"linux-raspi2",
|
||||
"linux-signed",
|
||||
"linux-signed-azure",
|
||||
"linux-signed-gcp",
|
||||
"linux-signed-hwe",
|
||||
"linux-signed-oracle",
|
||||
"linux-snapdragon",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "18":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-aws-5.0",
|
||||
"linux-azure",
|
||||
"linux-gcp",
|
||||
"linux-gcp-5.3",
|
||||
"linux-gke-4.15",
|
||||
"linux-gke-5.0",
|
||||
"linux-gke-5.3",
|
||||
"linux-hwe",
|
||||
"linux-kvm",
|
||||
"linux-meta",
|
||||
"linux-meta-aws",
|
||||
"linux-meta-aws-5.0",
|
||||
"linux-meta-azure",
|
||||
"linux-meta-gcp",
|
||||
"linux-meta-gcp-5.3",
|
||||
"linux-meta-gke-4.15",
|
||||
"linux-meta-gke-5.0",
|
||||
"linux-meta-gke-5.3",
|
||||
"linux-meta-hwe",
|
||||
"linux-meta-kvm",
|
||||
"linux-meta-oem",
|
||||
"linux-meta-oem-osp1",
|
||||
"linux-meta-oracle",
|
||||
"linux-meta-oracle-5.0",
|
||||
"linux-meta-oracle-5.3",
|
||||
"linux-meta-raspi2",
|
||||
"linux-meta-raspi2-5.3",
|
||||
"linux-meta-snapdragon",
|
||||
"linux-oem",
|
||||
"linux-oem-osp1",
|
||||
"linux-oracle",
|
||||
"linux-oracle-5.0",
|
||||
"linux-oracle-5.3",
|
||||
"linux-raspi2",
|
||||
"linux-raspi2-5.3",
|
||||
"linux-signed",
|
||||
"linux-signed-azure",
|
||||
"linux-signed-gcp",
|
||||
"linux-signed-gcp-5.3",
|
||||
"linux-signed-gke-4.15",
|
||||
"linux-signed-gke-5.0",
|
||||
"linux-signed-gke-5.3",
|
||||
"linux-signed-hwe",
|
||||
"linux-signed-oem",
|
||||
"linux-signed-oem-osp1",
|
||||
"linux-signed-oracle",
|
||||
"linux-signed-oracle-5.0",
|
||||
"linux-signed-oracle-5.3",
|
||||
"linux-snapdragon",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "20":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-azure",
|
||||
"linux-gcp",
|
||||
"linux-kvm",
|
||||
"linux-meta",
|
||||
"linux-meta-aws",
|
||||
"linux-meta-azure",
|
||||
"linux-meta-gcp",
|
||||
"linux-meta-kvm",
|
||||
"linux-meta-oem-5.6",
|
||||
"linux-meta-oracle",
|
||||
"linux-meta-raspi",
|
||||
"linux-meta-riscv",
|
||||
"linux-oem-5.6",
|
||||
"linux-oracle",
|
||||
"linux-raspi",
|
||||
"linux-raspi2",
|
||||
"linux-riscv",
|
||||
"linux-signed",
|
||||
"linux-signed-azure",
|
||||
"linux-signed-gcp",
|
||||
"linux-signed-oem-5.6",
|
||||
"linux-signed-oracle",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "21":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-base-sgx",
|
||||
"linux-base",
|
||||
"linux-cloud-tools-common",
|
||||
"linux-cloud-tools-generic",
|
||||
"linux-cloud-tools-lowlatency",
|
||||
"linux-cloud-tools-virtual",
|
||||
"linux-gcp",
|
||||
"linux-generic",
|
||||
"linux-gke",
|
||||
"linux-headers-aws",
|
||||
"linux-headers-gcp",
|
||||
"linux-headers-gke",
|
||||
"linux-headers-oracle",
|
||||
"linux-image-aws",
|
||||
"linux-image-extra-virtual",
|
||||
"linux-image-gcp",
|
||||
"linux-image-generic",
|
||||
"linux-image-gke",
|
||||
"linux-image-lowlatency",
|
||||
"linux-image-oracle",
|
||||
"linux-image-virtual",
|
||||
"linux-lowlatency",
|
||||
"linux-modules-extra-aws",
|
||||
"linux-modules-extra-gcp",
|
||||
"linux-modules-extra-gke",
|
||||
"linux-oracle",
|
||||
"linux-tools-aws",
|
||||
"linux-tools-common",
|
||||
"linux-tools-gcp",
|
||||
"linux-tools-generic",
|
||||
"linux-tools-gke",
|
||||
"linux-tools-host",
|
||||
"linux-tools-lowlatency",
|
||||
"linux-tools-oracle",
|
||||
"linux-tools-virtual",
|
||||
"linux-virtual",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
}
|
||||
return 0, fmt.Errorf("Ubuntu %s is not support for now", r.Release)
|
||||
}
|
||||
|
||||
func (o Ubuntu) fillWithOval(r *models.ScanResult, kernelNamesInOval []string) (nCVEs int, err error) {
|
||||
linuxImage := "linux-image-" + r.RunningKernel.Release
|
||||
runningKernelVersion := ""
|
||||
kernelPkgInOVAL := ""
|
||||
isOVALKernelPkgAdded := false
|
||||
unusedKernels := []models.Package{}
|
||||
copiedSourcePkgs := models.SrcPackages{}
|
||||
|
||||
if r.Container.ContainerID == "" {
|
||||
if v, ok := r.Packages[linuxImage]; ok {
|
||||
runningKernelVersion = v.Version
|
||||
} else {
|
||||
logging.Log.Warnf("Unable to detect vulns of running kernel because the version of the running kernel is unknown. server: %s",
|
||||
r.ServerName)
|
||||
}
|
||||
|
||||
for _, n := range kernelNamesInOval {
|
||||
if p, ok := r.Packages[n]; ok {
|
||||
kernelPkgInOVAL = p.Name
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// remove unused kernels from packages to prevent detecting vulns of unused kernel
|
||||
for _, n := range kernelNamesInOval {
|
||||
if v, ok := r.Packages[n]; ok {
|
||||
unusedKernels = append(unusedKernels, v)
|
||||
delete(r.Packages, n)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove linux-* in order to detect only vulnerabilities in the running kernel.
|
||||
for n := range r.Packages {
|
||||
if n != kernelPkgInOVAL && strings.HasPrefix(n, "linux-") {
|
||||
unusedKernels = append(unusedKernels, r.Packages[n])
|
||||
delete(r.Packages, n)
|
||||
}
|
||||
}
|
||||
for srcPackName, srcPack := range r.SrcPackages {
|
||||
copiedSourcePkgs[srcPackName] = srcPack
|
||||
targetBinaryNames := []string{}
|
||||
for _, n := range srcPack.BinaryNames {
|
||||
if n == kernelPkgInOVAL || !strings.HasPrefix(n, "linux-") {
|
||||
targetBinaryNames = append(targetBinaryNames, n)
|
||||
}
|
||||
}
|
||||
srcPack.BinaryNames = targetBinaryNames
|
||||
r.SrcPackages[srcPackName] = srcPack
|
||||
}
|
||||
|
||||
if kernelPkgInOVAL == "" {
|
||||
logging.Log.Warnf("The OVAL name of the running kernel image %+v is not found. So vulns of `linux` wll be detected. server: %s",
|
||||
r.RunningKernel, r.ServerName)
|
||||
kernelPkgInOVAL = "linux"
|
||||
isOVALKernelPkgAdded = true
|
||||
}
|
||||
|
||||
if runningKernelVersion != "" {
|
||||
r.Packages[kernelPkgInOVAL] = models.Package{
|
||||
Name: kernelPkgInOVAL,
|
||||
Version: runningKernelVersion,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var relatedDefs ovalResult
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
} else {
|
||||
driver, err := newOvalDB(o.Cnf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
if isOVALKernelPkgAdded {
|
||||
delete(r.Packages, kernelPkgInOVAL)
|
||||
}
|
||||
for _, p := range unusedKernels {
|
||||
r.Packages[p.Name] = p
|
||||
}
|
||||
r.SrcPackages = copiedSourcePkgs
|
||||
|
||||
for _, defPacks := range relatedDefs.entries {
|
||||
// Remove "linux" added above for searching oval
|
||||
// "linux" is not a real package name (key of affected packages in OVAL)
|
||||
if nfy, ok := defPacks.binpkgFixstat[kernelPkgInOVAL]; isOVALKernelPkgAdded && ok {
|
||||
defPacks.binpkgFixstat[linuxImage] = nfy
|
||||
delete(defPacks.binpkgFixstat, kernelPkgInOVAL)
|
||||
for i, p := range defPacks.def.AffectedPacks {
|
||||
if p.Name == kernelPkgInOVAL {
|
||||
p.Name = linuxImage
|
||||
defPacks.def.AffectedPacks[i] = p
|
||||
}
|
||||
}
|
||||
}
|
||||
o.update(r, defPacks)
|
||||
}
|
||||
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if conts, ok := vuln.CveContents[models.Ubuntu]; ok {
|
||||
for i, cont := range conts {
|
||||
cont.SourceLink = "http://people.ubuntu.com/~ubuntu-security/cve/" + cont.CveID
|
||||
vuln.CveContents[models.Ubuntu][i] = cont
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(relatedDefs.entries), nil
|
||||
func (o Ubuntu) FillWithOval(_ *models.ScanResult) (nCVEs int, err error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
126  oval/oval.go
@@ -5,15 +5,18 @@ package oval
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"github.com/vulsio/goval-dictionary/db"
|
||||
"golang.org/x/xerrors"
|
||||
ovaldb "github.com/vulsio/goval-dictionary/db"
|
||||
)
|
||||
|
||||
// Client is the interface of OVAL client.
|
||||
@@ -21,49 +24,58 @@ type Client interface {
|
||||
FillWithOval(*models.ScanResult) (int, error)
|
||||
CheckIfOvalFetched(string, string) (bool, error)
|
||||
CheckIfOvalFresh(string, string) (bool, error)
|
||||
CloseDB() error
|
||||
}
|
||||
|
||||
// Base is a base struct
|
||||
type Base struct {
|
||||
family string
|
||||
Cnf config.VulnDictInterface
|
||||
driver ovaldb.DB
|
||||
baseURL string
|
||||
family string
|
||||
}
|
||||
|
||||
// CloseDB close a DB connection
|
||||
func (b Base) CloseDB() error {
|
||||
if b.driver == nil {
|
||||
return nil
|
||||
}
|
||||
return b.driver.CloseDB()
|
||||
}
|
||||
|
||||
// CheckIfOvalFetched checks if oval entries are in DB by family, release.
|
||||
func (b Base) CheckIfOvalFetched(osFamily, release string) (fetched bool, err error) {
|
||||
func (b Base) CheckIfOvalFetched(osFamily, release string) (bool, error) {
|
||||
ovalFamily, err := GetFamilyInOval(osFamily)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to GetFamilyInOval. err: %w", err)
|
||||
}
|
||||
if !b.Cnf.IsFetchViaHTTP() {
|
||||
driver, err := newOvalDB(b.Cnf)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
count, err := driver.CountDefs(ovalFamily, release)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to count OVAL defs: %s, %s, %w", ovalFamily, release, err)
|
||||
}
|
||||
logging.Log.Infof("OVAL %s %s found. defs: %d", osFamily, release, count)
|
||||
return 0 < count, nil
|
||||
if ovalFamily == "" {
|
||||
return false, nil
|
||||
}
|
||||
ovalRelease := release
|
||||
if osFamily == constant.CentOS {
|
||||
ovalRelease = strings.TrimPrefix(release, "stream")
|
||||
}
|
||||
|
||||
url, _ := util.URLPathJoin(config.Conf.OvalDict.URL, "count", ovalFamily, release)
|
||||
resp, body, errs := gorequest.New().Timeout(10 * time.Second).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return false, xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
|
||||
var count int
|
||||
if b.driver == nil {
|
||||
url, err := util.URLPathJoin(b.baseURL, "count", ovalFamily, ovalRelease)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
resp, body, errs := gorequest.New().Timeout(10 * time.Second).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return false, xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
|
||||
}
|
||||
if err := json.Unmarshal([]byte(body), &count); err != nil {
|
||||
return false, xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
|
||||
}
|
||||
} else {
|
||||
count, err = b.driver.CountDefs(ovalFamily, ovalRelease)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to count OVAL defs: %s, %s, %w", ovalFamily, ovalRelease, err)
|
||||
}
|
||||
}
|
||||
count := 0
|
||||
if err := json.Unmarshal([]byte(body), &count); err != nil {
|
||||
return false, xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
|
||||
}
|
||||
logging.Log.Infof("OVAL %s %s is fresh. defs: %d", osFamily, release, count)
|
||||
logging.Log.Infof("OVAL %s %s found. defs: %d", ovalFamily, ovalRelease, count)
|
||||
return 0 < count, nil
|
||||
}
|
||||
|
||||
@@ -71,64 +83,62 @@ func (b Base) CheckIfOvalFetched(osFamily, release string) (fetched bool, err er
|
||||
func (b Base) CheckIfOvalFresh(osFamily, release string) (ok bool, err error) {
|
||||
ovalFamily, err := GetFamilyInOval(osFamily)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to GetFamilyInOval. err: %w", err)
|
||||
}
|
||||
if ovalFamily == "" {
|
||||
return false, nil
|
||||
}
|
||||
ovalRelease := release
|
||||
if osFamily == constant.CentOS {
|
||||
ovalRelease = strings.TrimPrefix(release, "stream")
|
||||
}
|
||||
|
||||
var lastModified time.Time
|
||||
if !b.Cnf.IsFetchViaHTTP() {
|
||||
driver, err := newOvalDB(b.Cnf)
|
||||
if b.driver == nil {
|
||||
url, err := util.URLPathJoin(b.baseURL, "lastmodified", ovalFamily, ovalRelease)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
lastModified, err = driver.GetLastModified(ovalFamily, release)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to GetLastModified: %w", err)
|
||||
}
|
||||
} else {
|
||||
url, _ := util.URLPathJoin(config.Conf.OvalDict.URL, "lastmodified", ovalFamily, release)
|
||||
resp, body, errs := gorequest.New().Timeout(10 * time.Second).Get(url).End()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return false, xerrors.Errorf("HTTP GET error, url: %s, resp: %v, err: %+v", url, resp, errs)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal([]byte(body), &lastModified); err != nil {
|
||||
return false, xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
|
||||
}
|
||||
} else {
|
||||
lastModified, err = b.driver.GetLastModified(ovalFamily, ovalRelease)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to GetLastModified: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
since := time.Now()
|
||||
since = since.AddDate(0, 0, -3)
|
||||
if lastModified.Before(since) {
|
||||
logging.Log.Warnf("OVAL for %s %s is old, last modified is %s. It's recommended to update OVAL to improve scanning accuracy. How to update OVAL database, see https://github.com/vulsio/goval-dictionary#usage",
|
||||
osFamily, release, lastModified)
|
||||
ovalFamily, ovalRelease, lastModified)
|
||||
return false, nil
|
||||
}
|
||||
logging.Log.Infof("OVAL %s %s is fresh. lastModified: %s", osFamily, release, lastModified.Format(time.RFC3339))
|
||||
logging.Log.Infof("OVAL %s %s is fresh. lastModified: %s", ovalFamily, ovalRelease, lastModified.Format(time.RFC3339))
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// NewOvalDB returns oval db client
func newOvalDB(cnf config.VulnDictInterface) (driver db.DB, err error) {
func newOvalDB(cnf config.VulnDictInterface) (ovaldb.DB, error) {
    if cnf.IsFetchViaHTTP() {
        return nil, nil
    }

    path := cnf.GetURL()
    if cnf.GetType() == "sqlite3" {
        path = cnf.GetSQLite3Path()
    }

    driver, locked, err := db.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), db.Option{})
    driver, locked, err := ovaldb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), ovaldb.Option{})
    if err != nil {
        if locked {
            err = xerrors.Errorf("SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
            return nil, xerrors.Errorf("Failed to init OVAL DB. SQLite3: %s is locked. err: %w, ", cnf.GetSQLite3Path(), err)
        }
        err = xerrors.Errorf("Failed to new OVAL DB. err: %w", err)
        return nil, err
        return nil, xerrors.Errorf("Failed to init OVAL DB. DB Path: %s, err: %w", path, err)
    }
    return driver, nil
}
24  oval/pseudo.go  (new file)
@@ -0,0 +1,24 @@
package oval

import "github.com/future-architect/vuls/models"

// Pseudo is OVAL client for Windows, FreeBSD and Pseudo
type Pseudo struct {
    Base
}

// NewPseudo creates OVAL client for Windows, FreeBSD and Pseudo
func NewPseudo(family string) Pseudo {
    return Pseudo{
        Base{
            driver:  nil,
            baseURL: "",
            family:  family,
        },
    }
}

// FillWithOval is a mock function for operating systems that do not use OVAL
func (pse Pseudo) FillWithOval(_ *models.ScanResult) (int, error) {
    return 0, nil
}
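`Pseudo` gives families without OVAL data a client that satisfies the same `Client` interface as the real ones. How callers choose between the constructors is not shown in this diff; the following is a purely hypothetical dispatch sketch inside the `oval` package, using only constructors that appear in this changeset:

```go
// Hypothetical sketch only: the real constructor wiring lives outside this diff.
func newClientSketch(family string, driver ovaldb.DB, baseURL string) Client {
	switch family {
	case constant.Alpine:
		return NewAlpine(driver, baseURL)
	case constant.Debian:
		return NewDebian(driver, baseURL)
	case constant.RedHat:
		return NewRedhat(driver, baseURL)
	default:
		// Windows, FreeBSD, and other families without OVAL data.
		return NewPseudo(family)
	}
}
```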
134  oval/redhat.go
@@ -5,17 +5,18 @@ package oval
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
ovaldb "github.com/vulsio/goval-dictionary/db"
|
||||
ovalmodels "github.com/vulsio/goval-dictionary/models"
|
||||
)
|
||||
|
||||
// RedHatBase is the base struct for RedHat, CentOS, Alma and Rocky
|
||||
// RedHatBase is the base struct for RedHat, CentOS, Alma, Rocky and Fedora
|
||||
type RedHatBase struct {
|
||||
Base
|
||||
}
|
||||
@@ -23,23 +24,13 @@ type RedHatBase struct {
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o RedHatBase) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
var relatedDefs ovalResult
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
if o.driver == nil {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.baseURL); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions via HTTP. err: %w", err)
|
||||
}
|
||||
} else {
|
||||
driver, err := newOvalDB(o.Cnf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(r, o.driver); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions from DB. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -57,6 +48,15 @@ func (o RedHatBase) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
vuln.CveContents[models.RedHat][i] = cont
|
||||
}
|
||||
}
|
||||
case models.Fedora:
|
||||
for _, d := range vuln.DistroAdvisories {
|
||||
if conts, ok := vuln.CveContents[models.Fedora]; ok {
|
||||
for i, cont := range conts {
|
||||
cont.SourceLink = "https://bodhi.fedoraproject.org/updates/" + d.AdvisoryID
|
||||
vuln.CveContents[models.Fedora][i] = cont
|
||||
}
|
||||
}
|
||||
}
|
||||
case models.Oracle:
|
||||
if conts, ok := vuln.CveContents[models.Oracle]; ok {
|
||||
for i, cont := range conts {
|
||||
@@ -216,8 +216,8 @@ func (o RedHatBase) convertToModel(cveID string, def *ovalmodels.Definition) *mo
|
||||
continue
|
||||
}
|
||||
|
||||
score2, vec2 := o.parseCvss2(cve.Cvss2)
|
||||
score3, vec3 := o.parseCvss3(cve.Cvss3)
|
||||
score2, vec2 := parseCvss2(cve.Cvss2)
|
||||
score3, vec3 := parseCvss3(cve.Cvss3)
|
||||
|
||||
sev2, sev3, severity := "", "", def.Advisory.Severity
|
||||
if cve.Impact != "" {
|
||||
@@ -253,51 +253,19 @@ func (o RedHatBase) convertToModel(cveID string, def *ovalmodels.Definition) *mo
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParseCvss2 divide CVSSv2 string into score and vector
|
||||
// 5/AV:N/AC:L/Au:N/C:N/I:N/A:P
|
||||
func (o RedHatBase) parseCvss2(scoreVector string) (score float64, vector string) {
|
||||
var err error
|
||||
ss := strings.Split(scoreVector, "/")
|
||||
if 1 < len(ss) {
|
||||
if score, err = strconv.ParseFloat(ss[0], 64); err != nil {
|
||||
return 0, ""
|
||||
}
|
||||
return score, strings.Join(ss[1:], "/")
|
||||
}
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
// ParseCvss3 divide CVSSv3 string into score and vector
|
||||
// 5.6/CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L
|
||||
func (o RedHatBase) parseCvss3(scoreVector string) (score float64, vector string) {
|
||||
var err error
|
||||
for _, s := range []string{
|
||||
"/CVSS:3.0/",
|
||||
"/CVSS:3.1/",
|
||||
} {
|
||||
ss := strings.Split(scoreVector, s)
|
||||
if 1 < len(ss) {
|
||||
if score, err = strconv.ParseFloat(ss[0], 64); err != nil {
|
||||
return 0, ""
|
||||
}
|
||||
return score, strings.TrimPrefix(s, "/") + ss[1]
|
||||
}
|
||||
}
|
||||
return 0, ""
|
||||
}
|
||||
|
||||
// RedHat is the interface for RedhatBase OVAL
|
||||
type RedHat struct {
|
||||
RedHatBase
|
||||
}
|
||||
|
||||
// NewRedhat creates OVAL client for Redhat
|
||||
func NewRedhat(cnf config.VulnDictInterface) RedHat {
|
||||
func NewRedhat(driver ovaldb.DB, baseURL string) RedHat {
|
||||
return RedHat{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: constant.RedHat,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.RedHat,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -309,12 +277,13 @@ type CentOS struct {
|
||||
}
|
||||
|
||||
// NewCentOS creates OVAL client for CentOS
|
||||
func NewCentOS(cnf config.VulnDictInterface) CentOS {
|
||||
func NewCentOS(driver ovaldb.DB, baseURL string) CentOS {
|
||||
return CentOS{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: constant.CentOS,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.CentOS,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -326,12 +295,13 @@ type Oracle struct {
|
||||
}
|
||||
|
||||
// NewOracle creates OVAL client for Oracle
|
||||
func NewOracle(cnf config.VulnDictInterface) Oracle {
|
||||
func NewOracle(driver ovaldb.DB, baseURL string) Oracle {
|
||||
return Oracle{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: constant.Oracle,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.Oracle,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -344,12 +314,13 @@ type Amazon struct {
|
||||
}
|
||||
|
||||
// NewAmazon creates OVAL client for Amazon Linux
|
||||
func NewAmazon(cnf config.VulnDictInterface) Amazon {
|
||||
func NewAmazon(driver ovaldb.DB, baseURL string) Amazon {
|
||||
return Amazon{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: constant.Amazon,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.Amazon,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -362,12 +333,13 @@ type Alma struct {
|
||||
}
|
||||
|
||||
// NewAlma creates OVAL client for Alma Linux
|
||||
func NewAlma(cnf config.VulnDictInterface) Alma {
|
||||
func NewAlma(driver ovaldb.DB, baseURL string) Alma {
|
||||
return Alma{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: constant.Alma,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.Alma,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -380,12 +352,32 @@ type Rocky struct {
|
||||
}
|
||||
|
||||
// NewRocky creates OVAL client for Rocky Linux
|
||||
func NewRocky(cnf config.VulnDictInterface) Rocky {
|
||||
func NewRocky(driver ovaldb.DB, baseURL string) Rocky {
|
||||
return Rocky{
|
||||
RedHatBase{
|
||||
Base{
|
||||
family: constant.Rocky,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.Rocky,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// Fedora is the interface for RedhatBase OVAL
|
||||
type Fedora struct {
|
||||
// Base
|
||||
RedHatBase
|
||||
}
|
||||
|
||||
// NewFedora creates OVAL client for Fedora Linux
|
||||
func NewFedora(driver ovaldb.DB, baseURL string) Fedora {
|
||||
return Fedora{
|
||||
RedHatBase{
|
||||
Base{
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: constant.Fedora,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -11,79 +11,6 @@ import (
|
||||
ovalmodels "github.com/vulsio/goval-dictionary/models"
|
||||
)
|
||||
|
||||
func TestParseCvss2(t *testing.T) {
|
||||
type out struct {
|
||||
score float64
|
||||
vector string
|
||||
}
|
||||
var tests = []struct {
|
||||
in string
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: "5/AV:N/AC:L/Au:N/C:N/I:N/A:P",
|
||||
out: out{
|
||||
score: 5.0,
|
||||
vector: "AV:N/AC:L/Au:N/C:N/I:N/A:P",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "",
|
||||
out: out{
|
||||
score: 0,
|
||||
vector: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
s, v := RedHatBase{}.parseCvss2(tt.in)
|
||||
if s != tt.out.score || v != tt.out.vector {
|
||||
t.Errorf("\nexpected: %f, %s\n actual: %f, %s",
|
||||
tt.out.score, tt.out.vector, s, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCvss3(t *testing.T) {
|
||||
type out struct {
|
||||
score float64
|
||||
vector string
|
||||
}
|
||||
var tests = []struct {
|
||||
in string
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: "5.6/CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L",
|
||||
out: out{
|
||||
score: 5.6,
|
||||
vector: "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "6.1/CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L",
|
||||
out: out{
|
||||
score: 6.1,
|
||||
vector: "CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "",
|
||||
out: out{
|
||||
score: 0,
|
||||
vector: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
s, v := RedHatBase{}.parseCvss3(tt.in)
|
||||
if s != tt.out.score || v != tt.out.vector {
|
||||
t.Errorf("\nexpected: %f, %s\n actual: %f, %s",
|
||||
tt.out.score, tt.out.vector, s, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPackNamesOfUpdate(t *testing.T) {
|
||||
var tests = []struct {
|
||||
in models.ScanResult
76  oval/suse.go
@@ -4,10 +4,13 @@
|
||||
package oval
|
||||
|
||||
import (
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
ovaldb "github.com/vulsio/goval-dictionary/db"
|
||||
ovalmodels "github.com/vulsio/goval-dictionary/models"
|
||||
)
|
||||
|
||||
@@ -17,12 +20,13 @@ type SUSE struct {
|
||||
}
|
||||
|
||||
// NewSUSE creates OVAL client for SUSE
|
||||
func NewSUSE(cnf config.VulnDictInterface) SUSE {
|
||||
func NewSUSE(driver ovaldb.DB, baseURL, family string) SUSE {
|
||||
// TODO implement other family
|
||||
return SUSE{
|
||||
Base{
|
||||
family: constant.SUSEEnterpriseServer,
|
||||
Cnf: cnf,
|
||||
driver: driver,
|
||||
baseURL: baseURL,
|
||||
family: family,
|
||||
},
|
||||
}
|
||||
}
|
||||
@@ -30,23 +34,13 @@ func NewSUSE(cnf config.VulnDictInterface) SUSE {
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o SUSE) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
var relatedDefs ovalResult
|
||||
if o.Cnf.IsFetchViaHTTP() {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.Cnf.GetURL()); err != nil {
|
||||
return 0, err
|
||||
if o.driver == nil {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.baseURL); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions via HTTP. err: %w", err)
|
||||
}
|
||||
} else {
|
||||
driver, err := newOvalDB(o.Cnf)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer func() {
|
||||
if err := driver.CloseDB(); err != nil {
|
||||
logging.Log.Errorf("Failed to close DB. err: %+v", err)
|
||||
}
|
||||
}()
|
||||
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(driver, r); err != nil {
|
||||
return 0, err
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(r, o.driver); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions from DB. err: %w", err)
|
||||
}
|
||||
}
|
||||
for _, defPacks := range relatedDefs.entries {
|
||||
@@ -56,7 +50,7 @@ func (o SUSE) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if conts, ok := vuln.CveContents[models.SUSE]; ok {
|
||||
for i, cont := range conts {
|
||||
cont.SourceLink = "https://security-tracker.debian.org/tracker/" + cont.CveID
|
||||
cont.SourceLink = fmt.Sprintf("https://www.suse.com/security/cve/%s.html", cont.CveID)
|
||||
vuln.CveContents[models.SUSE][i] = cont
|
||||
}
|
||||
}
|
||||
@@ -65,27 +59,30 @@ func (o SUSE) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
}
|
||||
|
||||
func (o SUSE) update(r *models.ScanResult, defpacks defPacks) {
|
||||
ovalContent := *o.convertToModel(&defpacks.def)
|
||||
ovalContent := o.convertToModel(&defpacks.def)
|
||||
if ovalContent == nil {
|
||||
return
|
||||
}
|
||||
ovalContent.Type = models.NewCveContentType(o.family)
|
||||
vinfo, ok := r.ScannedCves[defpacks.def.Title]
|
||||
vinfo, ok := r.ScannedCves[ovalContent.CveID]
|
||||
if !ok {
|
||||
logging.Log.Debugf("%s is newly detected by OVAL", defpacks.def.Title)
|
||||
logging.Log.Debugf("%s is newly detected by OVAL", ovalContent.CveID)
|
||||
vinfo = models.VulnInfo{
|
||||
CveID: defpacks.def.Title,
|
||||
CveID: ovalContent.CveID,
|
||||
Confidences: models.Confidences{models.OvalMatch},
|
||||
CveContents: models.NewCveContents(ovalContent),
|
||||
CveContents: models.NewCveContents(*ovalContent),
|
||||
}
|
||||
} else {
|
||||
cveContents := vinfo.CveContents
|
||||
ctype := models.NewCveContentType(o.family)
|
||||
if _, ok := vinfo.CveContents[ctype]; ok {
|
||||
logging.Log.Debugf("%s OVAL will be overwritten", defpacks.def.Title)
|
||||
logging.Log.Debugf("%s OVAL will be overwritten", ovalContent.CveID)
|
||||
} else {
|
||||
logging.Log.Debugf("%s is also detected by OVAL", defpacks.def.Title)
|
||||
logging.Log.Debugf("%s is also detected by OVAL", ovalContent.CveID)
|
||||
cveContents = models.CveContents{}
|
||||
}
|
||||
vinfo.Confidences.AppendIfMissing(models.OvalMatch)
|
||||
cveContents[ctype] = []models.CveContent{ovalContent}
|
||||
cveContents[ctype] = []models.CveContent{*ovalContent}
|
||||
vinfo.CveContents = cveContents
|
||||
}
@@ -105,10 +102,15 @@ func (o SUSE) update(r *models.ScanResult, defpacks defPacks) {
    }
    vinfo.AffectedPackages = collectBinpkgFixstat.toPackStatuses()
    vinfo.AffectedPackages.Sort()
    r.ScannedCves[defpacks.def.Title] = vinfo
    r.ScannedCves[ovalContent.CveID] = vinfo
}

func (o SUSE) convertToModel(def *ovalmodels.Definition) *models.CveContent {
    if len(def.Advisory.Cves) != 1 {
        logging.Log.Warnf("Unknown Oval format. Please register the issue as it needs to be investigated. https://github.com/vulsio/goval-dictionary/issues family: %s, defID: %s", o.family, def.DefinitionID)
        return nil
    }

    refs := []models.Reference{}
    for _, r := range def.References {
        refs = append(refs, models.Reference{
@@ -117,11 +119,15 @@ func (o SUSE) convertToModel(def *ovalmodels.Definition) *models.CveContent {
            RefID:  r.RefID,
        })
    }

    cve := def.Advisory.Cves[0]
    score3, vec3 := parseCvss3(cve.Cvss3)
    return &models.CveContent{
        CveID:      def.Title,
        Title:      def.Title,
        Summary:    def.Description,
        References: refs,
        Title:         def.Title,
        Summary:       def.Description,
        CveID:         cve.CveID,
        Cvss3Score:    score3,
        Cvss3Vector:   vec3,
        Cvss3Severity: cve.Impact,
        References:    refs,
    }
}
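`parseCvss3` is now a shared helper (the old `RedHatBase.parseCvss2`/`parseCvss3` methods are removed in the oval/redhat.go hunk above). The shared helper's body is not shown in this diff; the sketch below mirrors the removed method's logic, where a goval-dictionary CVSSv3 string carries "<score>/<vector>":

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseCvss3 splits "5.6/CVSS:3.0/AV:N/..." into a score and a vector.
func parseCvss3(scoreVector string) (float64, string) {
	for _, sep := range []string{"/CVSS:3.0/", "/CVSS:3.1/"} {
		ss := strings.Split(scoreVector, sep)
		if len(ss) > 1 {
			score, err := strconv.ParseFloat(ss[0], 64)
			if err != nil {
				return 0, ""
			}
			return score, strings.TrimPrefix(sep, "/") + ss[1]
		}
	}
	return 0, ""
}

func main() {
	s, v := parseCvss3("5.6/CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L")
	fmt.Println(s) // 5.6
	fmt.Println(v) // CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L
}
```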
243  oval/util.go
@@ -5,25 +5,29 @@ package oval

import (
"encoding/json"
"fmt"
"net/http"
"regexp"
"sort"
"strconv"
"strings"
"time"

"github.com/cenkalti/backoff"
apkver "github.com/knqyf263/go-apk-version"
debver "github.com/knqyf263/go-deb-version"
rpmver "github.com/knqyf263/go-rpm-version"
"github.com/parnurzeal/gorequest"
"golang.org/x/xerrors"

"github.com/future-architect/vuls/config"
"github.com/future-architect/vuls/constant"
"github.com/future-architect/vuls/logging"
"github.com/future-architect/vuls/models"
"github.com/future-architect/vuls/util"
apkver "github.com/knqyf263/go-apk-version"
debver "github.com/knqyf263/go-deb-version"
rpmver "github.com/knqyf263/go-rpm-version"
"github.com/parnurzeal/gorequest"
"github.com/vulsio/goval-dictionary/db"
ovaldb "github.com/vulsio/goval-dictionary/db"
ovallog "github.com/vulsio/goval-dictionary/log"
ovalmodels "github.com/vulsio/goval-dictionary/models"
"golang.org/x/xerrors"
)

type ovalResult struct {
|
||||
@@ -89,6 +93,7 @@ type request struct {
|
||||
binaryPackNames []string
|
||||
isSrcPack bool
|
||||
modularityLabel string // RHEL 8 or later only
|
||||
repository string // Amazon Linux 2 Only
|
||||
}
|
||||
|
||||
type response struct {
|
||||
@@ -98,6 +103,24 @@ type response struct {
|
||||
|
||||
// getDefsByPackNameViaHTTP fetches OVAL information via HTTP
|
||||
func getDefsByPackNameViaHTTP(r *models.ScanResult, url string) (relatedDefs ovalResult, err error) {
|
||||
ovalFamily, err := GetFamilyInOval(r.Family)
|
||||
if err != nil {
|
||||
return relatedDefs, xerrors.Errorf("Failed to GetFamilyInOval. err: %w", err)
|
||||
}
|
||||
ovalRelease := r.Release
|
||||
switch r.Family {
|
||||
case constant.CentOS:
|
||||
ovalRelease = strings.TrimPrefix(r.Release, "stream")
|
||||
case constant.Amazon:
|
||||
switch strings.Fields(r.Release)[0] {
|
||||
case "2022":
|
||||
ovalRelease = "2022"
|
||||
case "2":
|
||||
ovalRelease = "2"
|
||||
default:
|
||||
ovalRelease = "1"
|
||||
}
|
||||
}
|
||||
|
||||
nReq := len(r.Packages) + len(r.SrcPackages)
|
||||
reqChan := make(chan request, nReq)
|
||||
@@ -109,13 +132,18 @@ func getDefsByPackNameViaHTTP(r *models.ScanResult, url string) (relatedDefs ova
|
||||
|
||||
go func() {
|
||||
for _, pack := range r.Packages {
|
||||
reqChan <- request{
|
||||
req := request{
|
||||
packName: pack.Name,
|
||||
versionRelease: pack.FormatVer(),
|
||||
newVersionRelease: pack.FormatVer(),
|
||||
newVersionRelease: pack.FormatNewVer(),
|
||||
isSrcPack: false,
|
||||
arch: pack.Arch,
|
||||
repository: pack.Repository,
|
||||
}
|
||||
if ovalFamily == constant.Amazon && ovalRelease == "2" && req.repository == "" {
|
||||
req.repository = "amzn2-core"
|
||||
}
|
||||
reqChan <- req
|
||||
}
|
||||
for _, pack := range r.SrcPackages {
|
||||
reqChan <- request{
|
||||
@@ -137,8 +165,8 @@ func getDefsByPackNameViaHTTP(r *models.ScanResult, url string) (relatedDefs ova
|
||||
url, err := util.URLPathJoin(
|
||||
url,
|
||||
"packs",
|
||||
r.Family,
|
||||
r.Release,
|
||||
ovalFamily,
|
||||
ovalRelease,
|
||||
req.packName,
|
||||
)
|
||||
if err != nil {
|
||||
@@ -157,7 +185,7 @@ func getDefsByPackNameViaHTTP(r *models.ScanResult, url string) (relatedDefs ova
|
||||
select {
|
||||
case res := <-resChan:
|
||||
for _, def := range res.defs {
|
||||
affected, notFixedYet, fixedIn, err := isOvalDefAffected(def, res.request, r.Family, r.RunningKernel, r.EnabledDnfModules)
|
||||
affected, notFixedYet, fixedIn, err := isOvalDefAffected(def, res.request, ovalFamily, ovalRelease, r.RunningKernel, r.EnabledDnfModules)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
continue
|
||||
@@ -236,16 +264,40 @@ func httpGet(url string, req request, resChan chan<- response, errChan chan<- er
|
||||
}
|
||||
}
|
||||
|
||||
func getDefsByPackNameFromOvalDB(driver db.DB, r *models.ScanResult) (relatedDefs ovalResult, err error) {
|
||||
func getDefsByPackNameFromOvalDB(r *models.ScanResult, driver ovaldb.DB) (relatedDefs ovalResult, err error) {
|
||||
ovalFamily, err := GetFamilyInOval(r.Family)
|
||||
if err != nil {
|
||||
return relatedDefs, xerrors.Errorf("Failed to GetFamilyInOval. err: %w", err)
|
||||
}
|
||||
ovalRelease := r.Release
|
||||
switch r.Family {
|
||||
case constant.CentOS:
|
||||
ovalRelease = strings.TrimPrefix(r.Release, "stream")
|
||||
case constant.Amazon:
|
||||
switch strings.Fields(r.Release)[0] {
|
||||
case "2022":
|
||||
ovalRelease = "2022"
|
||||
case "2":
|
||||
ovalRelease = "2"
|
||||
default:
|
||||
ovalRelease = "1"
|
||||
}
|
||||
}
|
||||
|
||||
requests := []request{}
|
||||
for _, pack := range r.Packages {
|
||||
requests = append(requests, request{
|
||||
req := request{
|
||||
packName: pack.Name,
|
||||
versionRelease: pack.FormatVer(),
|
||||
newVersionRelease: pack.FormatNewVer(),
|
||||
arch: pack.Arch,
|
||||
repository: pack.Repository,
|
||||
isSrcPack: false,
|
||||
})
|
||||
}
|
||||
if ovalFamily == constant.Amazon && ovalRelease == "2" && req.repository == "" {
|
||||
req.repository = "amzn2-core"
|
||||
}
|
||||
requests = append(requests, req)
|
||||
}
|
||||
for _, pack := range r.SrcPackages {
|
||||
requests = append(requests, request{
|
||||
@@ -256,19 +308,13 @@ func getDefsByPackNameFromOvalDB(driver db.DB, r *models.ScanResult) (relatedDef
|
||||
isSrcPack: true,
|
||||
})
|
||||
}
|
||||
|
||||
ovalFamily, err := GetFamilyInOval(r.Family)
|
||||
if err != nil {
|
||||
return relatedDefs, err
|
||||
}
|
||||
|
||||
for _, req := range requests {
|
||||
definitions, err := driver.GetByPackName(ovalFamily, r.Release, req.packName, req.arch)
|
||||
definitions, err := driver.GetByPackName(ovalFamily, ovalRelease, req.packName, req.arch)
|
||||
if err != nil {
|
||||
return relatedDefs, xerrors.Errorf("Failed to get %s OVAL info by package: %#v, err: %w", r.Family, req, err)
|
||||
}
|
||||
for _, def := range definitions {
|
||||
affected, notFixedYet, fixedIn, err := isOvalDefAffected(def, req, ovalFamily, r.RunningKernel, r.EnabledDnfModules)
|
||||
affected, notFixedYet, fixedIn, err := isOvalDefAffected(def, req, ovalFamily, ovalRelease, r.RunningKernel, r.EnabledDnfModules)
|
||||
if err != nil {
|
||||
return relatedDefs, xerrors.Errorf("Failed to exec isOvalAffected. err: %w", err)
|
||||
}
|
||||
@@ -298,16 +344,27 @@ func getDefsByPackNameFromOvalDB(driver db.DB, r *models.ScanResult) (relatedDef
return
}

func isOvalDefAffected(def ovalmodels.Definition, req request, family string, running models.Kernel, enabledMods []string) (affected, notFixedYet bool, fixedIn string, err error) {
var modularVersionPattern = regexp.MustCompile(`.+\.module(?:\+el|_f)\d{1,2}.*`)

func isOvalDefAffected(def ovalmodels.Definition, req request, family, release string, running models.Kernel, enabledMods []string) (affected, notFixedYet bool, fixedIn string, err error) {
if family == constant.Amazon && release == "2" {
if def.Advisory.AffectedRepository == "" {
def.Advisory.AffectedRepository = "amzn2-core"
}
if req.repository != def.Advisory.AffectedRepository {
return false, false, "", nil
}
}

for _, ovalPack := range def.AffectedPacks {
if req.packName != ovalPack.Name {
continue
}

switch family {
case constant.Oracle, constant.Amazon:
case constant.Oracle, constant.Amazon, constant.Fedora:
if ovalPack.Arch == "" {
logging.Log.Infof("Arch is needed to detect Vulns for Amazon and Oracle Linux, but empty. You need refresh OVAL maybe. oval: %#v, defID: %s", ovalPack, def.DefinitionID)
logging.Log.Infof("Arch is needed to detect Vulns for Amazon Linux, Oracle Linux and Fedora, but empty. You need refresh OVAL maybe. oval: %#v, defID: %s", ovalPack, def.DefinitionID)
continue
}
}
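A standalone sketch of the Amazon Linux 2 repository gate introduced above: when the OVAL advisory carries no repository it is treated as amzn2-core, and a package installed from a different repository (for example an amzn2extra-* one) is not matched. The family and release strings below are illustrative placeholders for the constants used in the real code.

    package main

    import "fmt"

    // affectedByRepo mirrors the gate in the diff: the definition is only evaluated
    // when the package's repository matches the advisory's affected repository.
    func affectedByRepo(family, release, affectedRepo, pkgRepo string) bool {
    	if family == "amazon" && release == "2" {
    		if affectedRepo == "" {
    			affectedRepo = "amzn2-core" // default assumed by the change
    		}
    		if pkgRepo != affectedRepo {
    			return false
    		}
    	}
    	return true
    }

    func main() {
    	fmt.Println(affectedByRepo("amazon", "2", "amzn2-core", "amzn2-core"))       // true
    	fmt.Println(affectedByRepo("amazon", "2", "amzn2-core", "amzn2extra-nginx")) // false
    }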
@@ -321,10 +378,24 @@ func isOvalDefAffected(def ovalmodels.Definition, req request, family string, ru
continue
}

// There is a modular package and a non-modular package with the same name. (e.g. fedora 35 community-mysql)
if ovalPack.ModularityLabel == "" && modularVersionPattern.MatchString(req.versionRelease) {
continue
} else if ovalPack.ModularityLabel != "" && !modularVersionPattern.MatchString(req.versionRelease) {
continue
}

isModularityLabelEmptyOrSame := false
if ovalPack.ModularityLabel != "" {
// expect ovalPack.ModularityLabel e.g. RedHat: nginx:1.16, Fedora: mysql:8.0:3520211031142409:f27b74a8
ss := strings.Split(ovalPack.ModularityLabel, ":")
if len(ss) < 2 {
logging.Log.Warnf("Invalid modularitylabel format in oval package. Maybe it is necessary to fix modularitylabel of goval-dictionary. expected: ${name}:${stream}(:${version}:${context}:${arch}), actual: %s", ovalPack.ModularityLabel)
continue
}
modularityNameStreamLabel := fmt.Sprintf("%s:%s", ss[0], ss[1])
for _, mod := range enabledMods {
if mod == ovalPack.ModularityLabel {
if mod == modularityNameStreamLabel {
isModularityLabelEmptyOrSame = true
break
}
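A standalone sketch of the name:stream comparison used above: a full modularity label is reduced to its first two segments before being checked against the modules the scanner reports as enabled. The label and module list are hypothetical examples.

    package main

    import (
    	"fmt"
    	"strings"
    )

    // nameStream reduces a full modularity label such as
    // "mysql:8.0:3520211031142409:f27b74a8" (Fedora) or "nginx:1.16" (RHEL)
    // to its "name:stream" prefix, the form listed among enabled dnf modules.
    func nameStream(label string) (string, bool) {
    	ss := strings.Split(label, ":")
    	if len(ss) < 2 {
    		return "", false
    	}
    	return ss[0] + ":" + ss[1], true
    }

    func main() {
    	enabled := []string{"mysql:8.0"}
    	label := "mysql:8.0:3520211031142409:f27b74a8"

    	ns, ok := nameStream(label)
    	match := false
    	if ok {
    		for _, mod := range enabled {
    			if mod == ns {
    				match = true
    			}
    		}
    	}
    	fmt.Println(ns, match) // mysql:8.0 true
    }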
@@ -338,7 +409,7 @@ func isOvalDefAffected(def ovalmodels.Definition, req request, family string, ru
|
||||
|
||||
if running.Release != "" {
|
||||
switch family {
|
||||
case constant.RedHat, constant.CentOS, constant.Alma, constant.Rocky, constant.Oracle:
|
||||
case constant.RedHat, constant.CentOS, constant.Alma, constant.Rocky, constant.Oracle, constant.Fedora:
|
||||
// For kernel related packages, ignore OVAL information with different major versions
|
||||
if _, ok := kernelRelatedPackNames[ovalPack.Name]; ok {
|
||||
if util.Major(ovalPack.Version) != util.Major(running.Release) {
|
||||
@@ -368,12 +439,16 @@ func isOvalDefAffected(def ovalmodels.Definition, req request, family string, ru
|
||||
// If the version of installed is less than in OVAL
|
||||
switch family {
|
||||
case constant.RedHat,
|
||||
constant.Fedora,
|
||||
constant.Amazon,
|
||||
constant.Oracle,
|
||||
constant.OpenSUSE,
|
||||
constant.OpenSUSELeap,
|
||||
constant.SUSEEnterpriseServer,
|
||||
constant.SUSEEnterpriseDesktop,
|
||||
constant.Debian,
|
||||
constant.Ubuntu,
|
||||
constant.Raspbian,
|
||||
constant.Oracle:
|
||||
constant.Ubuntu:
|
||||
// Use fixed state in OVAL for these distros.
|
||||
return true, false, ovalPack.Version, nil
|
||||
}
|
||||
@@ -409,28 +484,32 @@ func lessThan(family, newVer string, packInOVAL ovalmodels.Package) (bool, error
|
||||
constant.Raspbian:
|
||||
vera, err := debver.NewVersion(newVer)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", newVer, err)
|
||||
}
|
||||
verb, err := debver.NewVersion(packInOVAL.Version)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", packInOVAL.Version, err)
|
||||
}
|
||||
return vera.LessThan(verb), nil
|
||||
|
||||
case constant.Alpine:
|
||||
vera, err := apkver.NewVersion(newVer)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", newVer, err)
|
||||
}
|
||||
verb, err := apkver.NewVersion(packInOVAL.Version)
|
||||
if err != nil {
|
||||
return false, err
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", packInOVAL.Version, err)
|
||||
}
|
||||
return vera.LessThan(verb), nil
|
||||
|
||||
case constant.Oracle,
|
||||
constant.OpenSUSE,
|
||||
constant.OpenSUSELeap,
|
||||
constant.SUSEEnterpriseServer,
|
||||
constant.Amazon:
|
||||
constant.SUSEEnterpriseDesktop,
|
||||
constant.Amazon,
|
||||
constant.Fedora:
|
||||
vera := rpmver.NewVersion(newVer)
|
||||
verb := rpmver.NewVersion(packInOVAL.Version)
|
||||
return vera.LessThan(verb), nil
|
||||
@@ -439,8 +518,8 @@ func lessThan(family, newVer string, packInOVAL ovalmodels.Package) (bool, error
|
||||
constant.CentOS,
|
||||
constant.Alma,
|
||||
constant.Rocky:
|
||||
vera := rpmver.NewVersion(rhelDownStreamOSVersionToRHEL(newVer))
|
||||
verb := rpmver.NewVersion(rhelDownStreamOSVersionToRHEL(packInOVAL.Version))
|
||||
vera := rpmver.NewVersion(rhelRebuildOSVersionToRHEL(newVer))
|
||||
verb := rpmver.NewVersion(rhelRebuildOSVersionToRHEL(packInOVAL.Version))
|
||||
return vera.LessThan(verb), nil
|
||||
|
||||
default:
|
||||
@@ -448,40 +527,56 @@ func lessThan(family, newVer string, packInOVAL ovalmodels.Package) (bool, error
}
}

var rhelDownStreamOSVerPattern = regexp.MustCompile(`\.[es]l(\d+)(?:_\d+)?(?:\.(centos|rocky|alma))?`)
var rhelRebuildOSVerPattern = regexp.MustCompile(`\.[es]l(\d+)(?:_\d+)?(?:\.(centos|rocky|alma))?`)

func rhelDownStreamOSVersionToRHEL(ver string) string {
return rhelDownStreamOSVerPattern.ReplaceAllString(ver, ".el$1")
func rhelRebuildOSVersionToRHEL(ver string) string {
return rhelRebuildOSVerPattern.ReplaceAllString(ver, ".el$1")
}

// NewOVALClient returns a client for OVAL database
func NewOVALClient(family string, cnf config.GovalDictConf) (Client, error) {
func NewOVALClient(family string, cnf config.GovalDictConf, o logging.LogOpts) (Client, error) {
if err := ovallog.SetLogger(o.LogToFile, o.LogDir, o.Debug, o.LogJSON); err != nil {
return nil, xerrors.Errorf("Failed to set goval-dictionary logger. err: %w", err)
}

driver, err := newOvalDB(&cnf)
if err != nil {
return nil, xerrors.Errorf("Failed to newOvalDB. err: %w", err)
}

switch family {
case constant.Debian, constant.Raspbian:
return NewDebian(&cnf), nil
return NewDebian(driver, cnf.GetURL()), nil
case constant.Ubuntu:
return NewUbuntu(&cnf), nil
return NewUbuntu(driver, cnf.GetURL()), nil
case constant.RedHat:
return NewRedhat(&cnf), nil
return NewRedhat(driver, cnf.GetURL()), nil
case constant.CentOS:
return NewCentOS(&cnf), nil
return NewCentOS(driver, cnf.GetURL()), nil
case constant.Alma:
return NewAlma(&cnf), nil
return NewAlma(driver, cnf.GetURL()), nil
case constant.Rocky:
return NewRocky(&cnf), nil
return NewRocky(driver, cnf.GetURL()), nil
case constant.Oracle:
return NewOracle(&cnf), nil
return NewOracle(driver, cnf.GetURL()), nil
case constant.OpenSUSE:
return NewSUSE(driver, cnf.GetURL(), constant.OpenSUSE), nil
case constant.OpenSUSELeap:
return NewSUSE(driver, cnf.GetURL(), constant.OpenSUSELeap), nil
case constant.SUSEEnterpriseServer:
// TODO other suse family
return NewSUSE(&cnf), nil
return NewSUSE(driver, cnf.GetURL(), constant.SUSEEnterpriseServer), nil
case constant.SUSEEnterpriseDesktop:
return NewSUSE(driver, cnf.GetURL(), constant.SUSEEnterpriseDesktop), nil
case constant.Alpine:
return NewAlpine(&cnf), nil
return NewAlpine(driver, cnf.GetURL()), nil
case constant.Amazon:
return NewAmazon(&cnf), nil
return NewAmazon(driver, cnf.GetURL()), nil
case constant.Fedora:
return NewFedora(driver, cnf.GetURL()), nil
case constant.FreeBSD, constant.Windows:
return nil, nil
return NewPseudo(family), nil
case constant.ServerTypePseudo:
return nil, nil
return NewPseudo(family), nil
default:
if family == "" {
return nil, xerrors.New("Probably an error occurred during scanning. Check the error message")
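A hedged usage sketch of the new constructor signature, assuming the import paths shown in this diff resolve as-is at this revision; the zero-value GovalDictConf stands in for a real goval-dictionary configuration.

    package main

    import (
    	"log"

    	"github.com/future-architect/vuls/config"
    	"github.com/future-architect/vuls/constant"
    	"github.com/future-architect/vuls/logging"
    	"github.com/future-architect/vuls/oval"
    )

    func main() {
    	// NewOVALClient now takes logging.LogOpts and opens the goval-dictionary
    	// DB itself; callers no longer build per-family clients from a bare config.
    	client, err := oval.NewOVALClient(constant.RedHat, config.GovalDictConf{}, logging.LogOpts{Debug: true})
    	if err != nil {
    		log.Fatalf("failed to create OVAL client: %v", err)
    	}
    	_ = client
    }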
@@ -500,11 +595,18 @@ func GetFamilyInOval(familyInScanResult string) (string, error) {
|
||||
return constant.Ubuntu, nil
|
||||
case constant.RedHat, constant.CentOS, constant.Alma, constant.Rocky:
|
||||
return constant.RedHat, nil
|
||||
case constant.Fedora:
|
||||
return constant.Fedora, nil
|
||||
case constant.Oracle:
|
||||
return constant.Oracle, nil
|
||||
case constant.OpenSUSE:
|
||||
return constant.OpenSUSE, nil
|
||||
case constant.OpenSUSELeap:
|
||||
return constant.OpenSUSELeap, nil
|
||||
case constant.SUSEEnterpriseServer:
|
||||
// TODO other suse family
|
||||
return constant.SUSEEnterpriseServer, nil
|
||||
case constant.SUSEEnterpriseDesktop:
|
||||
return constant.SUSEEnterpriseDesktop, nil
|
||||
case constant.Alpine:
|
||||
return constant.Alpine, nil
|
||||
case constant.Amazon:
|
||||
@@ -521,3 +623,36 @@ func GetFamilyInOval(familyInScanResult string) (string, error) {
}

}

// ParseCvss2 divide CVSSv2 string into score and vector
// 5/AV:N/AC:L/Au:N/C:N/I:N/A:P
func parseCvss2(scoreVector string) (score float64, vector string) {
var err error
ss := strings.Split(scoreVector, "/")
if 1 < len(ss) {
if score, err = strconv.ParseFloat(ss[0], 64); err != nil {
return 0, ""
}
return score, strings.Join(ss[1:], "/")
}
return 0, ""
}

// ParseCvss3 divide CVSSv3 string into score and vector
// 5.6/CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L
func parseCvss3(scoreVector string) (score float64, vector string) {
var err error
for _, s := range []string{
"/CVSS:3.0/",
"/CVSS:3.1/",
} {
ss := strings.Split(scoreVector, s)
if 1 < len(ss) {
if score, err = strconv.ParseFloat(ss[0], 64); err != nil {
return 0, ""
}
return score, strings.TrimPrefix(s, "/") + ss[1]
}
}
return 0, ""
}

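A sketch of a Go example test that could sit alongside these unexported helpers in the oval package (a hypothetical file, not part of this change), showing how the combined score/vector strings are split:

    package oval

    import "fmt"

    // Example_parseCvss (hypothetical) documents the score/vector split.
    func Example_parseCvss() {
    	s2, v2 := parseCvss2("5/AV:N/AC:L/Au:N/C:N/I:N/A:P")
    	fmt.Println(s2, v2)

    	s3, v3 := parseCvss3("5.6/CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L")
    	fmt.Println(s3, v3)
    	// Output:
    	// 5 AV:N/AC:L/Au:N/C:N/I:N/A:P
    	// 5.6 CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L
    }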
@@ -199,11 +199,12 @@ func TestDefpacksToPackStatuses(t *testing.T) {
|
||||
|
||||
func TestIsOvalDefAffected(t *testing.T) {
|
||||
type in struct {
|
||||
def ovalmodels.Definition
|
||||
req request
|
||||
family string
|
||||
kernel models.Kernel
|
||||
mods []string
|
||||
def ovalmodels.Definition
|
||||
req request
|
||||
family string
|
||||
release string
|
||||
kernel models.Kernel
|
||||
mods []string
|
||||
}
|
||||
var tests = []struct {
|
||||
in in
|
||||
@@ -1621,6 +1622,88 @@ func TestIsOvalDefAffected(t *testing.T) {
|
||||
affected: false,
|
||||
notFixedYet: false,
|
||||
},
|
||||
// dnf module 4 (long modularitylabel)
|
||||
{
|
||||
in: in{
|
||||
family: constant.Fedora,
|
||||
def: ovalmodels.Definition{
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
Name: "community-mysql",
|
||||
Version: "0:8.0.27-1.module_f35+13269+c9322734",
|
||||
Arch: "x86_64",
|
||||
NotFixedYet: false,
|
||||
ModularityLabel: "mysql:8.0:3520211031142409:f27b74a8",
|
||||
},
|
||||
},
|
||||
},
|
||||
req: request{
|
||||
packName: "community-mysql",
|
||||
arch: "x86_64",
|
||||
versionRelease: "8.0.26-1.module_f35+12627+b26747dd",
|
||||
},
|
||||
mods: []string{
|
||||
"mysql:8.0",
|
||||
},
|
||||
},
|
||||
affected: true,
|
||||
notFixedYet: false,
|
||||
fixedIn: "0:8.0.27-1.module_f35+13269+c9322734",
|
||||
},
|
||||
// dnf module 5 (req is non-modular package, oval is modular package)
|
||||
{
|
||||
in: in{
|
||||
family: constant.Fedora,
|
||||
def: ovalmodels.Definition{
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
Name: "community-mysql",
|
||||
Version: "0:8.0.27-1.module_f35+13269+c9322734",
|
||||
Arch: "x86_64",
|
||||
NotFixedYet: false,
|
||||
ModularityLabel: "mysql:8.0:3520211031142409:f27b74a8",
|
||||
},
|
||||
},
|
||||
},
|
||||
req: request{
|
||||
packName: "community-mysql",
|
||||
arch: "x86_64",
|
||||
versionRelease: "8.0.26-1.fc35",
|
||||
},
|
||||
mods: []string{
|
||||
"mysql:8.0",
|
||||
},
|
||||
},
|
||||
affected: false,
|
||||
notFixedYet: false,
|
||||
},
|
||||
// dnf module 6 (req is modular package, oval is non-modular package)
|
||||
{
|
||||
in: in{
|
||||
family: constant.Fedora,
|
||||
def: ovalmodels.Definition{
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
Name: "community-mysql",
|
||||
Version: "0:8.0.27-1.fc35",
|
||||
Arch: "x86_64",
|
||||
NotFixedYet: false,
|
||||
ModularityLabel: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
req: request{
|
||||
packName: "community-mysql",
|
||||
arch: "x86_64",
|
||||
versionRelease: "8.0.26-1.module_f35+12627+b26747dd",
|
||||
},
|
||||
mods: []string{
|
||||
"mysql:8.0",
|
||||
},
|
||||
},
|
||||
affected: false,
|
||||
notFixedYet: false,
|
||||
},
|
||||
// .ksplice1.
|
||||
{
|
||||
in: in{
|
||||
@@ -1774,10 +1857,63 @@ func TestIsOvalDefAffected(t *testing.T) {
|
||||
wantErr: false,
|
||||
fixedIn: "",
|
||||
},
|
||||
// amazon linux 2 repository
|
||||
{
|
||||
in: in{
|
||||
family: constant.Amazon,
|
||||
release: "2",
|
||||
def: ovalmodels.Definition{
|
||||
Advisory: ovalmodels.Advisory{
|
||||
AffectedRepository: "amzn2-core",
|
||||
},
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
Name: "nginx",
|
||||
Version: "2.17-106.0.1",
|
||||
Arch: "x86_64",
|
||||
},
|
||||
},
|
||||
},
|
||||
req: request{
|
||||
packName: "nginx",
|
||||
versionRelease: "2.17-105.0.1",
|
||||
arch: "x86_64",
|
||||
repository: "amzn2-core",
|
||||
},
|
||||
},
|
||||
affected: true,
|
||||
fixedIn: "2.17-106.0.1",
|
||||
},
|
||||
{
|
||||
in: in{
|
||||
family: constant.Amazon,
|
||||
release: "2",
|
||||
def: ovalmodels.Definition{
|
||||
Advisory: ovalmodels.Advisory{
|
||||
AffectedRepository: "amzn2-core",
|
||||
},
|
||||
AffectedPacks: []ovalmodels.Package{
|
||||
{
|
||||
Name: "nginx",
|
||||
Version: "2.17-106.0.1",
|
||||
Arch: "x86_64",
|
||||
},
|
||||
},
|
||||
},
|
||||
req: request{
|
||||
packName: "nginx",
|
||||
versionRelease: "2.17-105.0.1",
|
||||
arch: "x86_64",
|
||||
repository: "amzn2extra-nginx",
|
||||
},
|
||||
},
|
||||
affected: false,
|
||||
fixedIn: "",
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range tests {
|
||||
affected, notFixedYet, fixedIn, err := isOvalDefAffected(tt.in.def, tt.in.req, tt.in.family, tt.in.kernel, tt.in.mods)
|
||||
affected, notFixedYet, fixedIn, err := isOvalDefAffected(tt.in.def, tt.in.req, tt.in.family, tt.in.release, tt.in.kernel, tt.in.mods)
|
||||
if tt.wantErr != (err != nil) {
|
||||
t.Errorf("[%d] err\nexpected: %t\n actual: %s\n", i, tt.wantErr, err)
|
||||
}
|
||||
@@ -1833,8 +1969,8 @@ func Test_rhelDownStreamOSVersionToRHEL(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := rhelDownStreamOSVersionToRHEL(tt.args.ver); got != tt.want {
|
||||
t.Errorf("rhelDownStreamOSVersionToRHEL() = %v, want %v", got, tt.want)
|
||||
if got := rhelRebuildOSVersionToRHEL(tt.args.ver); got != tt.want {
|
||||
t.Errorf("rhelRebuildOSVersionToRHEL() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -1967,3 +2103,76 @@ func Test_ovalResult_Sort(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCvss2(t *testing.T) {
|
||||
type out struct {
|
||||
score float64
|
||||
vector string
|
||||
}
|
||||
var tests = []struct {
|
||||
in string
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: "5/AV:N/AC:L/Au:N/C:N/I:N/A:P",
|
||||
out: out{
|
||||
score: 5.0,
|
||||
vector: "AV:N/AC:L/Au:N/C:N/I:N/A:P",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "",
|
||||
out: out{
|
||||
score: 0,
|
||||
vector: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
s, v := parseCvss2(tt.in)
|
||||
if s != tt.out.score || v != tt.out.vector {
|
||||
t.Errorf("\nexpected: %f, %s\n actual: %f, %s",
|
||||
tt.out.score, tt.out.vector, s, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseCvss3(t *testing.T) {
|
||||
type out struct {
|
||||
score float64
|
||||
vector string
|
||||
}
|
||||
var tests = []struct {
|
||||
in string
|
||||
out out
|
||||
}{
|
||||
{
|
||||
in: "5.6/CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L",
|
||||
out: out{
|
||||
score: 5.6,
|
||||
vector: "CVSS:3.0/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "6.1/CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L",
|
||||
out: out{
|
||||
score: 6.1,
|
||||
vector: "CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:L",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "",
|
||||
out: out{
|
||||
score: 0,
|
||||
vector: "",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
s, v := parseCvss3(tt.in)
|
||||
if s != tt.out.score || v != tt.out.vector {
|
||||
t.Errorf("\nexpected: %f, %s\n actual: %f, %s",
|
||||
tt.out.score, tt.out.vector, s, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ type ChatWorkWriter struct {
|
||||
Proxy string
|
||||
}
|
||||
|
||||
// Write results to ChatWork
|
||||
func (w ChatWorkWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
|
||||
for _, r := range rs {
|
||||
|
||||
@@ -23,6 +23,7 @@ type EMailWriter struct {
|
||||
Cnf config.SMTPConf
|
||||
}
|
||||
|
||||
// Write results to Email
|
||||
func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
var message string
|
||||
sender := NewEMailSender(w.Cnf)
|
||||
@@ -31,7 +32,7 @@ func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
if w.FormatOneEMail {
|
||||
message += formatFullPlainText(r) + "\r\n\r\n"
|
||||
mm := r.ScannedCves.CountGroupBySeverity()
|
||||
keys := []string{"High", "Medium", "Low", "Unknown"}
|
||||
keys := []string{"Critical", "High", "Medium", "Low", "Unknown"}
|
||||
for _, k := range keys {
|
||||
m[k] += mm[k]
|
||||
}
|
||||
@@ -60,9 +61,9 @@ func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
summary := fmt.Sprintf("Total: %d (High:%d Medium:%d Low:%d ?:%d)",
|
||||
m["High"]+m["Medium"]+m["Low"]+m["Unknown"],
|
||||
m["High"], m["Medium"], m["Low"], m["Unknown"])
|
||||
summary := fmt.Sprintf("Total: %d (Critical:%d High:%d Medium:%d Low:%d ?:%d)",
|
||||
m["Critical"]+m["High"]+m["Medium"]+m["Low"]+m["Unknown"],
|
||||
m["Critical"], m["High"], m["Medium"], m["Low"], m["Unknown"])
|
||||
|
||||
origmessage := message
|
||||
if w.FormatOneEMail {
|
||||
|
||||
@@ -21,6 +21,7 @@ type GoogleChatWriter struct {
|
||||
Proxy string
|
||||
}
|
||||
|
||||
// Write results to Google Chat
|
||||
func (w GoogleChatWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
re := regexp.MustCompile(w.Cnf.ServerNameRegexp)
|
||||
|
||||
|
||||
@@ -2,27 +2,33 @@ package reporter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/CycloneDX/cyclonedx-go"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/reporter/sbom"
|
||||
)
|
||||
|
||||
// LocalFileWriter writes results to a local file.
|
||||
type LocalFileWriter struct {
|
||||
CurrentDir string
|
||||
DiffPlus bool
|
||||
DiffMinus bool
|
||||
FormatJSON bool
|
||||
FormatCsv bool
|
||||
FormatFullText bool
|
||||
FormatOneLineText bool
|
||||
FormatList bool
|
||||
Gzip bool
|
||||
CurrentDir string
|
||||
DiffPlus bool
|
||||
DiffMinus bool
|
||||
FormatJSON bool
|
||||
FormatCsv bool
|
||||
FormatFullText bool
|
||||
FormatOneLineText bool
|
||||
FormatList bool
|
||||
FormatCycloneDXJSON bool
|
||||
FormatCycloneDXXML bool
|
||||
Gzip bool
|
||||
}
|
||||
|
||||
// Write results to Local File
|
||||
func (w LocalFileWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
if w.FormatOneLineText {
|
||||
path := filepath.Join(w.CurrentDir, "summary.txt")
|
||||
@@ -87,6 +93,28 @@ func (w LocalFileWriter) Write(rs ...models.ScanResult) (err error) {
}
}

if w.FormatCycloneDXJSON {
bs, err := sbom.GenerateCycloneDX(cyclonedx.BOMFileFormatJSON, r)
if err != nil {
return xerrors.Errorf("Failed to generate CycloneDX JSON. err: %w", err)
}
p := fmt.Sprintf("%s_cyclonedx.json", path)
if err := w.writeFile(p, bs, 0600); err != nil {
return xerrors.Errorf("Failed to write CycloneDX JSON. path: %s, err: %w", p, err)
}
}

if w.FormatCycloneDXXML {
bs, err := sbom.GenerateCycloneDX(cyclonedx.BOMFileFormatXML, r)
if err != nil {
return xerrors.Errorf("Failed to generate CycloneDX XML. err: %w", err)
}
p := fmt.Sprintf("%s_cyclonedx.xml", path)
if err := w.writeFile(p, bs, 0600); err != nil {
return xerrors.Errorf("Failed to write CycloneDX XML. path: %s, err: %w", p, err)
}
}

}
return nil
}
@@ -99,5 +127,5 @@ func (w LocalFileWriter) writeFile(path string, data []byte, perm os.FileMode) (
}
path += ".gz"
}
return ioutil.WriteFile(path, []byte(data), perm)
return os.WriteFile(path, []byte(data), perm)
}

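A hedged usage sketch of the new CycloneDX output flags, assuming the reporter package API exactly as shown in this diff; the directory and server name are placeholders, and the exact output filename layout is taken on trust from the hunk above.

    package main

    import (
    	"log"

    	"github.com/future-architect/vuls/models"
    	"github.com/future-architect/vuls/reporter"
    )

    func main() {
    	// Writes a <server>_cyclonedx.json next to the usual JSON report.
    	w := reporter.LocalFileWriter{
    		CurrentDir:          "./results/current", // hypothetical directory
    		FormatJSON:          true,
    		FormatCycloneDXJSON: true,
    	}
    	if err := w.Write(models.ScanResult{ServerName: "web01"}); err != nil {
    		log.Fatalf("failed to write reports: %v", err)
    	}
    }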
561
reporter/sbom/cyclonedx.go
Normal file
@@ -0,0 +1,561 @@
|
||||
package sbom
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
cdx "github.com/CycloneDX/cyclonedx-go"
|
||||
"github.com/google/uuid"
|
||||
"github.com/package-url/packageurl-go"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/models"
|
||||
)
|
||||
|
||||
// GenerateCycloneDX generates a string in CycloneDX format
|
||||
func GenerateCycloneDX(format cdx.BOMFileFormat, r models.ScanResult) ([]byte, error) {
|
||||
bom := cdx.NewBOM()
|
||||
bom.SerialNumber = uuid.New().URN()
|
||||
bom.Metadata = cdxMetadata(r)
|
||||
bom.Components, bom.Dependencies, bom.Vulnerabilities = cdxComponents(r, bom.Metadata.Component.BOMRef)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
enc := cdx.NewBOMEncoder(buf, format)
|
||||
enc.SetPretty(true)
|
||||
if err := enc.Encode(bom); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to encode CycloneDX. err: %w", err)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func cdxMetadata(result models.ScanResult) *cdx.Metadata {
|
||||
metadata := cdx.Metadata{
|
||||
Timestamp: result.ReportedAt.Format(time.RFC3339),
|
||||
Tools: &[]cdx.Tool{
|
||||
{
|
||||
Vendor: "future-architect",
|
||||
Name: "vuls",
|
||||
Version: fmt.Sprintf("%s-%s", result.ReportedVersion, result.ReportedRevision),
|
||||
},
|
||||
},
|
||||
Component: &cdx.Component{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeOS,
|
||||
Name: result.ServerName,
|
||||
},
|
||||
}
|
||||
return &metadata
|
||||
}
|
||||
|
||||
func cdxComponents(result models.ScanResult, metaBomRef string) (*[]cdx.Component, *[]cdx.Dependency, *[]cdx.Vulnerability) {
|
||||
var components []cdx.Component
|
||||
bomRefs := map[string][]string{}
|
||||
|
||||
ospkgToPURL := map[string]string{}
|
||||
if ospkgComps := ospkgToCdxComponents(result.Family, result.Release, result.RunningKernel, result.Packages, result.SrcPackages, ospkgToPURL); ospkgComps != nil {
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], ospkgComps[0].BOMRef)
|
||||
for _, comp := range ospkgComps[1:] {
|
||||
bomRefs[ospkgComps[0].BOMRef] = append(bomRefs[ospkgComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, ospkgComps...)
|
||||
}
|
||||
|
||||
if cpeComps := cpeToCdxComponents(result.ScannedCves); cpeComps != nil {
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], cpeComps[0].BOMRef)
|
||||
for _, comp := range cpeComps[1:] {
|
||||
bomRefs[cpeComps[0].BOMRef] = append(bomRefs[cpeComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, cpeComps...)
|
||||
}
|
||||
|
||||
libpkgToPURL := map[string]map[string]string{}
|
||||
for _, libscanner := range result.LibraryScanners {
|
||||
libpkgToPURL[libscanner.LockfilePath] = map[string]string{}
|
||||
|
||||
libpkgComps := libpkgToCdxComponents(libscanner, libpkgToPURL)
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], libpkgComps[0].BOMRef)
|
||||
for _, comp := range libpkgComps[1:] {
|
||||
bomRefs[libpkgComps[0].BOMRef] = append(bomRefs[libpkgComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, libpkgComps...)
|
||||
}
|
||||
|
||||
ghpkgToPURL := map[string]map[string]string{}
|
||||
for _, ghm := range result.GitHubManifests {
|
||||
ghpkgToPURL[ghm.RepoURLFilename()] = map[string]string{}
|
||||
|
||||
ghpkgComps := ghpkgToCdxComponents(ghm, ghpkgToPURL)
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], ghpkgComps[0].BOMRef)
|
||||
for _, comp := range ghpkgComps[1:] {
|
||||
bomRefs[ghpkgComps[0].BOMRef] = append(bomRefs[ghpkgComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, ghpkgComps...)
|
||||
}
|
||||
|
||||
wppkgToPURL := map[string]string{}
|
||||
if wppkgComps := wppkgToCdxComponents(result.WordPressPackages, wppkgToPURL); wppkgComps != nil {
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], wppkgComps[0].BOMRef)
|
||||
for _, comp := range wppkgComps[1:] {
|
||||
bomRefs[wppkgComps[0].BOMRef] = append(bomRefs[wppkgComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, wppkgComps...)
|
||||
}
|
||||
|
||||
return &components, cdxDependencies(bomRefs), cdxVulnerabilities(result, ospkgToPURL, libpkgToPURL, ghpkgToPURL, wppkgToPURL)
|
||||
}
|
||||
|
||||
func osToCdxComponent(family, release, runningKernelRelease, runningKernelVersion string) cdx.Component {
|
||||
props := []cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: "Package",
|
||||
},
|
||||
}
|
||||
if runningKernelRelease != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "RunningKernelRelease",
|
||||
Value: runningKernelRelease,
|
||||
})
|
||||
}
|
||||
if runningKernelVersion != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "RunningKernelVersion",
|
||||
Value: runningKernelVersion,
|
||||
})
|
||||
}
|
||||
return cdx.Component{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeOS,
|
||||
Name: family,
|
||||
Version: release,
|
||||
Properties: &props,
|
||||
}
|
||||
}
|
||||
|
||||
func ospkgToCdxComponents(family, release string, runningKernel models.Kernel, binpkgs models.Packages, srcpkgs models.SrcPackages, ospkgToPURL map[string]string) []cdx.Component {
|
||||
if family == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
components := []cdx.Component{
|
||||
osToCdxComponent(family, release, runningKernel.Release, runningKernel.Version),
|
||||
}
|
||||
|
||||
if len(binpkgs) == 0 {
|
||||
return components
|
||||
}
|
||||
|
||||
type srcpkg struct {
|
||||
name string
|
||||
version string
|
||||
arch string
|
||||
}
|
||||
binToSrc := map[string]srcpkg{}
|
||||
for _, pack := range srcpkgs {
|
||||
for _, binpkg := range pack.BinaryNames {
|
||||
binToSrc[binpkg] = srcpkg{
|
||||
name: pack.Name,
|
||||
version: pack.Version,
|
||||
arch: pack.Arch,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, pack := range binpkgs {
|
||||
var props []cdx.Property
|
||||
if p, ok := binToSrc[pack.Name]; ok {
|
||||
if p.name != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "future-architect:vuls:SrcName",
|
||||
Value: p.name,
|
||||
})
|
||||
}
|
||||
if p.version != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "future-architect:vuls:SrcVersion",
|
||||
Value: p.version,
|
||||
})
|
||||
}
|
||||
if p.arch != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "future-architect:vuls:SrcArch",
|
||||
Value: p.arch,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
purl := toPkgPURL(family, release, pack.Name, pack.Version, pack.Release, pack.Arch, pack.Repository)
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: purl,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: pack.Name,
|
||||
Version: pack.Version,
|
||||
PackageURL: purl,
|
||||
Properties: &props,
|
||||
})
|
||||
|
||||
ospkgToPURL[pack.Name] = purl
|
||||
}
|
||||
return components
|
||||
}
|
||||
|
||||
func cpeToCdxComponents(scannedCves models.VulnInfos) []cdx.Component {
|
||||
cpes := map[string]struct{}{}
|
||||
for _, cve := range scannedCves {
|
||||
for _, cpe := range cve.CpeURIs {
|
||||
cpes[cpe] = struct{}{}
|
||||
}
|
||||
}
|
||||
if len(cpes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
components := []cdx.Component{
|
||||
{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeApplication,
|
||||
Name: "CPEs",
|
||||
Properties: &[]cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: "CPE",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for cpe := range cpes {
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: cpe,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: cpe,
|
||||
CPE: cpe,
|
||||
})
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func libpkgToCdxComponents(libscanner models.LibraryScanner, libpkgToPURL map[string]map[string]string) []cdx.Component {
|
||||
components := []cdx.Component{
|
||||
{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeApplication,
|
||||
Name: libscanner.LockfilePath,
|
||||
Properties: &[]cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: libscanner.Type,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, lib := range libscanner.Libs {
|
||||
purl := packageurl.NewPackageURL(libscanner.Type, "", lib.Name, lib.Version, packageurl.Qualifiers{{Key: "file_path", Value: libscanner.LockfilePath}}, "").ToString()
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: purl,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: lib.Name,
|
||||
Version: lib.Version,
|
||||
PackageURL: purl,
|
||||
})
|
||||
|
||||
libpkgToPURL[libscanner.LockfilePath][lib.Name] = purl
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func ghpkgToCdxComponents(m models.DependencyGraphManifest, ghpkgToPURL map[string]map[string]string) []cdx.Component {
|
||||
components := []cdx.Component{
|
||||
{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeApplication,
|
||||
Name: m.BlobPath,
|
||||
Properties: &[]cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: m.Ecosystem(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dep := range m.Dependencies {
|
||||
purl := packageurl.NewPackageURL(m.Ecosystem(), "", dep.PackageName, dep.Version(), packageurl.Qualifiers{{Key: "repo_url", Value: m.Repository}, {Key: "file_path", Value: m.Filename}}, "").ToString()
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: purl,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: dep.PackageName,
|
||||
Version: dep.Version(),
|
||||
PackageURL: purl,
|
||||
})
|
||||
|
||||
ghpkgToPURL[m.RepoURLFilename()][dep.PackageName] = purl
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func wppkgToCdxComponents(wppkgs models.WordPressPackages, wppkgToPURL map[string]string) []cdx.Component {
|
||||
if len(wppkgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
components := []cdx.Component{
|
||||
{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeApplication,
|
||||
Name: "wordpress",
|
||||
Properties: &[]cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: "WordPress",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, wppkg := range wppkgs {
|
||||
purl := packageurl.NewPackageURL("wordpress", wppkg.Type, wppkg.Name, wppkg.Version, packageurl.Qualifiers{{Key: "status", Value: wppkg.Status}}, "").ToString()
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: purl,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: wppkg.Name,
|
||||
Version: wppkg.Version,
|
||||
PackageURL: purl,
|
||||
})
|
||||
|
||||
wppkgToPURL[wppkg.Name] = purl
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func cdxDependencies(bomRefs map[string][]string) *[]cdx.Dependency {
|
||||
dependencies := make([]cdx.Dependency, 0, len(bomRefs))
|
||||
for ref, depRefs := range bomRefs {
|
||||
ds := depRefs
|
||||
dependencies = append(dependencies, cdx.Dependency{
|
||||
Ref: ref,
|
||||
Dependencies: &ds,
|
||||
})
|
||||
}
|
||||
return &dependencies
|
||||
}
|
||||
|
||||
func toPkgPURL(osFamily, osVersion, packName, packVersion, packRelease, packArch, packRepository string) string {
|
||||
var purlType string
|
||||
switch osFamily {
|
||||
case constant.Alma, constant.Amazon, constant.CentOS, constant.Fedora, constant.OpenSUSE, constant.OpenSUSELeap, constant.Oracle, constant.RedHat, constant.Rocky, constant.SUSEEnterpriseDesktop, constant.SUSEEnterpriseServer:
|
||||
purlType = "rpm"
|
||||
case constant.Alpine:
|
||||
purlType = "apk"
|
||||
case constant.Debian, constant.Raspbian, constant.Ubuntu:
|
||||
purlType = "deb"
|
||||
case constant.FreeBSD:
|
||||
purlType = "pkg"
|
||||
case constant.Windows:
|
||||
purlType = "win"
|
||||
case constant.ServerTypePseudo:
|
||||
purlType = "pseudo"
|
||||
default:
|
||||
purlType = "unknown"
|
||||
}
|
||||
|
||||
version := packVersion
|
||||
if packRelease != "" {
|
||||
version = fmt.Sprintf("%s-%s", packVersion, packRelease)
|
||||
}
|
||||
|
||||
var qualifiers packageurl.Qualifiers
|
||||
if osVersion != "" {
|
||||
qualifiers = append(qualifiers, packageurl.Qualifier{
|
||||
Key: "distro",
|
||||
Value: osVersion,
|
||||
})
|
||||
}
|
||||
if packArch != "" {
|
||||
qualifiers = append(qualifiers, packageurl.Qualifier{
|
||||
Key: "arch",
|
||||
Value: packArch,
|
||||
})
|
||||
}
|
||||
if packRepository != "" {
|
||||
qualifiers = append(qualifiers, packageurl.Qualifier{
|
||||
Key: "repo",
|
||||
Value: packRepository,
|
||||
})
|
||||
}
|
||||
|
||||
return packageurl.NewPackageURL(purlType, osFamily, packName, version, qualifiers, "").ToString()
|
||||
}
|
||||
|
||||
func cdxVulnerabilities(result models.ScanResult, ospkgToPURL map[string]string, libpkgToPURL, ghpkgToPURL map[string]map[string]string, wppkgToPURL map[string]string) *[]cdx.Vulnerability {
|
||||
vulnerabilities := make([]cdx.Vulnerability, 0, len(result.ScannedCves))
|
||||
for _, cve := range result.ScannedCves {
|
||||
vulnerabilities = append(vulnerabilities, cdx.Vulnerability{
|
||||
ID: cve.CveID,
|
||||
Ratings: cdxRatings(cve.CveContents),
|
||||
CWEs: cdxCWEs(cve.CveContents),
|
||||
Description: cdxDescription(cve.CveContents),
|
||||
Advisories: cdxAdvisories(cve.CveContents),
|
||||
Affects: cdxAffects(cve, ospkgToPURL, libpkgToPURL, ghpkgToPURL, wppkgToPURL),
|
||||
})
|
||||
}
|
||||
return &vulnerabilities
|
||||
}
|
||||
|
||||
func cdxRatings(cveContents models.CveContents) *[]cdx.VulnerabilityRating {
|
||||
var ratings []cdx.VulnerabilityRating
|
||||
for _, contents := range cveContents {
|
||||
for _, content := range contents {
|
||||
if content.Cvss2Score != 0 || content.Cvss2Vector != "" || content.Cvss2Severity != "" {
|
||||
ratings = append(ratings, cdxCVSS2Rating(string(content.Type), content.Cvss2Vector, content.Cvss2Score, content.Cvss2Severity))
|
||||
}
|
||||
if content.Cvss3Score != 0 || content.Cvss3Vector != "" || content.Cvss3Severity != "" {
|
||||
ratings = append(ratings, cdxCVSS3Rating(string(content.Type), content.Cvss3Vector, content.Cvss3Score, content.Cvss3Severity))
|
||||
}
|
||||
}
|
||||
}
|
||||
return &ratings
|
||||
}
|
||||
|
||||
func cdxCVSS2Rating(source, vector string, score float64, severity string) cdx.VulnerabilityRating {
|
||||
r := cdx.VulnerabilityRating{
|
||||
Source: &cdx.Source{Name: source},
|
||||
Method: cdx.ScoringMethodCVSSv2,
|
||||
Vector: vector,
|
||||
}
|
||||
if score != 0 {
|
||||
r.Score = &score
|
||||
}
|
||||
switch strings.ToLower(severity) {
|
||||
case "high":
|
||||
r.Severity = cdx.SeverityHigh
|
||||
case "medium":
|
||||
r.Severity = cdx.SeverityMedium
|
||||
case "low":
|
||||
r.Severity = cdx.SeverityLow
|
||||
default:
|
||||
r.Severity = cdx.SeverityUnknown
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func cdxCVSS3Rating(source, vector string, score float64, severity string) cdx.VulnerabilityRating {
|
||||
r := cdx.VulnerabilityRating{
|
||||
Source: &cdx.Source{Name: source},
|
||||
Method: cdx.ScoringMethodCVSSv3,
|
||||
Vector: vector,
|
||||
}
|
||||
if strings.HasPrefix(vector, "CVSS:3.1") {
|
||||
r.Method = cdx.ScoringMethodCVSSv31
|
||||
}
|
||||
if score != 0 {
|
||||
r.Score = &score
|
||||
}
|
||||
switch strings.ToLower(severity) {
|
||||
case "critical":
|
||||
r.Severity = cdx.SeverityCritical
|
||||
case "high":
|
||||
r.Severity = cdx.SeverityHigh
|
||||
case "medium":
|
||||
r.Severity = cdx.SeverityMedium
|
||||
case "low":
|
||||
r.Severity = cdx.SeverityLow
|
||||
case "none":
|
||||
r.Severity = cdx.SeverityNone
|
||||
default:
|
||||
r.Severity = cdx.SeverityUnknown
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func cdxAffects(cve models.VulnInfo, ospkgToPURL map[string]string, libpkgToPURL, ghpkgToPURL map[string]map[string]string, wppkgToPURL map[string]string) *[]cdx.Affects {
|
||||
affects := make([]cdx.Affects, 0, len(cve.AffectedPackages)+len(cve.CpeURIs)+len(cve.LibraryFixedIns)+len(cve.WpPackageFixStats))
|
||||
|
||||
for _, p := range cve.AffectedPackages {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: ospkgToPURL[p.Name],
|
||||
})
|
||||
}
|
||||
for _, cpe := range cve.CpeURIs {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: cpe,
|
||||
})
|
||||
}
|
||||
for _, lib := range cve.LibraryFixedIns {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: libpkgToPURL[lib.Path][lib.Name],
|
||||
})
|
||||
}
|
||||
for _, alert := range cve.GitHubSecurityAlerts {
|
||||
// TODO: not in dependency graph
|
||||
if purl, ok := ghpkgToPURL[alert.RepoURLManifestPath()][alert.Package.Name]; ok {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: purl,
|
||||
})
|
||||
}
|
||||
}
|
||||
for _, wppack := range cve.WpPackageFixStats {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: wppkgToPURL[wppack.Name],
|
||||
})
|
||||
}
|
||||
|
||||
return &affects
|
||||
}
|
||||
|
||||
func cdxCWEs(cveContents models.CveContents) *[]int {
|
||||
m := map[int]struct{}{}
|
||||
for _, contents := range cveContents {
|
||||
for _, content := range contents {
|
||||
for _, cweID := range content.CweIDs {
|
||||
if !strings.HasPrefix(cweID, "CWE-") {
|
||||
continue
|
||||
}
|
||||
i, err := strconv.Atoi(strings.TrimPrefix(cweID, "CWE-"))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
m[i] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
cweIDs := maps.Keys(m)
|
||||
return &cweIDs
|
||||
}
|
||||
|
||||
func cdxDescription(cveContents models.CveContents) string {
|
||||
if contents, ok := cveContents[models.Nvd]; ok {
|
||||
return contents[0].Summary
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func cdxAdvisories(cveContents models.CveContents) *[]cdx.Advisory {
urls := map[string]struct{}{}
for _, contents := range cveContents {
for _, content := range contents {
if content.SourceLink != "" {
urls[content.SourceLink] = struct{}{}
}
for _, r := range content.References {
urls[r.Link] = struct{}{}
}
}
}
advisories := make([]cdx.Advisory, 0, len(urls))
for u := range urls {
advisories = append(advisories, cdx.Advisory{
URL: u,
})
}
return &advisories
}

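A standalone sketch of the package URL that toPkgPURL above builds for an RPM package, using the same packageurl-go calls as the new file; all concrete values are hypothetical examples.

    package main

    import (
    	"fmt"

    	"github.com/package-url/packageurl-go"
    )

    func main() {
    	// Roughly what toPkgPURL produces for an RPM on Amazon Linux 2.
    	qualifiers := packageurl.Qualifiers{
    		{Key: "distro", Value: "2"},
    		{Key: "arch", Value: "x86_64"},
    		{Key: "repo", Value: "amzn2-core"},
    	}
    	purl := packageurl.NewPackageURL("rpm", "amazon", "nginx", "1.20.0-1.amzn2", qualifiers, "")
    	fmt.Println(purl.ToString())
    	// e.g. pkg:rpm/amazon/nginx@1.20.0-1.amzn2?arch=x86_64&distro=2&repo=amzn2-core
    }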
@@ -33,12 +33,13 @@ type message struct {
|
||||
Attachments []slack.Attachment `json:"attachments"`
|
||||
}
|
||||
|
||||
// Write results to Slack
|
||||
func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
|
||||
channel := w.Cnf.Channel
|
||||
for _, r := range rs {
|
||||
w.lang, w.osFamily = r.Lang, r.Family
|
||||
if channel == "${servername}" {
|
||||
channel := w.Cnf.Channel
|
||||
if w.Cnf.Channel == "${servername}" {
|
||||
channel = fmt.Sprintf("#%s", r.ServerName)
|
||||
}
|
||||
|
||||
@@ -195,7 +196,7 @@ func (w SlackWriter) toSlackAttachments(r models.ScanResult) (attaches []slack.A
|
||||
candidate = append(candidate, "?")
|
||||
}
|
||||
for _, n := range vinfo.GitHubSecurityAlerts {
|
||||
installed = append(installed, n.PackageName)
|
||||
installed = append(installed, n.RepoURLPackageName())
|
||||
candidate = append(candidate, "?")
|
||||
}
|
||||
|
||||
@@ -326,23 +327,19 @@ func (w SlackWriter) attachmentText(vinfo models.VulnInfo, cweDict map[string]mo
|
||||
func (w SlackWriter) cweIDs(vinfo models.VulnInfo, osFamily string, cweDict models.CweDict) string {
|
||||
links := []string{}
|
||||
for _, c := range vinfo.CveContents.UniqCweIDs(osFamily) {
|
||||
name, url, top10Rank, top10URL, cweTop25Rank, cweTop25URL, sansTop25Rank, sansTop25URL := cweDict.Get(c.Value, w.lang)
|
||||
line := ""
|
||||
if top10Rank != "" {
|
||||
line = fmt.Sprintf("<%s|[OWASP Top %s]>",
|
||||
top10URL, top10Rank)
|
||||
name, url, owasp, cwe25, sans := cweDict.Get(c.Value, w.lang)
|
||||
line := fmt.Sprintf("<%s|%s>: %s", url, c.Value, name)
|
||||
for year, info := range owasp {
|
||||
links = append(links, fmt.Sprintf("<%s|[OWASP(%s) Top %s]> %s", info.URL, year, info.Rank, line))
|
||||
}
|
||||
if cweTop25Rank != "" {
|
||||
line = fmt.Sprintf("<%s|[CWE Top %s]>",
|
||||
cweTop25URL, cweTop25Rank)
|
||||
for year, info := range cwe25 {
|
||||
links = append(links, fmt.Sprintf("<%s|[CWE(%s) Top %s]> %s", info.URL, year, info.Rank, line))
|
||||
}
|
||||
if sansTop25Rank != "" {
|
||||
line = fmt.Sprintf("<%s|[CWE/SANS Top %s]>",
|
||||
sansTop25URL, sansTop25Rank)
|
||||
for year, info := range sans {
|
||||
links = append(links, fmt.Sprintf("<%s|[CWE/SANS(%s) Top %s]> %s", info.URL, year, info.Rank, line))
|
||||
}
|
||||
if top10Rank == "" && cweTop25Rank == "" && sansTop25Rank == "" {
|
||||
links = append(links, fmt.Sprintf("%s <%s|%s>: %s",
|
||||
line, url, c.Value, name))
|
||||
if len(owasp) == 0 && len(cwe25) == 0 && len(sans) == 0 {
|
||||
links = append(links, line)
|
||||
}
|
||||
}
|
||||
return strings.Join(links, "\n")
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
|
||||
// StdoutWriter write to stdout
|
||||
type StdoutWriter struct {
|
||||
FormatCsv bool
|
||||
FormatFullText bool
|
||||
FormatOneLineText bool
|
||||
FormatList bool
|
||||
@@ -24,6 +23,7 @@ func (w StdoutWriter) WriteScanSummary(rs ...models.ScanResult) {
|
||||
fmt.Printf("%s\n", formatScanSummary(rs...))
|
||||
}
|
||||
|
||||
// Write results to stdout
|
||||
func (w StdoutWriter) Write(rs ...models.ScanResult) error {
|
||||
if w.FormatOneLineText {
|
||||
fmt.Print("\n\n")
|
||||
@@ -33,7 +33,7 @@ func (w StdoutWriter) Write(rs ...models.ScanResult) error {
|
||||
fmt.Print("\n")
|
||||
}
|
||||
|
||||
if w.FormatList || w.FormatCsv {
|
||||
if w.FormatList {
|
||||
for _, r := range rs {
|
||||
fmt.Println(formatList(r))
|
||||
}
|
||||
|
||||
@@ -16,6 +16,7 @@ type SyslogWriter struct {
|
||||
Cnf config.SyslogConf
|
||||
}
|
||||
|
||||
// Write results to syslog
|
||||
func (w SyslogWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
facility, _ := w.Cnf.GetFacility()
|
||||
severity, _ := w.Cnf.GetSeverity()
|
||||
|
||||
@@ -21,6 +21,7 @@ type TelegramWriter struct {
|
||||
Proxy string
|
||||
}
|
||||
|
||||
// Write results to Telegram
|
||||
func (w TelegramWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
for _, r := range rs {
|
||||
msgs := []string{fmt.Sprintf("*%s*\n%s\n%s\n%s",
|
||||
|
||||
168
reporter/util.go
@@ -5,7 +5,8 @@ import (
|
||||
"encoding/csv"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
@@ -15,10 +16,12 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/cti"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/gosuri/uitable"
|
||||
"github.com/olekukonko/tablewriter"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
@@ -40,8 +43,8 @@ func OverwriteJSONFile(dir string, r models.ScanResult) error {
|
||||
|
||||
// LoadScanResults read JSON data
|
||||
func LoadScanResults(jsonDir string) (results models.ScanResults, err error) {
|
||||
var files []os.FileInfo
|
||||
if files, err = ioutil.ReadDir(jsonDir); err != nil {
|
||||
var files []fs.DirEntry
|
||||
if files, err = os.ReadDir(jsonDir); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to read %s: %w", jsonDir, err)
|
||||
}
|
||||
for _, f := range files {
|
||||
@@ -68,7 +71,7 @@ func loadOneServerScanResult(jsonFile string) (*models.ScanResult, error) {
|
||||
data []byte
|
||||
err error
|
||||
)
|
||||
if data, err = ioutil.ReadFile(jsonFile); err != nil {
|
||||
if data, err = os.ReadFile(jsonFile); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to read %s: %w", jsonFile, err)
|
||||
}
|
||||
result := &models.ScanResult{}
|
||||
@@ -87,8 +90,8 @@ var jsonDirPattern = regexp.MustCompile(
|
||||
// ListValidJSONDirs returns valid json directory as array
|
||||
// Returned array is sorted so that recent directories are at the head
|
||||
func ListValidJSONDirs(resultsDir string) (dirs []string, err error) {
|
||||
var dirInfo []os.FileInfo
|
||||
if dirInfo, err = ioutil.ReadDir(resultsDir); err != nil {
|
||||
var dirInfo []fs.DirEntry
|
||||
if dirInfo, err = os.ReadDir(resultsDir); err != nil {
|
||||
err = xerrors.Errorf("Failed to read %s: %w", resultsDir, err)
|
||||
return
|
||||
}
|
||||
@@ -128,7 +131,7 @@ func JSONDir(resultsDir string, args []string) (path string, err error) {
|
||||
|
||||
// TODO remove Pipe flag
|
||||
if config.Conf.Pipe {
|
||||
bytes, err := ioutil.ReadAll(os.Stdin)
|
||||
bytes, err := io.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
return "", xerrors.Errorf("Failed to read stdin: %w", err)
|
||||
}
|
||||
@@ -255,6 +258,7 @@ No CVE-IDs are found in updatable packages.
|
||||
// v2max := vinfo.MaxCvss2Score().Value.Score
|
||||
// v3max := vinfo.MaxCvss3Score().Value.Score
|
||||
|
||||
packnames := strings.Join(vinfo.AffectedPackages.Names(), ", ")
|
||||
// packname := vinfo.AffectedPackages.FormatTuiSummary()
|
||||
// packname += strings.Join(vinfo.CpeURIs, ", ")
|
||||
|
||||
@@ -263,12 +267,12 @@ No CVE-IDs are found in updatable packages.
|
||||
exploits = "POC"
|
||||
}
|
||||
|
||||
link := ""
|
||||
if strings.HasPrefix(vinfo.CveID, "CVE-") {
|
||||
link = fmt.Sprintf("https://nvd.nist.gov/vuln/detail/%s", vinfo.CveID)
|
||||
} else if strings.HasPrefix(vinfo.CveID, "WPVDBID-") {
|
||||
link = fmt.Sprintf("https://wpscan.com/vulnerabilities/%s", strings.TrimPrefix(vinfo.CveID, "WPVDBID-"))
|
||||
}
|
||||
// link := ""
|
||||
// if strings.HasPrefix(vinfo.CveID, "CVE-") {
|
||||
// link = fmt.Sprintf("https://nvd.nist.gov/vuln/detail/%s", vinfo.CveID)
|
||||
// } else if strings.HasPrefix(vinfo.CveID, "WPVDBID-") {
|
||||
// link = fmt.Sprintf("https://wpscan.com/vulnerabilities/%s", strings.TrimPrefix(vinfo.CveID, "WPVDBID-"))
|
||||
// }
|
||||
|
||||
data = append(data, []string{
|
||||
vinfo.CveIDDiffFormat(),
|
||||
@@ -279,7 +283,7 @@ No CVE-IDs are found in updatable packages.
|
||||
exploits,
|
||||
fmt.Sprintf("%9s", vinfo.AlertDict.FormatSource()),
|
||||
fmt.Sprintf("%7s", vinfo.PatchStatus(r.Packages)),
|
||||
link,
|
||||
packnames,
|
||||
})
|
||||
}
|
||||
|
||||
@@ -294,9 +298,11 @@ No CVE-IDs are found in updatable packages.
|
||||
"PoC",
|
||||
"Alert",
|
||||
"Fixed",
|
||||
"NVD",
|
||||
// "NVD",
|
||||
"Packages",
|
||||
})
|
||||
table.SetBorder(true)
|
||||
table.SetRowLine(true)
|
||||
table.AppendBulk(data)
|
||||
table.Render()
|
||||
return fmt.Sprintf("%s\n%s", header, b.String())
|
||||
@@ -373,8 +379,8 @@ No CVE-IDs are found in updatable packages.
|
||||
if len(pack.AffectedProcs) != 0 {
|
||||
for _, p := range pack.AffectedProcs {
|
||||
if len(p.ListenPortStats) == 0 {
|
||||
data = append(data, []string{"",
|
||||
fmt.Sprintf(" - PID: %s %s, Port: []", p.PID, p.Name)})
|
||||
data = append(data, []string{"", fmt.Sprintf(" - PID: %s %s", p.PID, p.Name)})
|
||||
continue
|
||||
}
|
||||
|
||||
var ports []string
|
||||
@@ -398,7 +404,7 @@ No CVE-IDs are found in updatable packages.
|
||||
}
|
||||
|
||||
for _, alert := range vuln.GitHubSecurityAlerts {
|
||||
data = append(data, []string{"GitHub", alert.PackageName})
|
||||
data = append(data, []string{"GitHub", alert.RepoURLPackageName()})
|
||||
}
|
||||
|
||||
for _, wp := range vuln.WpPackageFixStats {
|
||||
@@ -412,8 +418,7 @@ No CVE-IDs are found in updatable packages.
|
||||
wp.Name, p.Version, p.Update, wp.FixedIn, p.Status)})
|
||||
}
|
||||
} else {
|
||||
data = append(data, []string{"WordPress",
|
||||
fmt.Sprintf("%s", wp.Name)})
|
||||
data = append(data, []string{"WordPress", wp.Name})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -430,31 +435,42 @@ No CVE-IDs are found in updatable packages.
|
||||
data = append(data, []string{"Confidence", confidence.String()})
|
||||
}
|
||||
|
||||
cweURLs, top10URLs := []string{}, []string{}
|
||||
cweTop25URLs, sansTop25URLs := []string{}, []string{}
|
||||
cweURLs, top10URLs, cweTop25URLs, sansTop25URLs := []string{}, map[string][]string{}, map[string][]string{}, map[string][]string{}
|
||||
for _, v := range vuln.CveContents.UniqCweIDs(r.Family) {
|
||||
name, url, top10Rank, top10URL, cweTop25Rank, cweTop25URL, sansTop25Rank, sansTop25URL := r.CweDict.Get(v.Value, r.Lang)
|
||||
if top10Rank != "" {
|
||||
data = append(data, []string{"CWE",
|
||||
fmt.Sprintf("[OWASP Top%s] %s: %s (%s)",
|
||||
top10Rank, v.Value, name, v.Type)})
|
||||
top10URLs = append(top10URLs, top10URL)
|
||||
name, url, owasp, cwe25, sans := r.CweDict.Get(v.Value, r.Lang)
|
||||
|
||||
ds := [][]string{}
|
||||
for year, info := range owasp {
|
||||
ds = append(ds, []string{"CWE", fmt.Sprintf("[OWASP(%s) Top%s] %s: %s (%s)", year, info.Rank, v.Value, name, v.Type)})
|
||||
top10URLs[year] = append(top10URLs[year], info.URL)
|
||||
}
|
||||
if cweTop25Rank != "" {
|
||||
data = append(data, []string{"CWE",
|
||||
fmt.Sprintf("[CWE Top%s] %s: %s (%s)",
|
||||
cweTop25Rank, v.Value, name, v.Type)})
|
||||
cweTop25URLs = append(cweTop25URLs, cweTop25URL)
|
||||
slices.SortFunc(ds, func(a, b []string) bool {
|
||||
return a[1] < b[1]
|
||||
})
|
||||
data = append(data, ds...)
|
||||
|
||||
ds = [][]string{}
|
||||
for year, info := range cwe25 {
|
||||
ds = append(ds, []string{"CWE", fmt.Sprintf("[CWE(%s) Top%s] %s: %s (%s)", year, info.Rank, v.Value, name, v.Type)})
|
||||
cweTop25URLs[year] = append(cweTop25URLs[year], info.URL)
|
||||
}
|
||||
if sansTop25Rank != "" {
|
||||
data = append(data, []string{"CWE",
|
||||
fmt.Sprintf("[CWE/SANS Top%s] %s: %s (%s)",
|
||||
sansTop25Rank, v.Value, name, v.Type)})
|
||||
sansTop25URLs = append(sansTop25URLs, sansTop25URL)
|
||||
slices.SortFunc(ds, func(a, b []string) bool {
|
||||
return a[1] < b[1]
|
||||
})
|
||||
data = append(data, ds...)
|
||||
|
||||
ds = [][]string{}
|
||||
for year, info := range sans {
|
||||
ds = append(ds, []string{"CWE", fmt.Sprintf("[CWE/SANS(%s) Top%s] %s: %s (%s)", year, info.Rank, v.Value, name, v.Type)})
|
||||
sansTop25URLs[year] = append(sansTop25URLs[year], info.URL)
|
||||
}
|
||||
if top10Rank == "" && cweTop25Rank == "" && sansTop25Rank == "" {
|
||||
data = append(data, []string{"CWE", fmt.Sprintf("%s: %s (%s)",
|
||||
v.Value, name, v.Type)})
|
||||
slices.SortFunc(ds, func(a, b []string) bool {
|
||||
return a[1] < b[1]
|
||||
})
|
||||
data = append(data, ds...)
|
||||
|
||||
if len(owasp) == 0 && len(cwe25) == 0 && len(sans) == 0 {
|
||||
data = append(data, []string{"CWE", fmt.Sprintf("%s: %s (%s)", v.Value, name, v.Type)})
|
||||
}
|
||||
cweURLs = append(cweURLs, url)
|
||||
}
|
||||
@@ -462,18 +478,44 @@ No CVE-IDs are found in updatable packages.
|
||||
for _, url := range cweURLs {
|
||||
data = append(data, []string{"CWE", url})
|
||||
}
|
||||
|
||||
m := map[string]struct{}{}
|
||||
for _, exploit := range vuln.Exploits {
|
||||
if _, ok := m[exploit.URL]; ok {
|
||||
continue
|
||||
}
|
||||
data = append(data, []string{string(exploit.ExploitType), exploit.URL})
|
||||
m[exploit.URL] = struct{}{}
|
||||
}
|
||||
for _, url := range top10URLs {
|
||||
data = append(data, []string{"OWASP Top10", url})
|
||||
|
||||
for year, urls := range top10URLs {
|
||||
ds := [][]string{}
|
||||
for _, url := range urls {
|
||||
ds = append(ds, []string{fmt.Sprintf("OWASP(%s) Top10", year), url})
|
||||
}
|
||||
slices.SortFunc(ds, func(a, b []string) bool {
|
||||
return a[0] < b[0]
|
||||
})
|
||||
data = append(data, ds...)
|
||||
}
|
||||
if len(cweTop25URLs) != 0 {
|
||||
data = append(data, []string{"CWE Top25", cweTop25URLs[0]})
|
||||
|
||||
ds := [][]string{}
|
||||
for year, urls := range cweTop25URLs {
|
||||
ds = append(ds, []string{fmt.Sprintf("CWE(%s) Top25", year), urls[0]})
|
||||
}
|
||||
if len(sansTop25URLs) != 0 {
|
||||
data = append(data, []string{"SANS/CWE Top25", sansTop25URLs[0]})
|
||||
slices.SortFunc(ds, func(a, b []string) bool {
|
||||
return a[0] < b[0]
|
||||
})
|
||||
data = append(data, ds...)
|
||||
|
||||
ds = [][]string{}
|
||||
for year, urls := range sansTop25URLs {
|
||||
ds = append(ds, []string{fmt.Sprintf("SANS/CWE(%s) Top25", year), urls[0]})
|
||||
}
|
||||
slices.SortFunc(ds, func(a, b []string) bool {
|
||||
return a[0] < b[0]
|
||||
})
|
||||
data = append(data, ds...)
|
||||
|
||||
for _, alert := range vuln.AlertDict.CISA {
|
||||
data = append(data, []string{"CISA Alert", alert.URL})
|
||||
@@ -487,6 +529,22 @@ No CVE-IDs are found in updatable packages.
|
||||
data = append(data, []string{"US-CERT Alert", alert.URL})
|
||||
}
|
||||
|
||||
attacks := []string{}
|
||||
for _, techniqueID := range vuln.Ctis {
|
||||
if strings.HasPrefix(techniqueID, "CAPEC-") {
|
||||
continue
|
||||
}
|
||||
technique, ok := cti.TechniqueDict[techniqueID]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
attacks = append(attacks, technique.Name)
|
||||
}
|
||||
slices.Sort(attacks)
|
||||
for _, attack := range attacks {
|
||||
data = append(data, []string{"MITER ATT&CK", attack})
|
||||
}
|
||||
|
||||
// for _, rr := range vuln.CveContents.References(r.Family) {
|
||||
// for _, ref := range rr.Value {
|
||||
// data = append(data, []string{ref.Source, ref.Link})
|
||||
@@ -614,7 +672,7 @@ func getPlusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
previousCveIDsSet[previousVulnInfo.CveID] = true
|
||||
}
|
||||
|
||||
new := models.VulnInfos{}
|
||||
newer := models.VulnInfos{}
|
||||
updated := models.VulnInfos{}
|
||||
for _, v := range current.ScannedCves {
|
||||
if previousCveIDsSet[v.CveID] {
|
||||
@@ -634,17 +692,17 @@ func getPlusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
logging.Log.Debugf("same: %s", v.CveID)
|
||||
}
|
||||
} else {
|
||||
logging.Log.Debugf("new: %s", v.CveID)
|
||||
logging.Log.Debugf("newer: %s", v.CveID)
|
||||
v.DiffStatus = models.DiffPlus
|
||||
new[v.CveID] = v
|
||||
newer[v.CveID] = v
|
||||
}
|
||||
}
|
||||
|
||||
if len(updated) == 0 && len(new) == 0 {
|
||||
if len(updated) == 0 && len(newer) == 0 {
|
||||
logging.Log.Infof("%s: There are %d vulnerabilities, but no difference between current result and previous one.", current.FormatServerName(), len(current.ScannedCves))
|
||||
}
|
||||
|
||||
for cveID, vuln := range new {
|
||||
for cveID, vuln := range newer {
|
||||
updated[cveID] = vuln
|
||||
}
|
||||
return updated
|
||||
@@ -672,11 +730,7 @@ func getMinusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
}
|
||||
|
||||
func isCveInfoUpdated(cveID string, previous, current models.ScanResult) bool {
|
||||
cTypes := []models.CveContentType{
|
||||
models.Nvd,
|
||||
models.Jvn,
|
||||
models.NewCveContentType(current.Family),
|
||||
}
|
||||
cTypes := append([]models.CveContentType{models.Nvd, models.Jvn}, models.GetCveContentTypes(current.Family)...)
|
||||
|
||||
prevLastModifieds := map[models.CveContentType][]time.Time{}
|
||||
preVinfo, ok := previous.ScannedCves[cveID]
|
||||
|
||||
@@ -19,8 +19,8 @@ func TestMain(m *testing.M) {
|
||||
|
||||
func TestIsCveInfoUpdated(t *testing.T) {
|
||||
f := "2006-01-02"
|
||||
old, _ := time.Parse(f, "2015-12-15")
|
||||
new, _ := time.Parse(f, "2015-12-16")
|
||||
base, _ := time.Parse(f, "2015-12-15")
|
||||
newer, _ := time.Parse(f, "2015-12-16")
|
||||
|
||||
type In struct {
|
||||
cveID string
|
||||
@@ -78,7 +78,7 @@ func TestIsCveInfoUpdated(t *testing.T) {
|
||||
models.CveContent{
|
||||
Type: models.Jvn,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: old,
|
||||
LastModified: base,
|
||||
},
|
||||
),
|
||||
},
|
||||
@@ -92,7 +92,7 @@ func TestIsCveInfoUpdated(t *testing.T) {
|
||||
models.CveContent{
|
||||
Type: models.Jvn,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: old,
|
||||
LastModified: base,
|
||||
},
|
||||
),
|
||||
},
|
||||
@@ -114,7 +114,7 @@ func TestIsCveInfoUpdated(t *testing.T) {
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: new,
|
||||
LastModified: newer,
|
||||
},
|
||||
),
|
||||
},
|
||||
@@ -129,7 +129,7 @@ func TestIsCveInfoUpdated(t *testing.T) {
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: old,
|
||||
LastModified: base,
|
||||
},
|
||||
),
|
||||
},
|
||||
@@ -151,7 +151,7 @@ func TestIsCveInfoUpdated(t *testing.T) {
|
||||
models.CveContent{
|
||||
Type: models.Nvd,
|
||||
CveID: "CVE-2017-0002",
|
||||
LastModified: old,
|
||||
LastModified: base,
|
||||
},
|
||||
),
|
||||
},
|
||||
|
||||
12
saas/saas.go
@@ -5,7 +5,7 @@ import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
"io"
"net/http"
"os"
"path"
@@ -47,6 +47,7 @@ func (w Writer) Write(rs ...models.ScanResult) error {
if len(rs) == 0 {
return nil
}
tags := strings.Split(os.Getenv("VULS_TAGS"), ",")

ipv4s, ipv6s, err := util.IP()
if err != nil {
@@ -88,7 +89,7 @@ func (w Writer) Write(rs ...models.ScanResult) error {
return xerrors.Errorf("Failed to get Credential. Request JSON : %s,", string(body))
}

t, err := ioutil.ReadAll(resp.Body)
t, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
@@ -111,6 +112,13 @@ func (w Writer) Write(rs ...models.ScanResult) error {

svc := s3.New(sess)
for _, r := range rs {
if 0 < len(tags) {
if r.Optional == nil {
r.Optional = map[string]interface{}{}
}
r.Optional["VULS_TAGS"] = tags
}

b, err := json.Marshal(r)
if err != nil {
return xerrors.Errorf("Failed to Marshal to JSON: %w", err)
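Note on the VULS_TAGS handling added above: in Go, strings.Split on an empty environment variable still returns a one-element slice containing "", so the 0 < len(tags) guard is satisfied even when VULS_TAGS is unset. A minimal, self-contained sketch (not part of this change set) illustrating that behavior:

	package main

	import (
		"fmt"
		"os"
		"strings"
	)

	func main() {
		// With VULS_TAGS unset, strings.Split("", ",") yields [""] (length 1),
		// so a length check alone does not distinguish "unset" from "set to something".
		tags := strings.Split(os.Getenv("VULS_TAGS"), ",")
		fmt.Println(len(tags), tags)
	}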
12
saas/uuid.go
@@ -3,7 +3,6 @@ package saas
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"reflect"
"strings"
@@ -104,6 +103,9 @@ func writeToFile(cnf config.Config, path string) error {
if cnf.Default.WordPress != nil && cnf.Default.WordPress.IsZero() {
cnf.Default.WordPress = nil
}
if cnf.Default.PortScan != nil && cnf.Default.PortScan.IsZero() {
cnf.Default.PortScan = nil
}

c := struct {
Saas *config.SaasConf `toml:"saas"`
@@ -139,7 +141,7 @@ func writeToFile(cnf config.Config, path string) error {
"# See README for details: https://vuls.io/docs/en/usage-settings.html",
str)

return ioutil.WriteFile(realPath, []byte(str), 0600)
return os.WriteFile(realPath, []byte(str), 0600)
}

func cleanForTOMLEncoding(server config.ServerInfo, def config.ServerInfo) config.ServerInfo {
@@ -199,5 +201,11 @@ func cleanForTOMLEncoding(server config.ServerInfo, def config.ServerInfo) confi
}
}

if server.PortScan != nil {
if server.PortScan.IsZero() || reflect.DeepEqual(server.PortScan, def.PortScan) {
server.PortScan = nil
}
}

return server
}
298
scanner/base.go
@@ -6,7 +6,6 @@ import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@@ -16,8 +15,9 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/aquasecurity/fanal/analyzer"
|
||||
dio "github.com/aquasecurity/go-dep-parser/pkg/io"
|
||||
"github.com/aquasecurity/trivy/pkg/fanal/analyzer"
|
||||
debver "github.com/knqyf263/go-deb-version"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
@@ -28,23 +28,27 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
// Import library scanner
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/dotnet/nuget"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/golang/binary"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/golang/mod"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/java/jar"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/java/pom"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/nodejs/npm"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/nodejs/yarn"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/php/composer"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/python/pip"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/python/pipenv"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/python/poetry"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/ruby/bundler"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/rust/cargo"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/c/conan"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/dotnet/deps"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/dotnet/nuget"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/golang/binary"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/golang/mod"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/gradle"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/jar"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/pom"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/npm"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/pnpm"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/yarn"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/php/composer"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/python/pip"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/python/pipenv"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/python/poetry"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/ruby/bundler"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/rust/cargo"
|
||||
|
||||
// _ "github.com/aquasecurity/fanal/analyzer/language/ruby/gemspec"
|
||||
// _ "github.com/aquasecurity/fanal/analyzer/language/nodejs/pkg"
|
||||
// _ "github.com/aquasecurity/fanal/analyzer/language/python/packaging"
|
||||
// _ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/ruby/gemspec"
|
||||
// _ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/pkg"
|
||||
// _ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/python/packaging"
|
||||
|
||||
nmap "github.com/Ullaakut/nmap/v2"
|
||||
)
|
||||
@@ -133,6 +137,10 @@ func (l *base) runningKernel() (release, version string, err error) {
|
||||
if 6 < len(ss) {
|
||||
version = ss[6]
|
||||
}
|
||||
if _, err := debver.NewVersion(version); err != nil {
|
||||
l.log.Warnf("kernel running version is invalid. skip kernel vulnerability detection. actual kernel version: %s, err: %s", version, err)
|
||||
version = ""
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -355,7 +363,6 @@ func (l *base) detectPlatform() {
|
||||
|
||||
//TODO Azure, GCP...
|
||||
l.setPlatform(models.Platform{Name: "other"})
|
||||
return
|
||||
}
|
||||
|
||||
var dsFingerPrintPrefix = "AgentStatus.agentCertHash: "
|
||||
@@ -392,10 +399,24 @@ func (l *base) detectRunningOnAws() (ok bool, instanceID string, err error) {
|
||||
r := l.exec(cmd, noSudo)
|
||||
if r.isSuccess() {
|
||||
id := strings.TrimSpace(r.Stdout)
|
||||
if !l.isAwsInstanceID(id) {
|
||||
return false, "", nil
|
||||
if l.isAwsInstanceID(id) {
|
||||
return true, id, nil
|
||||
}
|
||||
}
|
||||
|
||||
cmd = "curl -X PUT --max-time 1 --noproxy 169.254.169.254 -H \"X-aws-ec2-metadata-token-ttl-seconds: 300\" http://169.254.169.254/latest/api/token"
|
||||
r = l.exec(cmd, noSudo)
|
||||
if r.isSuccess() {
|
||||
token := strings.TrimSpace(r.Stdout)
|
||||
cmd = fmt.Sprintf("curl -H \"X-aws-ec2-metadata-token: %s\" --max-time 1 --noproxy 169.254.169.254 http://169.254.169.254/latest/meta-data/instance-id", token)
|
||||
r = l.exec(cmd, noSudo)
|
||||
if r.isSuccess() {
|
||||
id := strings.TrimSpace(r.Stdout)
|
||||
if !l.isAwsInstanceID(id) {
|
||||
return false, "", nil
|
||||
}
|
||||
return true, id, nil
|
||||
}
|
||||
return true, id, nil
|
||||
}
|
||||
|
||||
switch r.ExitStatus {
|
||||
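The detectRunningOnAws hunk above first tries the unauthenticated metadata endpoint and, when the response is not a valid instance ID, falls back to IMDSv2: fetch a session token with a PUT, then query the instance ID with that token. A minimal standard-library Go sketch of the same flow, using only the endpoint and headers shown in the curl commands above (this is an illustration, not the project's implementation):

	package main

	import (
		"fmt"
		"io"
		"net/http"
		"time"
	)

	// awsInstanceID fetches the EC2 instance ID via IMDSv2:
	// 1) PUT /latest/api/token with a TTL header to obtain a session token
	// 2) GET /latest/meta-data/instance-id with the token attached
	func awsInstanceID() (string, error) {
		client := &http.Client{Timeout: time.Second}

		req, err := http.NewRequest(http.MethodPut, "http://169.254.169.254/latest/api/token", nil)
		if err != nil {
			return "", err
		}
		req.Header.Set("X-aws-ec2-metadata-token-ttl-seconds", "300")
		resp, err := client.Do(req)
		if err != nil {
			return "", err
		}
		token, err := io.ReadAll(resp.Body)
		resp.Body.Close()
		if err != nil {
			return "", err
		}

		req, err = http.NewRequest(http.MethodGet, "http://169.254.169.254/latest/meta-data/instance-id", nil)
		if err != nil {
			return "", err
		}
		req.Header.Set("X-aws-ec2-metadata-token", string(token))
		resp, err = client.Do(req)
		if err != nil {
			return "", err
		}
		defer resp.Body.Close()
		id, err := io.ReadAll(resp.Body)
		return string(id), err
	}

	func main() {
		id, err := awsInstanceID()
		fmt.Println(id, err)
	}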
@@ -562,11 +583,6 @@ func (l *base) parseSystemctlStatus(stdout string) string {
|
||||
return ss[1]
|
||||
}
|
||||
|
||||
type libFile struct {
|
||||
contents []byte
|
||||
filemode os.FileMode
|
||||
}
|
||||
|
||||
func (l *base) scanLibraries() (err error) {
|
||||
if len(l.LibraryScanners) != 0 {
|
||||
return nil
|
||||
@@ -577,22 +593,35 @@ func (l *base) scanLibraries() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.log.Info("Scanning Lockfile...")
|
||||
l.log.Info("Scanning Language-specific Packages...")
|
||||
|
||||
libFilemap := map[string]libFile{}
|
||||
found := map[string]bool{}
|
||||
detectFiles := l.ServerInfo.Lockfiles
|
||||
|
||||
priv := noSudo
|
||||
if l.getServerInfo().Mode.IsFastRoot() || l.getServerInfo().Mode.IsDeep() {
|
||||
priv = sudo
|
||||
}
|
||||
|
||||
// auto detect lockfile
|
||||
if l.ServerInfo.FindLock {
|
||||
findopt := ""
|
||||
for filename := range models.LibraryMap {
|
||||
findopt += fmt.Sprintf("-name %q -o ", "*"+filename)
|
||||
for _, filename := range models.FindLockFiles {
|
||||
findopt += fmt.Sprintf("-name %q -o ", filename)
|
||||
}
|
||||
|
||||
dir := "/"
|
||||
if len(l.ServerInfo.FindLockDirs) != 0 {
|
||||
dir = strings.Join(l.ServerInfo.FindLockDirs, " ")
|
||||
} else {
|
||||
l.log.Infof("It's recommended to specify FindLockDirs in config.toml. If FindLockDirs is not specified, all directories under / will be searched, which may increase CPU load")
|
||||
}
|
||||
l.log.Infof("Finding files under %s", dir)
|
||||
|
||||
// delete last "-o "
|
||||
// find / -name "*package-lock.json" -o -name "*yarn.lock" ... 2>&1 | grep -v "find: "
|
||||
cmd := fmt.Sprintf(`find / ` + findopt[:len(findopt)-3] + ` 2>&1 | grep -v "find: "`)
|
||||
r := exec(l.ServerInfo, cmd, noSudo)
|
||||
// find / -type f -and \( -name "package-lock.json" -o -name "yarn.lock" ... \) 2>&1 | grep -v "find: "
|
||||
cmd := fmt.Sprintf(`find %s -type f -and \( `+findopt[:len(findopt)-3]+` \) 2>&1 | grep -v "find: "`, dir)
|
||||
r := exec(l.ServerInfo, cmd, priv)
|
||||
if r.ExitStatus != 0 && r.ExitStatus != 1 {
|
||||
return xerrors.Errorf("Failed to find lock files")
|
||||
}
|
||||
@@ -609,115 +638,167 @@ func (l *base) scanLibraries() (err error) {
|
||||
}
|
||||
|
||||
// skip already exist
|
||||
if _, ok := libFilemap[path]; ok {
|
||||
if _, ok := found[path]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
var f libFile
|
||||
var contents []byte
|
||||
var filemode os.FileMode
|
||||
|
||||
switch l.Distro.Family {
|
||||
case constant.ServerTypePseudo:
|
||||
fileinfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to get target file info. err: %w, filepath: %s", err, path)
|
||||
l.log.Warnf("Failed to get target file info. err: %s, filepath: %s", err, path)
|
||||
continue
|
||||
}
|
||||
f.filemode = fileinfo.Mode().Perm()
|
||||
f.contents, err = ioutil.ReadFile(path)
|
||||
filemode = fileinfo.Mode().Perm()
|
||||
contents, err = os.ReadFile(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to read target file contents. err: %w, filepath: %s", err, path)
|
||||
l.log.Warnf("Failed to read target file contents. err: %s, filepath: %s", err, path)
|
||||
continue
|
||||
}
|
||||
default:
|
||||
l.log.Debugf("Analyzing file: %s", path)
|
||||
cmd := fmt.Sprintf(`stat -c "%%a" %s`, path)
|
||||
r := exec(l.ServerInfo, cmd, noSudo)
|
||||
r := exec(l.ServerInfo, cmd, priv, logging.NewIODiscardLogger())
|
||||
if !r.isSuccess() {
|
||||
return xerrors.Errorf("Failed to get target file permission: %s, filepath: %s", r, path)
|
||||
l.log.Warnf("Failed to get target file permission: %s, filepath: %s", r, path)
|
||||
continue
|
||||
}
|
||||
permStr := fmt.Sprintf("0%s", strings.ReplaceAll(r.Stdout, "\n", ""))
|
||||
perm, err := strconv.ParseUint(permStr, 8, 32)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to parse permission string. err: %w, permission string: %s", err, permStr)
|
||||
l.log.Warnf("Failed to parse permission string. err: %s, permission string: %s", err, permStr)
|
||||
continue
|
||||
}
|
||||
f.filemode = os.FileMode(perm)
|
||||
filemode = os.FileMode(perm)
|
||||
|
||||
cmd = fmt.Sprintf("cat %s", path)
|
||||
r = exec(l.ServerInfo, cmd, noSudo)
|
||||
r = exec(l.ServerInfo, cmd, priv, logging.NewIODiscardLogger())
|
||||
if !r.isSuccess() {
|
||||
return xerrors.Errorf("Failed to get target file contents: %s, filepath: %s", r, path)
|
||||
l.log.Warnf("Failed to get target file contents: %s, filepath: %s", r, path)
|
||||
continue
|
||||
}
|
||||
f.contents = []byte(r.Stdout)
|
||||
contents = []byte(r.Stdout)
|
||||
}
|
||||
libFilemap[path] = f
|
||||
found[path] = true
|
||||
var libraryScanners []models.LibraryScanner
|
||||
if libraryScanners, err = AnalyzeLibrary(context.Background(), path, contents, filemode, l.ServerInfo.Mode.IsOffline()); err != nil {
|
||||
return err
|
||||
}
|
||||
l.LibraryScanners = append(l.LibraryScanners, libraryScanners...)
|
||||
}
|
||||
|
||||
var libraryScanners []models.LibraryScanner
|
||||
if libraryScanners, err = AnalyzeLibraries(context.Background(), libFilemap, l.ServerInfo.Mode.IsOffline()); err != nil {
|
||||
return err
|
||||
}
|
||||
l.LibraryScanners = append(l.LibraryScanners, libraryScanners...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AnalyzeLibraries : detects libs defined in lockfile
|
||||
func AnalyzeLibraries(ctx context.Context, libFilemap map[string]libFile, isOffline bool) (libraryScanners []models.LibraryScanner, err error) {
|
||||
disabledAnalyzers := []analyzer.Type{
|
||||
analyzer.TypeAlpine,
|
||||
analyzer.TypeAlma,
|
||||
analyzer.TypeAmazon,
|
||||
analyzer.TypeDebian,
|
||||
analyzer.TypePhoton,
|
||||
analyzer.TypeCentOS,
|
||||
analyzer.TypeFedora,
|
||||
analyzer.TypeOracle,
|
||||
analyzer.TypeRedHatBase,
|
||||
analyzer.TypeRocky,
|
||||
analyzer.TypeSUSE,
|
||||
analyzer.TypeUbuntu,
|
||||
analyzer.TypeApk,
|
||||
analyzer.TypeDpkg,
|
||||
analyzer.TypeRpm,
|
||||
analyzer.TypeApkCommand,
|
||||
analyzer.TypeYaml,
|
||||
analyzer.TypeTOML,
|
||||
analyzer.TypeJSON,
|
||||
analyzer.TypeDockerfile,
|
||||
analyzer.TypeHCL,
|
||||
// AnalyzeLibrary : detects library defined in artifact such as lockfile or jar
|
||||
func AnalyzeLibrary(ctx context.Context, path string, contents []byte, filemode os.FileMode, isOffline bool) (libraryScanners []models.LibraryScanner, err error) {
|
||||
anal, err := analyzer.NewAnalyzerGroup(analyzer.AnalyzerOptions{
|
||||
Group: analyzer.GroupBuiltin,
|
||||
DisabledAnalyzers: disabledAnalyzers,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to new analyzer group. err: %w", err)
|
||||
}
|
||||
anal := analyzer.NewAnalyzer(disabledAnalyzers)
|
||||
|
||||
for path, f := range libFilemap {
|
||||
var wg sync.WaitGroup
|
||||
result := new(analyzer.AnalysisResult)
|
||||
if err := anal.AnalyzeFile(
|
||||
ctx,
|
||||
&wg,
|
||||
semaphore.NewWeighted(1),
|
||||
result,
|
||||
"",
|
||||
path,
|
||||
&DummyFileInfo{size: int64(len(f.contents)), filemode: f.filemode},
|
||||
func() (dio.ReadSeekCloserAt, error) { return dio.NopCloser(bytes.NewReader(f.contents)), nil },
|
||||
analyzer.AnalysisOptions{Offline: isOffline},
|
||||
); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to get libs. err: %w", err)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
libscan, err := convertLibWithScanner(result.Applications)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to convert libs. err: %w", err)
|
||||
}
|
||||
libraryScanners = append(libraryScanners, libscan...)
|
||||
var wg sync.WaitGroup
|
||||
result := new(analyzer.AnalysisResult)
|
||||
if err := anal.AnalyzeFile(
|
||||
ctx,
|
||||
&wg,
|
||||
semaphore.NewWeighted(1),
|
||||
result,
|
||||
"",
|
||||
path,
|
||||
&DummyFileInfo{name: filepath.Base(path), size: int64(len(contents)), filemode: filemode},
|
||||
func() (dio.ReadSeekCloserAt, error) { return dio.NopCloser(bytes.NewReader(contents)), nil },
|
||||
nil,
|
||||
analyzer.AnalysisOptions{Offline: isOffline},
|
||||
); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to get libs. err: %w", err)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
libscan, err := convertLibWithScanner(result.Applications)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to convert libs. err: %w", err)
|
||||
}
|
||||
libraryScanners = append(libraryScanners, libscan...)
|
||||
return libraryScanners, nil
|
||||
}
|
||||
|
||||
// https://github.com/aquasecurity/trivy/blob/84677903a6fa1b707a32d0e8b2bffc23dde52afa/pkg/fanal/analyzer/const.go
|
||||
var disabledAnalyzers = []analyzer.Type{
|
||||
// ======
|
||||
// OS
|
||||
// ======
|
||||
analyzer.TypeOSRelease,
|
||||
analyzer.TypeAlpine,
|
||||
analyzer.TypeAmazon,
|
||||
analyzer.TypeCBLMariner,
|
||||
analyzer.TypeDebian,
|
||||
analyzer.TypePhoton,
|
||||
analyzer.TypeCentOS,
|
||||
analyzer.TypeRocky,
|
||||
analyzer.TypeAlma,
|
||||
analyzer.TypeFedora,
|
||||
analyzer.TypeOracle,
|
||||
analyzer.TypeRedHatBase,
|
||||
analyzer.TypeSUSE,
|
||||
analyzer.TypeUbuntu,
|
||||
|
||||
// OS Package
|
||||
analyzer.TypeApk,
|
||||
analyzer.TypeDpkg,
|
||||
analyzer.TypeDpkgLicense,
|
||||
analyzer.TypeRpm,
|
||||
analyzer.TypeRpmqa,
|
||||
|
||||
// OS Package Repository
|
||||
analyzer.TypeApkRepo,
|
||||
|
||||
// ============
|
||||
// Image Config
|
||||
// ============
|
||||
analyzer.TypeApkCommand,
|
||||
|
||||
// =================
|
||||
// Structured Config
|
||||
// =================
|
||||
analyzer.TypeYaml,
|
||||
analyzer.TypeJSON,
|
||||
analyzer.TypeDockerfile,
|
||||
analyzer.TypeTerraform,
|
||||
analyzer.TypeCloudFormation,
|
||||
analyzer.TypeHelm,
|
||||
|
||||
// ========
|
||||
// License
|
||||
// ========
|
||||
analyzer.TypeLicenseFile,
|
||||
|
||||
// ========
|
||||
// Secrets
|
||||
// ========
|
||||
analyzer.TypeSecret,
|
||||
|
||||
// =======
|
||||
// Red Hat
|
||||
// =======
|
||||
analyzer.TypeRedHatContentManifestType,
|
||||
analyzer.TypeRedHatDockerfileType,
|
||||
}
|
||||
|
||||
// DummyFileInfo is a dummy struct for libscan
|
||||
type DummyFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
filemode os.FileMode
|
||||
}
|
||||
|
||||
// Name is
|
||||
func (d *DummyFileInfo) Name() string { return "dummy" }
|
||||
func (d *DummyFileInfo) Name() string { return d.name }
|
||||
|
||||
// Size is
|
||||
func (d *DummyFileInfo) Size() int64 { return d.size }
|
||||
@@ -725,13 +806,13 @@ func (d *DummyFileInfo) Size() int64 { return d.size }
|
||||
// Mode is
|
||||
func (d *DummyFileInfo) Mode() os.FileMode { return d.filemode }
|
||||
|
||||
//ModTime is
|
||||
// ModTime is
|
||||
func (d *DummyFileInfo) ModTime() time.Time { return time.Now() }
|
||||
|
||||
// IsDir is
|
||||
func (d *DummyFileInfo) IsDir() bool { return false }
|
||||
|
||||
//Sys is
|
||||
// Sys is
|
||||
func (d *DummyFileInfo) Sys() interface{} { return nil }
|
||||
|
||||
func (l *base) scanWordPress() error {
|
||||
@@ -739,9 +820,10 @@ func (l *base) scanWordPress() error {
|
||||
return nil
|
||||
}
|
||||
l.log.Info("Scanning WordPress...")
|
||||
cmd := fmt.Sprintf("sudo -u %s -i -- %s cli version --allow-root",
|
||||
cmd := fmt.Sprintf("sudo -u %s -i -- %s core version --path=%s --allow-root",
|
||||
l.ServerInfo.WordPress.OSUser,
|
||||
l.ServerInfo.WordPress.CmdPath)
|
||||
l.ServerInfo.WordPress.CmdPath,
|
||||
l.ServerInfo.WordPress.DocRoot)
|
||||
if r := exec(l.ServerInfo, cmd, noSudo); !r.isSuccess() {
|
||||
return xerrors.Errorf("Failed to exec `%s`. Check the OS user, command path of wp-cli, DocRoot and permission: %#v", cmd, l.ServerInfo.WordPress)
|
||||
}
|
||||
@@ -783,7 +865,7 @@ func (l *base) detectWordPress() (*models.WordPressPackages, error) {
|
||||
}
|
||||
|
||||
func (l *base) detectWpCore() (string, error) {
|
||||
cmd := fmt.Sprintf("sudo -u %s -i -- %s core version --path=%s --allow-root",
|
||||
cmd := fmt.Sprintf("sudo -u %s -i -- %s core version --path=%s --allow-root 2>/dev/null",
|
||||
l.ServerInfo.WordPress.OSUser,
|
||||
l.ServerInfo.WordPress.CmdPath,
|
||||
l.ServerInfo.WordPress.DocRoot)
|
||||
|
||||
@@ -4,18 +4,21 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/dotnet/nuget"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/golang/binary"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/golang/mod"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/java/jar"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/nodejs/npm"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/nodejs/yarn"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/php/composer"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/python/pip"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/python/pipenv"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/python/poetry"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/ruby/bundler"
|
||||
_ "github.com/aquasecurity/fanal/analyzer/language/rust/cargo"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/dotnet/deps"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/dotnet/nuget"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/golang/binary"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/golang/mod"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/jar"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/pom"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/npm"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/pnpm"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/yarn"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/php/composer"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/python/pip"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/python/pipenv"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/python/poetry"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/ruby/bundler"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/rust/cargo"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
)
|
||||
|
||||
@@ -48,8 +48,7 @@ func detectDebian(c config.ServerInfo) (bool, osTypeInterface, error) {
|
||||
return false, nil, nil
|
||||
}
|
||||
if r.ExitStatus == 255 {
|
||||
deb := newDebian(c) // Panic occur when return value 2 is nil and 3 is non-nil
|
||||
return false, deb, xerrors.Errorf("Unable to connect via SSH. Scan with -vvv option to print SSH debugging messages and check SSH settings. If you have never SSH to the host to be scanned, SSH to the host before scanning in order to add the HostKey. %s@%s port: %s\n%s", c.User, c.Host, c.Port, r)
|
||||
return false, &unknown{base{ServerInfo: c}}, xerrors.Errorf("Unable to connect via SSH. Scan with -vvv option to print SSH debugging messages and check SSH settings.\n%s", r)
|
||||
}
|
||||
logging.Log.Debugf("Not Debian like Linux. %s", r)
|
||||
return false, nil, nil
|
||||
@@ -1156,7 +1155,7 @@ func (o *debian) checkrestart() error {
|
||||
o.Packages[p.Name] = pack
|
||||
|
||||
for j, proc := range p.NeedRestartProcs {
|
||||
if proc.HasInit == false {
|
||||
if !proc.HasInit {
|
||||
continue
|
||||
}
|
||||
packs[i].NeedRestartProcs[j].InitSystem = initName
|
||||
|
||||
@@ -62,7 +62,7 @@ const sudo = true
|
||||
// noSudo is Const value for normal user mode
|
||||
const noSudo = false
|
||||
|
||||
// Issue commands to the target servers in parallel via SSH or local execution. If execution fails, the server will be excluded from the target server list(servers) and added to the error server list(errServers).
|
||||
// Issue commands to the target servers in parallel via SSH or local execution. If execution fails, the server will be excluded from the target server list(servers) and added to the error server list(errServers).
|
||||
func parallelExec(fn func(osTypeInterface) error, timeoutSec ...int) {
|
||||
resChan := make(chan osTypeInterface, len(servers))
|
||||
defer close(resChan)
|
||||
@@ -128,7 +128,6 @@ func parallelExec(fn func(osTypeInterface) error, timeoutSec ...int) {
|
||||
}
|
||||
}
|
||||
servers = successes
|
||||
return
|
||||
}
|
||||
|
||||
func exec(c config.ServerInfo, cmd string, sudo bool, log ...logging.Logger) (result execResult) {
|
||||
@@ -186,10 +185,10 @@ func sshExecExternal(c config.ServerInfo, cmd string, sudo bool) (result execRes
|
||||
return execResult{Error: err}
|
||||
}
|
||||
|
||||
defaultSSHArgs := []string{"-tt"}
|
||||
args := []string{"-tt"}
|
||||
|
||||
if 0 < len(c.SSHConfigPath) {
|
||||
defaultSSHArgs = append(defaultSSHArgs, "-F", c.SSHConfigPath)
|
||||
if c.SSHConfigPath != "" {
|
||||
args = append(args, "-F", c.SSHConfigPath)
|
||||
} else {
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
@@ -200,7 +199,7 @@ func sshExecExternal(c config.ServerInfo, cmd string, sudo bool) (result execRes
|
||||
}
|
||||
controlPath := filepath.Join(home, ".vuls", `controlmaster-%r-`+c.ServerName+`.%p`)
|
||||
|
||||
defaultSSHArgs = append(defaultSSHArgs,
|
||||
args = append(args,
|
||||
"-o", "StrictHostKeyChecking=yes",
|
||||
"-o", "LogLevel=quiet",
|
||||
"-o", "ConnectionAttempts=3",
|
||||
@@ -212,19 +211,22 @@ func sshExecExternal(c config.ServerInfo, cmd string, sudo bool) (result execRes
|
||||
}
|
||||
|
||||
if config.Conf.Vvv {
|
||||
defaultSSHArgs = append(defaultSSHArgs, "-vvv")
|
||||
args = append(args, "-vvv")
|
||||
}
|
||||
|
||||
if len(c.JumpServer) != 0 {
|
||||
defaultSSHArgs = append(defaultSSHArgs, "-J", strings.Join(c.JumpServer, ","))
|
||||
args = append(args, "-J", strings.Join(c.JumpServer, ","))
|
||||
}
|
||||
|
||||
args := append(defaultSSHArgs, fmt.Sprintf("%s@%s", c.User, c.Host))
|
||||
args = append(args, "-p", c.Port)
|
||||
if 0 < len(c.KeyPath) {
|
||||
if c.User != "" {
|
||||
args = append(args, "-l", c.User)
|
||||
}
|
||||
if c.Port != "" {
|
||||
args = append(args, "-p", c.Port)
|
||||
}
|
||||
if c.KeyPath != "" {
|
||||
args = append(args, "-i", c.KeyPath)
|
||||
args = append(args, "-o", "PasswordAuthentication=no")
|
||||
}
|
||||
args = append(args, c.Host)
|
||||
|
||||
cmd = decorateCmd(c, cmd, sudo)
|
||||
cmd = fmt.Sprintf("stty cols 1000; %s", cmd)
|
||||
|
||||
116
scanner/fedora.go
Normal file
@@ -0,0 +1,116 @@
package scanner

import (
"github.com/future-architect/vuls/config"
"github.com/future-architect/vuls/logging"
"github.com/future-architect/vuls/models"
)

// inherit OsTypeInterface
type fedora struct {
redhatBase
}

// NewFedora is constructor
func newFedora(c config.ServerInfo) *fedora {
r := &fedora{
redhatBase{
base: base{
osPackages: osPackages{
Packages: models.Packages{},
VulnInfos: models.VulnInfos{},
},
},
sudo: rootPrivFedora{},
},
}
r.log = logging.NewNormalLogger()
r.setServerInfo(c)
return r
}

func (o *fedora) checkScanMode() error {
return nil
}

func (o *fedora) checkDeps() error {
if o.getServerInfo().Mode.IsFast() {
return o.execCheckDeps(o.depsFast())
} else if o.getServerInfo().Mode.IsFastRoot() {
return o.execCheckDeps(o.depsFastRoot())
} else {
return o.execCheckDeps(o.depsDeep())
}
}

func (o *fedora) depsFast() []string {
if o.getServerInfo().Mode.IsOffline() {
return []string{}
}

// repoquery
return []string{"dnf-utils"}
}

func (o *fedora) depsFastRoot() []string {
if o.getServerInfo().Mode.IsOffline() {
return []string{}
}

// repoquery
return []string{"dnf-utils"}
}

func (o *fedora) depsDeep() []string {
return o.depsFastRoot()
}

func (o *fedora) checkIfSudoNoPasswd() error {
if o.getServerInfo().Mode.IsFast() {
return o.execCheckIfSudoNoPasswd(o.sudoNoPasswdCmdsFast())
} else if o.getServerInfo().Mode.IsFastRoot() {
return o.execCheckIfSudoNoPasswd(o.sudoNoPasswdCmdsFastRoot())
} else {
return o.execCheckIfSudoNoPasswd(o.sudoNoPasswdCmdsDeep())
}
}

func (o *fedora) sudoNoPasswdCmdsFast() []cmd {
return []cmd{}
}

func (o *fedora) sudoNoPasswdCmdsFastRoot() []cmd {
if !o.ServerInfo.IsContainer() {
return []cmd{
{"repoquery -h", exitStatusZero},
{"needs-restarting", exitStatusZero},
{"which which", exitStatusZero},
{"stat /proc/1/exe", exitStatusZero},
{"ls -l /proc/1/exe", exitStatusZero},
{"cat /proc/1/maps", exitStatusZero},
{"lsof -i -P -n", exitStatusZero},
}
}
return []cmd{
{"repoquery -h", exitStatusZero},
{"needs-restarting", exitStatusZero},
}
}

func (o *fedora) sudoNoPasswdCmdsDeep() []cmd {
return o.sudoNoPasswdCmdsFastRoot()
}

type rootPrivFedora struct{}

func (o rootPrivFedora) repoquery() bool {
return false
}

func (o rootPrivFedora) yumMakeCache() bool {
return false
}

func (o rootPrivFedora) yumPS() bool {
return false
}
@@ -34,7 +34,7 @@ func newBsd(c config.ServerInfo) *bsd {
return d
}

//https://github.com/mizzy/specinfra/blob/master/lib/specinfra/helper/detect_os/freebsd.rb
// https://github.com/mizzy/specinfra/blob/master/lib/specinfra/helper/detect_os/freebsd.rb
func detectFreebsd(c config.ServerInfo) (bool, osTypeInterface) {
// Prevent from adding `set -o pipefail` option
c.Distro = config.Distro{Family: constant.FreeBSD}

@@ -1,7 +1,7 @@
package scanner

import (
"github.com/aquasecurity/fanal/types"
"github.com/aquasecurity/trivy/pkg/fanal/types"
"github.com/future-architect/vuls/models"
)
@@ -4,6 +4,7 @@ import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
@@ -16,96 +17,173 @@ import (
|
||||
ver "github.com/knqyf263/go-rpm-version"
|
||||
)
|
||||
|
||||
var releasePattern = regexp.MustCompile(`(.*) release (\d[\d\.]*)`)
|
||||
|
||||
// https://github.com/serverspec/specinfra/blob/master/lib/specinfra/helper/detect_os/redhat.rb
|
||||
func detectRedhat(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
if r := exec(c, "ls /etc/fedora-release", noSudo); r.isSuccess() {
|
||||
logging.Log.Warnf("Fedora not tested yet: %s", r)
|
||||
return true, &unknown{}
|
||||
if r := exec(c, "cat /etc/fedora-release", noSudo); r.isSuccess() {
|
||||
fed := newFedora(c)
|
||||
result := releasePattern.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
fed.setErrs([]error{xerrors.Errorf("Failed to parse /etc/fedora-release. r.Stdout: %s", r.Stdout)})
|
||||
return true, fed
|
||||
}
|
||||
release := result[2]
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
fed.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, fed
|
||||
}
|
||||
if major < 32 {
|
||||
fed.setErrs([]error{xerrors.Errorf("Failed to init Fedora. err: not supported major version. versions prior to Fedora 32 are not supported, detected version is %s", release)})
|
||||
return true, fed
|
||||
}
|
||||
fed.setDistro(constant.Fedora, release)
|
||||
return true, fed
|
||||
}
|
||||
}
|
||||
|
||||
if r := exec(c, "ls /etc/oracle-release", noSudo); r.isSuccess() {
|
||||
// Need to discover Oracle Linux first, because it provides an
|
||||
// /etc/redhat-release that matches the upstream distribution
|
||||
if r := exec(c, "cat /etc/oracle-release", noSudo); r.isSuccess() {
|
||||
re := regexp.MustCompile(`(.*) release (\d[\d\.]*)`)
|
||||
result := re.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
logging.Log.Warnf("Failed to parse Oracle Linux version: %s", r)
|
||||
return true, newOracle(c)
|
||||
}
|
||||
|
||||
ora := newOracle(c)
|
||||
result := releasePattern.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
ora.setErrs([]error{xerrors.Errorf("Failed to parse /etc/oracle-release. r.Stdout: %s", r.Stdout)})
|
||||
return true, ora
|
||||
}
|
||||
release := result[2]
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
ora.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, ora
|
||||
}
|
||||
if major < 5 {
|
||||
ora.setErrs([]error{xerrors.Errorf("Failed to init Oracle Linux. err: not supported major version. versions prior to Oracle Linux 5 are not supported, detected version is %s", release)})
|
||||
return true, ora
|
||||
}
|
||||
ora.setDistro(constant.Oracle, release)
|
||||
return true, ora
|
||||
}
|
||||
}
|
||||
|
||||
if r := exec(c, "ls /etc/almalinux-release", noSudo); r.isSuccess() {
|
||||
if r := exec(c, "cat /etc/almalinux-release", noSudo); r.isSuccess() {
|
||||
alma := newAlma(c)
|
||||
result := releasePattern.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
alma.setErrs([]error{xerrors.Errorf("Failed to parse /etc/almalinux-release. r.Stdout: %s", r.Stdout)})
|
||||
return true, alma
|
||||
}
|
||||
|
||||
release := result[2]
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
alma.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, alma
|
||||
}
|
||||
if major < 8 {
|
||||
alma.setErrs([]error{xerrors.Errorf("Failed to init AlmaLinux. err: not supported major version. versions prior to AlmaLinux 8 are not supported, detected version is %s", release)})
|
||||
return true, alma
|
||||
}
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "alma", "almalinux":
|
||||
alma.setDistro(constant.Alma, release)
|
||||
return true, alma
|
||||
default:
|
||||
alma.setErrs([]error{xerrors.Errorf("Failed to parse AlmaLinux Name. release: %s", release)})
|
||||
return true, alma
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r := exec(c, "ls /etc/rocky-release", noSudo); r.isSuccess() {
|
||||
if r := exec(c, "cat /etc/rocky-release", noSudo); r.isSuccess() {
|
||||
rocky := newRocky(c)
|
||||
result := releasePattern.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
rocky.setErrs([]error{xerrors.Errorf("Failed to parse /etc/rocky-release. r.Stdout: %s", r.Stdout)})
|
||||
return true, rocky
|
||||
}
|
||||
|
||||
release := result[2]
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
rocky.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, rocky
|
||||
}
|
||||
if major < 8 {
|
||||
rocky.setErrs([]error{xerrors.Errorf("Failed to init Rocky Linux. err: not supported major version. versions prior to Rocky Linux 8 are not supported, detected version is %s", release)})
|
||||
return true, rocky
|
||||
}
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "rocky", "rocky linux":
|
||||
rocky.setDistro(constant.Rocky, release)
|
||||
return true, rocky
|
||||
default:
|
||||
rocky.setErrs([]error{xerrors.Errorf("Failed to parse Rocky Linux Name. release: %s", release)})
|
||||
return true, rocky
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// https://bugzilla.redhat.com/show_bug.cgi?id=1332025
|
||||
// CentOS cloud image
|
||||
if r := exec(c, "ls /etc/centos-release", noSudo); r.isSuccess() {
|
||||
if r := exec(c, "cat /etc/centos-release", noSudo); r.isSuccess() {
|
||||
re := regexp.MustCompile(`(.*) release (\d[\d\.]*)`)
|
||||
result := re.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
result := releasePattern.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
logging.Log.Warnf("Failed to parse CentOS version: %s", r)
|
||||
return true, newCentOS(c)
|
||||
cent := newCentOS(c)
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to parse /etc/centos-release. r.Stdout: %s", r.Stdout)})
|
||||
return true, cent
|
||||
}
|
||||
|
||||
release := result[2]
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "centos", "centos linux", "centos stream":
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
cent := newCentOS(c)
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "centos", "centos linux":
|
||||
cent := newCentOS(c)
|
||||
if major < 5 {
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to init CentOS. err: not supported major version. versions prior to CentOS 5 are not supported, detected version is %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
cent.setDistro(constant.CentOS, release)
|
||||
return true, cent
|
||||
case "centos stream":
|
||||
cent := newCentOS(c)
|
||||
if major < 8 {
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to init CentOS Stream. err: not supported major version. versions prior to CentOS Stream 8 are not supported, detected version is %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
cent.setDistro(constant.CentOS, fmt.Sprintf("stream%s", release))
|
||||
return true, cent
|
||||
case "alma", "almalinux":
|
||||
alma := newAlma(c)
|
||||
if major < 8 {
|
||||
alma.setErrs([]error{xerrors.Errorf("Failed to init AlmaLinux. err: not supported major version. versions prior to AlmaLinux 8 are not supported, detected version is %s", release)})
|
||||
return true, alma
|
||||
}
|
||||
alma.setDistro(constant.Alma, release)
|
||||
return true, alma
|
||||
default:
|
||||
logging.Log.Warnf("Failed to parse CentOS: %s", r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r := exec(c, "ls /etc/almalinux-release", noSudo); r.isSuccess() {
|
||||
if r := exec(c, "cat /etc/almalinux-release", noSudo); r.isSuccess() {
|
||||
re := regexp.MustCompile(`(.*) release (\d[\d\.]*)`)
|
||||
result := re.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
logging.Log.Warnf("Failed to parse Alma version: %s", r)
|
||||
return true, newAlma(c)
|
||||
}
|
||||
|
||||
release := result[2]
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "alma", "almalinux":
|
||||
alma := newAlma(c)
|
||||
alma.setDistro(constant.Alma, release)
|
||||
return true, alma
|
||||
default:
|
||||
logging.Log.Warnf("Failed to parse Alma: %s", r)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r := exec(c, "ls /etc/rocky-release", noSudo); r.isSuccess() {
|
||||
if r := exec(c, "cat /etc/rocky-release", noSudo); r.isSuccess() {
|
||||
re := regexp.MustCompile(`(.*) release (\d[\d\.]*)`)
|
||||
result := re.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
logging.Log.Warnf("Failed to parse Rocky version: %s", r)
|
||||
return true, newRocky(c)
|
||||
}
|
||||
|
||||
release := result[2]
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "rocky", "rocky linux":
|
||||
rocky := newRocky(c)
|
||||
if major < 8 {
|
||||
rocky.setErrs([]error{xerrors.Errorf("Failed to init Rocky Linux. err: not supported major version. versions prior to Rocky Linux 8 are not supported, detected version is %s", release)})
|
||||
return true, rocky
|
||||
}
|
||||
rocky.setDistro(constant.Rocky, release)
|
||||
return true, rocky
|
||||
default:
|
||||
logging.Log.Warnf("Failed to parse Rocky: %s", r)
|
||||
cent := newCentOS(c)
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to parse CentOS Name. release: %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -114,34 +192,72 @@ func detectRedhat(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
// https://www.rackaid.com/blog/how-to-determine-centos-or-red-hat-version/
|
||||
// e.g.
|
||||
// $ cat /etc/redhat-release
|
||||
// Red Hat Enterprise Linux Server release 6.8 (Santiago)
|
||||
// CentOS release 6.5 (Final)
|
||||
// CentOS Stream release 8
|
||||
// AlmaLinux release 8.5 (Arctic Sphynx)
|
||||
// Rocky Linux release 8.5 (Green Obsidian)
|
||||
// Fedora release 35 (Thirty Five)
|
||||
if r := exec(c, "cat /etc/redhat-release", noSudo); r.isSuccess() {
|
||||
re := regexp.MustCompile(`(.*) release (\d[\d\.]*)`)
|
||||
result := re.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
logging.Log.Warnf("Failed to parse RedHat/CentOS version: %s", r)
|
||||
return true, newCentOS(c)
|
||||
}
|
||||
|
||||
release := result[2]
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "centos", "centos linux", "centos stream":
|
||||
cent := newCentOS(c)
|
||||
cent.setDistro(constant.CentOS, release)
|
||||
return true, cent
|
||||
case "alma", "almalinux":
|
||||
alma := newAlma(c)
|
||||
alma.setDistro(constant.Alma, release)
|
||||
return true, alma
|
||||
case "rocky", "rocky linux":
|
||||
rocky := newRocky(c)
|
||||
rocky.setDistro(constant.Rocky, release)
|
||||
return true, rocky
|
||||
default:
|
||||
// RHEL
|
||||
rhel := newRHEL(c)
|
||||
rhel.setDistro(constant.RedHat, release)
|
||||
return true, rhel
|
||||
result := releasePattern.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) == 3 {
|
||||
release := result[2]
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
rhel := newRHEL(c)
|
||||
rhel.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, rhel
|
||||
}
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "fedora":
|
||||
fed := newFedora(c)
|
||||
if major < 32 {
|
||||
fed.setErrs([]error{xerrors.Errorf("Failed to init Fedora. err: not supported major version. versions prior to Fedora 32 are not supported, detected version is %s", release)})
|
||||
return true, fed
|
||||
}
|
||||
fed.setDistro(constant.Fedora, release)
|
||||
return true, fed
|
||||
case "centos", "centos linux":
|
||||
cent := newCentOS(c)
|
||||
if major < 5 {
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to init CentOS. err: not supported major version. versions prior to CentOS 5 are not supported, detected version is %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
cent.setDistro(constant.CentOS, release)
|
||||
return true, cent
|
||||
case "centos stream":
|
||||
cent := newCentOS(c)
|
||||
if major < 8 {
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to init CentOS Stream. err: not supported major version. versions prior to CentOS Stream 8 are not supported, detected version is %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
cent.setDistro(constant.CentOS, fmt.Sprintf("stream%s", release))
|
||||
return true, cent
|
||||
case "alma", "almalinux":
|
||||
alma := newAlma(c)
|
||||
if major < 8 {
|
||||
alma.setErrs([]error{xerrors.Errorf("Failed to init AlmaLinux. err: not supported major version. versions prior to AlmaLinux 8 are not supported, detected version is %s", release)})
|
||||
return true, alma
|
||||
}
|
||||
alma.setDistro(constant.Alma, release)
|
||||
return true, alma
|
||||
case "rocky", "rocky linux":
|
||||
rocky := newRocky(c)
|
||||
if major < 8 {
|
||||
rocky.setErrs([]error{xerrors.Errorf("Failed to init Rocky Linux. err: not supported major version. versions prior to Rocky Linux 8 are not supported, detected version is %s", release)})
|
||||
return true, rocky
|
||||
}
|
||||
rocky.setDistro(constant.Rocky, release)
|
||||
return true, rocky
|
||||
default:
|
||||
rhel := newRHEL(c)
|
||||
if major < 5 {
|
||||
rhel.setErrs([]error{xerrors.Errorf("Failed to init RedHat Enterprise Linux. err: not supported major version. versions prior to RedHat Enterprise Linux 5 are not supported, detected version is %s", release)})
|
||||
return true, rhel
|
||||
}
|
||||
rhel.setDistro(constant.RedHat, release)
|
||||
return true, rhel
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -328,13 +444,28 @@ func (o *redhatBase) scanInstalledPackages() (models.Packages, error) {
|
||||
Version: version,
|
||||
}
|
||||
|
||||
r := o.exec(o.rpmQa(), noSudo)
|
||||
var r execResult
|
||||
switch o.getDistro().Family {
|
||||
case constant.Amazon:
|
||||
switch strings.Fields(o.getDistro().Release)[0] {
|
||||
case "2":
|
||||
if o.exec("rpm -q yum-utils", noSudo).isSuccess() {
|
||||
r = o.exec("repoquery --all --pkgnarrow=installed --qf='%{NAME} %{EPOCH} %{VERSION} %{RELEASE} %{ARCH} %{UI_FROM_REPO}'", o.sudo.repoquery())
|
||||
} else {
|
||||
r = o.exec(o.rpmQa(), noSudo)
|
||||
}
|
||||
default:
|
||||
r = o.exec(o.rpmQa(), noSudo)
|
||||
}
|
||||
default:
|
||||
r = o.exec(o.rpmQa(), noSudo)
|
||||
}
|
||||
if !r.isSuccess() {
|
||||
return nil, xerrors.Errorf("Scan packages failed: %s", r)
|
||||
}
|
||||
installed, _, err := o.parseInstalledPackages(r.Stdout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, xerrors.Errorf("Failed to parse installed packages. err: %w", err)
|
||||
}
|
||||
return installed, nil
|
||||
}
|
||||
@@ -349,7 +480,29 @@ func (o *redhatBase) parseInstalledPackages(stdout string) (models.Packages, mod
|
||||
if trimmed := strings.TrimSpace(line); trimmed == "" {
|
||||
continue
|
||||
}
|
||||
pack, err := o.parseInstalledPackagesLine(line)
|
||||
|
||||
var (
|
||||
pack *models.Package
|
||||
err error
|
||||
)
|
||||
switch o.getDistro().Family {
|
||||
case constant.Amazon:
|
||||
switch strings.Fields(o.getDistro().Release)[0] {
|
||||
case "2":
|
||||
switch len(strings.Fields(line)) {
|
||||
case 5:
|
||||
pack, err = o.parseInstalledPackagesLine(line)
|
||||
case 6:
|
||||
pack, err = o.parseInstalledPackagesLineFromRepoquery(line)
|
||||
default:
|
||||
return nil, nil, xerrors.Errorf("Failed to parse package line: %s", line)
|
||||
}
|
||||
default:
|
||||
pack, err = o.parseInstalledPackagesLine(line)
|
||||
}
|
||||
default:
|
||||
pack, err = o.parseInstalledPackagesLine(line)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -402,6 +555,34 @@ func (o *redhatBase) parseInstalledPackagesLine(line string) (*models.Package, e
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (o *redhatBase) parseInstalledPackagesLineFromRepoquery(line string) (*models.Package, error) {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) != 6 {
|
||||
return nil, xerrors.Errorf("Failed to parse package line: %s", line)
|
||||
}
|
||||
|
||||
ver := ""
|
||||
epoch := fields[1]
|
||||
if epoch == "0" || epoch == "(none)" {
|
||||
ver = fields[2]
|
||||
} else {
|
||||
ver = fmt.Sprintf("%s:%s", epoch, fields[2])
|
||||
}
|
||||
|
||||
repo := strings.TrimPrefix(fields[5], "@")
|
||||
if repo == "installed" {
|
||||
repo = "amzn2-core"
|
||||
}
|
||||
|
||||
return &models.Package{
|
||||
Name: fields[0],
|
||||
Version: ver,
|
||||
Release: fields[3],
|
||||
Arch: fields[4],
|
||||
Repository: repo,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (o *redhatBase) parseRpmQfLine(line string) (pkg *models.Package, ignored bool, err error) {
|
||||
for _, suffix := range []string{
|
||||
"Permission denied",
|
||||
@@ -494,12 +675,7 @@ func (o *redhatBase) parseUpdatablePacksLine(line string) (models.Package, error
|
||||
|
||||
func (o *redhatBase) isExecYumPS() bool {
|
||||
switch o.Distro.Family {
|
||||
case constant.Oracle,
|
||||
constant.OpenSUSE,
|
||||
constant.OpenSUSELeap,
|
||||
constant.SUSEEnterpriseServer,
|
||||
constant.SUSEEnterpriseDesktop,
|
||||
constant.SUSEOpenstackCloud:
|
||||
case constant.Oracle:
|
||||
return false
|
||||
}
|
||||
return !o.getServerInfo().Mode.IsFast()
|
||||
@@ -507,21 +683,33 @@ func (o *redhatBase) isExecYumPS() bool {
|
||||
|
||||
func (o *redhatBase) isExecNeedsRestarting() bool {
|
||||
switch o.Distro.Family {
|
||||
case constant.OpenSUSE,
|
||||
constant.OpenSUSELeap,
|
||||
constant.SUSEEnterpriseServer,
|
||||
constant.SUSEEnterpriseDesktop,
|
||||
constant.SUSEOpenstackCloud:
|
||||
// TODO zypper ps
|
||||
// https://github.com/future-architect/vuls/issues/696
|
||||
case constant.OpenSUSE, constant.OpenSUSELeap, constant.SUSEEnterpriseServer, constant.SUSEEnterpriseDesktop:
|
||||
if o.getServerInfo().Mode.IsOffline() {
|
||||
return false
|
||||
} else if o.getServerInfo().Mode.IsFastRoot() || o.getServerInfo().Mode.IsDeep() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
case constant.RedHat, constant.CentOS, constant.Rocky, constant.Oracle:
|
||||
case constant.RedHat, constant.CentOS, constant.Alma, constant.Rocky, constant.Oracle:
|
||||
majorVersion, err := o.Distro.MajorVersion()
|
||||
if err != nil || majorVersion < 6 {
|
||||
o.log.Errorf("Not implemented yet: %s, err: %+v", o.Distro, err)
|
||||
return false
|
||||
}
|
||||
|
||||
if o.getServerInfo().Mode.IsOffline() {
|
||||
return false
|
||||
} else if o.getServerInfo().Mode.IsFastRoot() || o.getServerInfo().Mode.IsDeep() {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
case constant.Fedora:
|
||||
majorVersion, err := o.Distro.MajorVersion()
|
||||
if err != nil || majorVersion < 13 {
|
||||
o.log.Errorf("Not implemented yet: %s, err: %+v", o.Distro, err)
|
||||
return false
|
||||
}
|
||||
|
||||
if o.getServerInfo().Mode.IsOffline() {
|
||||
return false
|
||||
} else if o.getServerInfo().Mode.IsFastRoot() ||
|
||||
@@ -531,10 +719,7 @@ func (o *redhatBase) isExecNeedsRestarting() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
if o.getServerInfo().Mode.IsFast() {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
return !o.getServerInfo().Mode.IsFast()
|
||||
}
|
||||
|
||||
func (o *redhatBase) needsRestarting() error {
|
||||
@@ -616,7 +801,7 @@ func (o *redhatBase) parseNeedsRestarting(stdout string) (procs []models.NeedRes
|
||||
return
|
||||
}
|
||||
|
||||
//TODO refactor
|
||||
// TODO refactor
|
||||
// procPathToFQPN returns Fully-Qualified-Package-Name from the command
|
||||
func (o *redhatBase) procPathToFQPN(execCommand string) (string, error) {
|
||||
execCommand = strings.Replace(execCommand, "\x00", " ", -1) // for CentOS6.9
|
||||
@@ -660,41 +845,55 @@ func (o *redhatBase) getOwnerPkgs(paths []string) (names []string, _ error) {
|
||||
|
||||
func (o *redhatBase) rpmQa() string {
|
||||
const old = `rpm -qa --queryformat "%{NAME} %{EPOCH} %{VERSION} %{RELEASE} %{ARCH}\n"`
|
||||
const new = `rpm -qa --queryformat "%{NAME} %{EPOCHNUM} %{VERSION} %{RELEASE} %{ARCH}\n"`
|
||||
const newer = `rpm -qa --queryformat "%{NAME} %{EPOCHNUM} %{VERSION} %{RELEASE} %{ARCH}\n"`
|
||||
switch o.Distro.Family {
|
||||
case constant.SUSEEnterpriseServer:
|
||||
case constant.OpenSUSE:
|
||||
if o.Distro.Release == "tumbleweed" {
|
||||
return newer
|
||||
}
|
||||
return old
|
||||
case constant.OpenSUSELeap:
|
||||
return newer
|
||||
case constant.SUSEEnterpriseServer, constant.SUSEEnterpriseDesktop:
|
||||
if v, _ := o.Distro.MajorVersion(); v < 12 {
|
||||
return old
|
||||
}
|
||||
return new
|
||||
return newer
|
||||
default:
|
||||
if v, _ := o.Distro.MajorVersion(); v < 6 {
|
||||
return old
|
||||
}
|
||||
return new
|
||||
return newer
|
||||
}
|
||||
}
|
||||
|
||||
func (o *redhatBase) rpmQf() string {
|
||||
const old = `rpm -qf --queryformat "%{NAME} %{EPOCH} %{VERSION} %{RELEASE} %{ARCH}\n" `
|
||||
const new = `rpm -qf --queryformat "%{NAME} %{EPOCHNUM} %{VERSION} %{RELEASE} %{ARCH}\n" `
|
||||
const newer = `rpm -qf --queryformat "%{NAME} %{EPOCHNUM} %{VERSION} %{RELEASE} %{ARCH}\n" `
|
||||
switch o.Distro.Family {
|
||||
case constant.SUSEEnterpriseServer:
|
||||
case constant.OpenSUSE:
|
||||
if o.Distro.Release == "tumbleweed" {
|
||||
return newer
|
||||
}
|
||||
return old
|
||||
case constant.OpenSUSELeap:
|
||||
return newer
|
||||
case constant.SUSEEnterpriseServer, constant.SUSEEnterpriseDesktop:
|
||||
if v, _ := o.Distro.MajorVersion(); v < 12 {
|
||||
return old
|
||||
}
|
||||
return new
|
||||
return newer
|
||||
default:
|
||||
if v, _ := o.Distro.MajorVersion(); v < 6 {
|
||||
return old
|
||||
}
|
||||
return new
|
||||
return newer
|
||||
}
|
||||
}
|
||||
|
||||
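The two query formats above differ only in the epoch macro: %{EPOCH} prints "(none)" when a package has no epoch, while %{EPOCHNUM}, available on newer rpm, prints "0". A small sketch of how downstream parsing can treat both spellings the same way (normalizeEpoch is an illustrative helper, not part of vuls):

package main

import "fmt"

// normalizeEpoch: "(none)" (from %{EPOCH}) and "0" (from %{EPOCHNUM}) both
// mean the version carries no epoch prefix; anything else becomes "N:".
func normalizeEpoch(epoch string) string {
	if epoch == "" || epoch == "0" || epoch == "(none)" {
		return ""
	}
	return epoch + ":"
}

func main() {
	fmt.Println(normalizeEpoch("(none)") + "1.0.1e") // 1.0.1e
	fmt.Println(normalizeEpoch("0") + "1.0.1e")      // 1.0.1e
	fmt.Println(normalizeEpoch("1") + "5.6.19")      // 1:5.6.19
}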
func (o *redhatBase) detectEnabledDnfModules() ([]string, error) {
|
||||
switch o.Distro.Family {
|
||||
case constant.RedHat, constant.CentOS, constant.Alma, constant.Rocky:
|
||||
case constant.RedHat, constant.CentOS, constant.Alma, constant.Rocky, constant.Fedora:
|
||||
//TODO OracleLinux
|
||||
default:
|
||||
return nil, nil
|
||||
|
||||
@@ -17,10 +17,10 @@ import (
|
||||
|
||||
func TestParseInstalledPackagesLinesRedhat(t *testing.T) {
|
||||
r := newRHEL(config.ServerInfo{})
|
||||
r.Distro = config.Distro{Family: constant.RedHat}
|
||||
|
||||
var packagetests = []struct {
|
||||
in string
|
||||
distro config.Distro
|
||||
kernel models.Kernel
|
||||
packages models.Packages
|
||||
}{
|
||||
@@ -30,6 +30,7 @@ Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x84_64
|
||||
kernel 0 2.6.32 696.20.1.el6 x86_64
|
||||
kernel 0 2.6.32 696.20.3.el6 x86_64
|
||||
kernel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
distro: config.Distro{Family: constant.RedHat},
|
||||
kernel: models.Kernel{},
|
||||
packages: models.Packages{
|
||||
"openssl": models.Package{
|
||||
@@ -58,6 +59,7 @@ kernel 0 2.6.32 695.20.3.el6 x86_64
|
||||
kernel-devel 0 2.6.32 696.20.1.el6 x86_64
|
||||
kernel-devel 0 2.6.32 696.20.3.el6 x86_64
|
||||
kernel-devel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
distro: config.Distro{Family: constant.RedHat},
|
||||
kernel: models.Kernel{Release: "2.6.32-696.20.3.el6.x86_64"},
|
||||
packages: models.Packages{
|
||||
"openssl": models.Package{
|
||||
@@ -91,6 +93,7 @@ kernel 0 2.6.32 695.20.3.el6 x86_64
|
||||
kernel-devel 0 2.6.32 696.20.1.el6 x86_64
|
||||
kernel-devel 0 2.6.32 696.20.3.el6 x86_64
|
||||
kernel-devel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
distro: config.Distro{Family: constant.RedHat},
|
||||
kernel: models.Kernel{Release: "2.6.32-695.20.3.el6.x86_64"},
|
||||
packages: models.Packages{
|
||||
"openssl": models.Package{
|
||||
@@ -115,9 +118,65 @@ kernel-devel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: `openssl 0 1.0.1e 30.el6.11 x86_64
|
||||
Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x84_64
|
||||
kernel 0 2.6.32 696.20.1.el6 x86_64
|
||||
kernel 0 2.6.32 696.20.3.el6 x86_64
|
||||
kernel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
distro: config.Distro{Family: constant.Amazon, Release: "2 (Karoo)"},
|
||||
kernel: models.Kernel{},
|
||||
packages: models.Packages{
|
||||
"openssl": models.Package{
|
||||
Name: "openssl",
|
||||
Version: "1.0.1e",
|
||||
Release: "30.el6.11",
|
||||
},
|
||||
"Percona-Server-shared-56": models.Package{
|
||||
Name: "Percona-Server-shared-56",
|
||||
Version: "1:5.6.19",
|
||||
Release: "rel67.0.el6",
|
||||
},
|
||||
"kernel": models.Package{
|
||||
Name: "kernel",
|
||||
Version: "2.6.32",
|
||||
Release: "696.20.3.el6",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: `yum-utils 0 1.1.31 46.amzn2.0.1 noarch @amzn2-core
|
||||
zlib 0 1.2.7 19.amzn2.0.1 x86_64 installed
|
||||
java-1.8.0-amazon-corretto 1 1.8.0_192.b12 1.amzn2 x86_64 @amzn2extra-corretto8`,
|
||||
distro: config.Distro{Family: constant.Amazon, Release: "2 (Karoo)"},
|
||||
packages: models.Packages{
|
||||
"yum-utils": models.Package{
|
||||
Name: "yum-utils",
|
||||
Version: "1.1.31",
|
||||
Release: "46.amzn2.0.1",
|
||||
Arch: "noarch",
|
||||
Repository: "amzn2-core",
|
||||
},
|
||||
"zlib": models.Package{
|
||||
Name: "zlib",
|
||||
Version: "1.2.7",
|
||||
Release: "19.amzn2.0.1",
|
||||
Arch: "x86_64",
|
||||
Repository: "amzn2-core",
|
||||
},
|
||||
"java-1.8.0-amazon-corretto": models.Package{
|
||||
Name: "java-1.8.0-amazon-corretto",
|
||||
Version: "1:1.8.0_192.b12",
|
||||
Release: "1.amzn2",
|
||||
Arch: "x86_64",
|
||||
Repository: "amzn2extra-corretto8",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range packagetests {
|
||||
r.Distro = tt.distro
|
||||
r.Kernel = tt.kernel
|
||||
packages, _, err := r.parseInstalledPackages(tt.in)
|
||||
if err != nil {
|
||||
@@ -184,6 +243,72 @@ func TestParseInstalledPackagesLine(t *testing.T) {
|
||||
t.Errorf("release: expected %s, actual %s", tt.pack.Release, p.Release)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInstalledPackagesLineFromRepoquery(t *testing.T) {
|
||||
r := newRHEL(config.ServerInfo{})
|
||||
|
||||
var packagetests = []struct {
|
||||
in string
|
||||
pack models.Package
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
in: "yum-utils 0 1.1.31 46.amzn2.0.1 noarch @amzn2-core",
|
||||
pack: models.Package{
|
||||
Name: "yum-utils",
|
||||
Version: "1.1.31",
|
||||
Release: "46.amzn2.0.1",
|
||||
Arch: "noarch",
|
||||
Repository: "amzn2-core",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "zlib 0 1.2.7 19.amzn2.0.1 x86_64 installed",
|
||||
pack: models.Package{
|
||||
Name: "zlib",
|
||||
Version: "1.2.7",
|
||||
Release: "19.amzn2.0.1",
|
||||
Arch: "x86_64",
|
||||
Repository: "amzn2-core",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "java-1.8.0-amazon-corretto 1 1.8.0_192.b12 1.amzn2 x86_64 @amzn2extra-corretto8",
|
||||
pack: models.Package{
|
||||
Name: "java-1.8.0-amazon-corretto",
|
||||
Version: "1:1.8.0_192.b12",
|
||||
Release: "1.amzn2",
|
||||
Arch: "x86_64",
|
||||
Repository: "amzn2extra-corretto8",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range packagetests {
|
||||
p, err := r.parseInstalledPackagesLineFromRepoquery(tt.in)
|
||||
if err == nil && tt.err {
|
||||
t.Errorf("Expected err not occurred: %d", i)
|
||||
}
|
||||
if err != nil && !tt.err {
|
||||
t.Errorf("UnExpected err not occurred: %d", i)
|
||||
}
|
||||
if p.Name != tt.pack.Name {
|
||||
t.Errorf("name: expected %s, actual %s", tt.pack.Name, p.Name)
|
||||
}
|
||||
if p.Version != tt.pack.Version {
|
||||
t.Errorf("version: expected %s, actual %s", tt.pack.Version, p.Version)
|
||||
}
|
||||
if p.Release != tt.pack.Release {
|
||||
t.Errorf("release: expected %s, actual %s", tt.pack.Release, p.Release)
|
||||
}
|
||||
if p.Arch != tt.pack.Arch {
|
||||
t.Errorf("arch: expected %s, actual %s", tt.pack.Arch, p.Arch)
|
||||
}
|
||||
if p.Repository != tt.pack.Repository {
|
||||
t.Errorf("repository: expected %s, actual %s", tt.pack.Repository, p.Repository)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
@@ -478,7 +603,7 @@ func Test_redhatBase_parseRpmQfLine(t *testing.T) {
|
||||
{
|
||||
name: "valid line",
|
||||
fields: fields{base: base{}},
|
||||
args: args{line: "Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x86_64"},
|
||||
args: args{line: "Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x86_64"},
|
||||
wantPkg: &models.Package{
|
||||
Name: "Percona-Server-shared-56",
|
||||
Version: "1:5.6.19",
|
||||
|
||||
@@ -5,15 +5,19 @@ import (
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"os"
|
||||
ex "os/exec"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
debver "github.com/knqyf263/go-deb-version"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/cache"
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
const (
|
||||
@@ -23,10 +27,9 @@ const (
|
||||
)
|
||||
|
||||
var (
|
||||
errOSFamilyHeader = xerrors.New("X-Vuls-OS-Family header is required")
|
||||
errOSReleaseHeader = xerrors.New("X-Vuls-OS-Release header is required")
|
||||
errKernelVersionHeader = xerrors.New("X-Vuls-Kernel-Version header is required")
|
||||
errServerNameHeader = xerrors.New("X-Vuls-Server-Name header is required")
|
||||
errOSFamilyHeader = xerrors.New("X-Vuls-OS-Family header is required")
|
||||
errOSReleaseHeader = xerrors.New("X-Vuls-OS-Release header is required")
|
||||
errServerNameHeader = xerrors.New("X-Vuls-Server-Name header is required")
|
||||
)
|
||||
|
||||
var servers, errServers []osTypeInterface
|
||||
@@ -162,8 +165,15 @@ func ViaHTTP(header http.Header, body string, toLocalFile bool) (models.ScanResu
|
||||
}
|
||||
|
||||
kernelVersion := header.Get("X-Vuls-Kernel-Version")
|
||||
if family == constant.Debian && kernelVersion == "" {
|
||||
return models.ScanResult{}, errKernelVersionHeader
|
||||
if family == constant.Debian {
|
||||
if kernelVersion == "" {
|
||||
logging.Log.Warn("X-Vuls-Kernel-Version is empty. skip kernel vulnerability detection.")
|
||||
} else {
|
||||
if _, err := debver.NewVersion(kernelVersion); err != nil {
|
||||
logging.Log.Warnf("X-Vuls-Kernel-Version is invalid. skip kernel vulnerability detection. actual kernelVersion: %s, err: %s", kernelVersion, err)
|
||||
kernelVersion = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
serverName := header.Get("X-Vuls-Server-Name")
|
||||
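The change above relaxes the Debian handling in ViaHTTP: a missing or unparsable X-Vuls-Kernel-Version header now only logs a warning and disables kernel vulnerability detection instead of rejecting the request. A standalone sketch of the same version check with github.com/knqyf263/go-deb-version (the sample version strings are illustrative):

package main

import (
	"fmt"

	debver "github.com/knqyf263/go-deb-version"
)

func main() {
	// Mirror of the header check above: an unparsable kernel version only
	// disables kernel vulnerability detection rather than failing the request.
	for _, v := range []string{"3.16.51-2", "not-a-version"} {
		if _, err := debver.NewVersion(v); err != nil {
			fmt.Printf("skip kernel vuln detection, invalid version %q: %v\n", v, err)
			continue
		}
		fmt.Printf("kernel version %q accepted\n", v)
	}
}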
@@ -225,6 +235,10 @@ func ParseInstalledPkgs(distro config.Distro, kernel models.Kernel, pkgList stri
|
||||
osType = &oracle{redhatBase: redhatBase{base: base}}
|
||||
case constant.Amazon:
|
||||
osType = &amazon{redhatBase: redhatBase{base: base}}
|
||||
case constant.Fedora:
|
||||
osType = &fedora{redhatBase: redhatBase{base: base}}
|
||||
case constant.OpenSUSE, constant.OpenSUSELeap, constant.SUSEEnterpriseServer, constant.SUSEEnterpriseDesktop:
|
||||
osType = &suse{redhatBase: redhatBase{base: base}}
|
||||
default:
|
||||
return models.Packages{}, models.SrcPackages{}, xerrors.Errorf("Server mode for %s is not implemented yet", base.Distro.Family)
|
||||
}
|
||||
@@ -276,6 +290,12 @@ func (s Scanner) detectServerOSes() (servers, errServers []osTypeInterface) {
|
||||
logging.Log.Debugf("Panic: %s on %s", p, srv.ServerName)
|
||||
}
|
||||
}()
|
||||
if err := validateSSHConfig(&srv); err != nil {
|
||||
checkOS := unknown{base{ServerInfo: srv}}
|
||||
checkOS.setErrs([]error{err})
|
||||
osTypeChan <- &checkOS
|
||||
return
|
||||
}
|
||||
osTypeChan <- s.detectOS(srv)
|
||||
}(target)
|
||||
}
|
||||
@@ -286,12 +306,10 @@ func (s Scanner) detectServerOSes() (servers, errServers []osTypeInterface) {
|
||||
case res := <-osTypeChan:
|
||||
if 0 < len(res.getErrs()) {
|
||||
errServers = append(errServers, res)
|
||||
logging.Log.Errorf("(%d/%d) Failed: %s, err: %+v",
|
||||
i+1, len(s.Targets), res.getServerInfo().ServerName, res.getErrs())
|
||||
logging.Log.Errorf("(%d/%d) Failed: %s, err: %+v", i+1, len(s.Targets), res.getServerInfo().ServerName, res.getErrs())
|
||||
} else {
|
||||
servers = append(servers, res)
|
||||
logging.Log.Infof("(%d/%d) Detected: %s: %s",
|
||||
i+1, len(s.Targets), res.getServerInfo().ServerName, res.getDistro())
|
||||
logging.Log.Infof("(%d/%d) Detected: %s: %s", i+1, len(s.Targets), res.getServerInfo().ServerName, res.getDistro())
|
||||
}
|
||||
case <-timeout:
|
||||
msg := "Timed out while detecting servers"
|
||||
@@ -317,6 +335,214 @@ func (s Scanner) detectServerOSes() (servers, errServers []osTypeInterface) {
|
||||
return
|
||||
}
|
||||
|
||||
func validateSSHConfig(c *config.ServerInfo) error {
|
||||
if isLocalExec(c.Port, c.Host) || c.Type == constant.ServerTypePseudo {
|
||||
return nil
|
||||
}
|
||||
|
||||
logging.Log.Debugf("Validating SSH Settings for Server:%s ...", c.GetServerName())
|
||||
|
||||
sshBinaryPath, err := ex.LookPath("ssh")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to lookup ssh binary path. err: %w", err)
|
||||
}
|
||||
|
||||
sshConfigCmd := buildSSHConfigCmd(sshBinaryPath, c)
|
||||
logging.Log.Debugf("Executing... %s", strings.Replace(sshConfigCmd, "\n", "", -1))
|
||||
configResult := localExec(*c, sshConfigCmd, noSudo)
|
||||
if !configResult.isSuccess() {
|
||||
return xerrors.Errorf("Failed to print SSH configuration. err: %w", configResult.Error)
|
||||
}
|
||||
sshConfig := parseSSHConfiguration(configResult.Stdout)
|
||||
c.User = sshConfig.user
|
||||
logging.Log.Debugf("Setting SSH User:%s for Server:%s ...", sshConfig.user, c.GetServerName())
|
||||
c.Port = sshConfig.port
|
||||
logging.Log.Debugf("Setting SSH Port:%s for Server:%s ...", sshConfig.port, c.GetServerName())
|
||||
if c.User == "" || c.Port == "" {
|
||||
return xerrors.New("Failed to find User or Port setting. Please check the User or Port settings for SSH")
|
||||
}
|
||||
|
||||
if sshConfig.strictHostKeyChecking == "false" {
|
||||
return nil
|
||||
}
|
||||
if sshConfig.proxyCommand != "" || sshConfig.proxyJump != "" {
|
||||
logging.Log.Debug("known_host check under Proxy is not yet implemented")
|
||||
return nil
|
||||
}
|
||||
|
||||
logging.Log.Debugf("Checking if the host's public key is in known_hosts...")
|
||||
knownHostsPaths := []string{}
|
||||
for _, knownHost := range append(sshConfig.userKnownHosts, sshConfig.globalKnownHosts...) {
|
||||
if knownHost != "" && knownHost != "/dev/null" {
|
||||
knownHostsPaths = append(knownHostsPaths, knownHost)
|
||||
}
|
||||
}
|
||||
if len(knownHostsPaths) == 0 {
|
||||
return xerrors.New("Failed to find any known_hosts to use. Please check the UserKnownHostsFile and GlobalKnownHostsFile settings for SSH")
|
||||
}
|
||||
|
||||
sshKeyscanBinaryPath, err := ex.LookPath("ssh-keyscan")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to lookup ssh-keyscan binary path. err: %w", err)
|
||||
}
|
||||
sshScanCmd := strings.Join([]string{sshKeyscanBinaryPath, "-p", c.Port, sshConfig.hostname}, " ")
|
||||
r := localExec(*c, sshScanCmd, noSudo)
|
||||
if !r.isSuccess() {
|
||||
return xerrors.Errorf("Failed to ssh-keyscan. cmd: %s, err: %w", sshScanCmd, r.Error)
|
||||
}
|
||||
serverKeys := parseSSHScan(r.Stdout)
|
||||
|
||||
sshKeygenBinaryPath, err := ex.LookPath("ssh-keygen")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to lookup ssh-keygen binary path. err: %w", err)
|
||||
}
|
||||
for _, knownHosts := range knownHostsPaths {
|
||||
var hostname string
|
||||
if sshConfig.hostKeyAlias != "" {
|
||||
hostname = sshConfig.hostKeyAlias
|
||||
} else {
|
||||
if c.Port != "" && c.Port != "22" {
|
||||
hostname = fmt.Sprintf("\"[%s]:%s\"", sshConfig.hostname, c.Port)
|
||||
} else {
|
||||
hostname = sshConfig.hostname
|
||||
}
|
||||
}
|
||||
cmd := fmt.Sprintf("%s -F %s -f %s", sshKeygenBinaryPath, hostname, knownHosts)
|
||||
logging.Log.Debugf("Executing... %s", strings.Replace(cmd, "\n", "", -1))
|
||||
if r := localExec(*c, cmd, noSudo); r.isSuccess() {
|
||||
keyType, clientKey, err := parseSSHKeygen(r.Stdout)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to parse ssh-keygen result. stdout: %s, err: %w", r.Stdout, r.Error)
|
||||
}
|
||||
if serverKey, ok := serverKeys[keyType]; ok && serverKey == clientKey {
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("Failed to find the server key that matches the key registered in the client. The server key may have been changed. Please exec `$ %s` and `$ %s` or `$ %s`",
|
||||
fmt.Sprintf("%s -R %s -f %s", sshKeygenBinaryPath, hostname, knownHosts),
|
||||
strings.Join(buildSSHBaseCmd(sshBinaryPath, c, nil), " "),
|
||||
buildSSHKeyScanCmd(sshKeyscanBinaryPath, c.Port, knownHostsPaths[0], sshConfig))
|
||||
}
|
||||
}
|
||||
return xerrors.Errorf("Failed to find the host in known_hosts. Please exec `$ %s` or `$ %s`",
|
||||
strings.Join(buildSSHBaseCmd(sshBinaryPath, c, nil), " "),
|
||||
buildSSHKeyScanCmd(sshKeyscanBinaryPath, c.Port, knownHostsPaths[0], sshConfig))
|
||||
}
|
||||
|
||||
func buildSSHBaseCmd(sshBinaryPath string, c *config.ServerInfo, options []string) []string {
|
||||
cmd := []string{sshBinaryPath}
|
||||
if len(options) > 0 {
|
||||
cmd = append(cmd, options...)
|
||||
}
|
||||
if c.SSHConfigPath != "" {
|
||||
cmd = append(cmd, "-F", c.SSHConfigPath)
|
||||
}
|
||||
if c.KeyPath != "" {
|
||||
cmd = append(cmd, "-i", c.KeyPath)
|
||||
}
|
||||
if c.Port != "" {
|
||||
cmd = append(cmd, "-p", c.Port)
|
||||
}
|
||||
if c.User != "" {
|
||||
cmd = append(cmd, "-l", c.User)
|
||||
}
|
||||
if len(c.JumpServer) > 0 {
|
||||
cmd = append(cmd, "-J", strings.Join(c.JumpServer, ","))
|
||||
}
|
||||
cmd = append(cmd, c.Host)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func buildSSHConfigCmd(sshBinaryPath string, c *config.ServerInfo) string {
|
||||
return strings.Join(buildSSHBaseCmd(sshBinaryPath, c, []string{"-G"}), " ")
|
||||
}
|
||||
|
||||
func buildSSHKeyScanCmd(sshKeyscanBinaryPath, port, knownHosts string, sshConfig sshConfiguration) string {
|
||||
cmd := []string{sshKeyscanBinaryPath}
|
||||
if sshConfig.hashKnownHosts == "yes" {
|
||||
cmd = append(cmd, "-H")
|
||||
}
|
||||
if port != "" {
|
||||
cmd = append(cmd, "-p", port)
|
||||
}
|
||||
return strings.Join(append(cmd, sshConfig.hostname, ">>", knownHosts), " ")
|
||||
}
|
||||
|
||||
type sshConfiguration struct {
hostname string
hostKeyAlias string
hashKnownHosts string
user string
port string
strictHostKeyChecking string
globalKnownHosts []string
userKnownHosts []string
proxyCommand string
proxyJump string
}

func parseSSHConfiguration(stdout string) sshConfiguration {
sshConfig := sshConfiguration{}
for _, line := range strings.Split(stdout, "\n") {
switch {
case strings.HasPrefix(line, "user "):
sshConfig.user = strings.TrimPrefix(line, "user ")
case strings.HasPrefix(line, "hostname "):
sshConfig.hostname = strings.TrimPrefix(line, "hostname ")
case strings.HasPrefix(line, "hostkeyalias "):
sshConfig.hostKeyAlias = strings.TrimPrefix(line, "hostkeyalias ")
case strings.HasPrefix(line, "hashknownhosts "):
sshConfig.hashKnownHosts = strings.TrimPrefix(line, "hashknownhosts ")
case strings.HasPrefix(line, "port "):
sshConfig.port = strings.TrimPrefix(line, "port ")
case strings.HasPrefix(line, "stricthostkeychecking "):
sshConfig.strictHostKeyChecking = strings.TrimPrefix(line, "stricthostkeychecking ")
case strings.HasPrefix(line, "globalknownhostsfile "):
sshConfig.globalKnownHosts = strings.Split(strings.TrimPrefix(line, "globalknownhostsfile "), " ")
case strings.HasPrefix(line, "userknownhostsfile "):
sshConfig.userKnownHosts = strings.Split(strings.TrimPrefix(line, "userknownhostsfile "), " ")
case strings.HasPrefix(line, "proxycommand "):
sshConfig.proxyCommand = strings.TrimPrefix(line, "proxycommand ")
case strings.HasPrefix(line, "proxyjump "):
sshConfig.proxyJump = strings.TrimPrefix(line, "proxyjump ")
}
}
return sshConfig
}
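parseSSHConfiguration above consumes the output of `ssh -G <host>`, which prints the fully resolved client configuration as lowercase "key value" lines. A minimal standalone illustration of that data source (the hostname example.com is a placeholder; requires an ssh client on the PATH):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// `ssh -G <host>` prints the resolved client configuration; this is the
	// stdout that parseSSHConfiguration receives from the scanner.
	out, err := exec.Command("ssh", "-G", "example.com").Output()
	if err != nil {
		fmt.Println("ssh -G failed:", err)
		return
	}
	for _, line := range strings.Split(string(out), "\n") {
		if strings.HasPrefix(line, "user ") ||
			strings.HasPrefix(line, "hostname ") ||
			strings.HasPrefix(line, "port ") {
			fmt.Println(line)
		}
	}
}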
|
||||
func parseSSHScan(stdout string) map[string]string {
keys := map[string]string{}
for _, line := range strings.Split(stdout, "\n") {
if line == "" || strings.HasPrefix(line, "# ") {
continue
}
if ss := strings.Split(line, " "); len(ss) == 3 {
keys[ss[1]] = ss[2]
}
}
return keys
}

func parseSSHKeygen(stdout string) (string, string, error) {
for _, line := range strings.Split(stdout, "\n") {
if line == "" || strings.HasPrefix(line, "# ") {
continue
}

// HashKnownHosts yes
if strings.HasPrefix(line, "|1|") {
ss := strings.Split(line, "|")
if ss := strings.Split(ss[len(ss)-1], " "); len(ss) == 3 {
return ss[1], ss[2], nil
}
} else {
if ss := strings.Split(line, " "); len(ss) == 3 {
return ss[1], ss[2], nil
}
}
}
return "", "", xerrors.New("Failed to parse ssh-keygen result. err: public key not found")
}
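The caller combines the two parsers above: the type/key pairs reported by ssh-keyscan are matched against the single entry that `ssh-keygen -F <host>` extracts from known_hosts, and a mismatch is treated as a possible host key change. A compressed, self-contained sketch of that comparison using hard-coded sample lines (the key values are truncated placeholders):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Sample ssh-keyscan output line: "host keytype key" (comment lines start with "# ").
	scan := "[127.0.0.1]:2222 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5EXAMPLEKEY"
	// Sample unhashed known_hosts entry as printed by `ssh-keygen -F <host>`.
	known := "vuls ssh-ed25519 AAAAC3NzaC1lZDI1NTE5EXAMPLEKEY"

	serverKeys := map[string]string{}
	if ss := strings.Split(scan, " "); len(ss) == 3 {
		serverKeys[ss[1]] = ss[2] // keyed by key type, e.g. "ssh-ed25519"
	}
	if ss := strings.Split(known, " "); len(ss) == 3 {
		if serverKeys[ss[1]] == ss[2] {
			fmt.Println("host key matches known_hosts")
		} else {
			fmt.Println("host key mismatch: the server key may have changed")
		}
	}
}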
|
||||
func (s Scanner) detectContainerOSes(hosts []osTypeInterface) (actives, inactives []osTypeInterface) {
|
||||
logging.Log.Info("Detecting OS of containers... ")
|
||||
osTypesChan := make(chan []osTypeInterface, len(hosts))
|
||||
@@ -438,49 +664,42 @@ func (s Scanner) detectContainerOSesOnServer(containerHost osTypeInterface) (ose
|
||||
return oses
|
||||
}
|
||||
|
||||
func (s Scanner) detectOS(c config.ServerInfo) (osType osTypeInterface) {
|
||||
var itsMe bool
|
||||
var fatalErr error
|
||||
|
||||
if itsMe, osType, _ = detectPseudo(c); itsMe {
|
||||
return
|
||||
func (s Scanner) detectOS(c config.ServerInfo) osTypeInterface {
|
||||
if itsMe, osType, _ := detectPseudo(c); itsMe {
|
||||
return osType
|
||||
}
|
||||
|
||||
itsMe, osType, fatalErr = s.detectDebianWithRetry(c)
|
||||
if fatalErr != nil {
|
||||
osType.setErrs([]error{
|
||||
xerrors.Errorf("Failed to detect OS: %w", fatalErr)})
|
||||
return
|
||||
if itsMe, osType, fatalErr := s.detectDebianWithRetry(c); fatalErr != nil {
|
||||
osType.setErrs([]error{xerrors.Errorf("Failed to detect OS: %w", fatalErr)})
|
||||
return osType
|
||||
} else if itsMe {
|
||||
logging.Log.Debugf("Debian based Linux. Host: %s:%s", c.Host, c.Port)
|
||||
return osType
|
||||
}
|
||||
|
||||
if itsMe {
|
||||
logging.Log.Debugf("Debian like Linux. Host: %s:%s", c.Host, c.Port)
|
||||
return
|
||||
if itsMe, osType := detectRedhat(c); itsMe {
|
||||
logging.Log.Debugf("Redhat based Linux. Host: %s:%s", c.Host, c.Port)
|
||||
return osType
|
||||
}
|
||||
|
||||
if itsMe, osType = detectRedhat(c); itsMe {
|
||||
logging.Log.Debugf("Redhat like Linux. Host: %s:%s", c.Host, c.Port)
|
||||
return
|
||||
}
|
||||
|
||||
if itsMe, osType = detectSUSE(c); itsMe {
|
||||
if itsMe, osType := detectSUSE(c); itsMe {
|
||||
logging.Log.Debugf("SUSE Linux. Host: %s:%s", c.Host, c.Port)
|
||||
return
|
||||
return osType
|
||||
}
|
||||
|
||||
if itsMe, osType = detectFreebsd(c); itsMe {
|
||||
if itsMe, osType := detectFreebsd(c); itsMe {
|
||||
logging.Log.Debugf("FreeBSD. Host: %s:%s", c.Host, c.Port)
|
||||
return
|
||||
return osType
|
||||
}
|
||||
|
||||
if itsMe, osType = detectAlpine(c); itsMe {
|
||||
if itsMe, osType := detectAlpine(c); itsMe {
|
||||
logging.Log.Debugf("Alpine. Host: %s:%s", c.Host, c.Port)
|
||||
return
|
||||
return osType
|
||||
}
|
||||
|
||||
//TODO darwin https://github.com/mizzy/specinfra/blob/master/lib/specinfra/helper/detect_os/darwin.rb
|
||||
osType := &unknown{base{ServerInfo: c}}
|
||||
osType.setErrs([]error{xerrors.New("Unknown OS Type")})
|
||||
return
|
||||
return osType
|
||||
}
|
||||
|
||||
// Retry as it may stall on the first SSH connection
|
||||
scanner/scanner_test.go (new file, 341 lines)
@@ -0,0 +1,341 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/models"
|
||||
)
|
||||
|
||||
func TestViaHTTP(t *testing.T) {
|
||||
r := newRHEL(config.ServerInfo{})
|
||||
r.Distro = config.Distro{Family: constant.RedHat}
|
||||
|
||||
var tests = []struct {
|
||||
header map[string]string
|
||||
body string
|
||||
packages models.Packages
|
||||
expectedResult models.ScanResult
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Release": "6.9",
|
||||
"X-Vuls-Kernel-Release": "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
wantErr: errOSFamilyHeader,
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "redhat",
|
||||
"X-Vuls-Kernel-Release": "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
wantErr: errOSReleaseHeader,
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "centos",
|
||||
"X-Vuls-OS-Release": "6.9",
|
||||
"X-Vuls-Kernel-Release": "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
body: `openssl 0 1.0.1e 30.el6.11 x86_64
|
||||
Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x84_64
|
||||
kernel 0 2.6.32 696.20.1.el6 x86_64
|
||||
kernel 0 2.6.32 696.20.3.el6 x86_64
|
||||
kernel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
expectedResult: models.ScanResult{
|
||||
Family: "centos",
|
||||
Release: "6.9",
|
||||
RunningKernel: models.Kernel{
|
||||
Release: "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
Packages: models.Packages{
|
||||
"openssl": models.Package{
|
||||
Name: "openssl",
|
||||
Version: "1.0.1e",
|
||||
Release: "30.el6.11",
|
||||
},
|
||||
"Percona-Server-shared-56": models.Package{
|
||||
Name: "Percona-Server-shared-56",
|
||||
Version: "1:5.6.19",
|
||||
Release: "rel67.0.el6",
|
||||
},
|
||||
"kernel": models.Package{
|
||||
Name: "kernel",
|
||||
Version: "2.6.32",
|
||||
Release: "695.20.3.el6",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "debian",
|
||||
"X-Vuls-OS-Release": "8.10",
|
||||
"X-Vuls-Kernel-Release": "3.16.0-4-amd64",
|
||||
"X-Vuls-Kernel-Version": "3.16.51-2",
|
||||
},
|
||||
body: "",
|
||||
expectedResult: models.ScanResult{
|
||||
Family: "debian",
|
||||
Release: "8.10",
|
||||
RunningKernel: models.Kernel{
|
||||
Release: "3.16.0-4-amd64",
|
||||
Version: "3.16.51-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "debian",
|
||||
"X-Vuls-OS-Release": "8.10",
|
||||
"X-Vuls-Kernel-Release": "3.16.0-4-amd64",
|
||||
},
|
||||
body: "",
|
||||
expectedResult: models.ScanResult{
|
||||
Family: "debian",
|
||||
Release: "8.10",
|
||||
RunningKernel: models.Kernel{
|
||||
Release: "3.16.0-4-amd64",
|
||||
Version: "",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
header := http.Header{}
|
||||
for k, v := range tt.header {
|
||||
header.Set(k, v)
|
||||
}
|
||||
|
||||
result, err := ViaHTTP(header, tt.body, false)
|
||||
if err != tt.wantErr {
|
||||
t.Errorf("error: expected %s, actual: %s", tt.wantErr, err)
|
||||
}
|
||||
|
||||
if result.Family != tt.expectedResult.Family {
|
||||
t.Errorf("os family: expected %s, actual %s", tt.expectedResult.Family, result.Family)
|
||||
}
|
||||
if result.Release != tt.expectedResult.Release {
|
||||
t.Errorf("os release: expected %s, actual %s", tt.expectedResult.Release, result.Release)
|
||||
}
|
||||
if result.RunningKernel.Release != tt.expectedResult.RunningKernel.Release {
|
||||
t.Errorf("kernel release: expected %s, actual %s",
|
||||
tt.expectedResult.RunningKernel.Release, result.RunningKernel.Release)
|
||||
}
|
||||
if result.RunningKernel.Version != tt.expectedResult.RunningKernel.Version {
|
||||
t.Errorf("kernel version: expected %s, actual %s",
|
||||
tt.expectedResult.RunningKernel.Version, result.RunningKernel.Version)
|
||||
}
|
||||
|
||||
for name, expectedPack := range tt.expectedResult.Packages {
|
||||
pack := result.Packages[name]
|
||||
if pack.Name != expectedPack.Name {
|
||||
t.Errorf("name: expected %s, actual %s", expectedPack.Name, pack.Name)
|
||||
}
|
||||
if pack.Version != expectedPack.Version {
|
||||
t.Errorf("version: expected %s, actual %s", expectedPack.Version, pack.Version)
|
||||
}
|
||||
if pack.Release != expectedPack.Release {
|
||||
t.Errorf("release: expected %s, actual %s", expectedPack.Release, pack.Release)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSSHConfiguration(t *testing.T) {
|
||||
tests := []struct {
|
||||
in string
|
||||
expected sshConfiguration
|
||||
}{
|
||||
{
|
||||
in: `user root
|
||||
hostname 127.0.0.1
|
||||
port 2222
|
||||
addkeystoagent false
|
||||
addressfamily any
|
||||
batchmode no
|
||||
canonicalizefallbacklocal yes
|
||||
canonicalizehostname false
|
||||
challengeresponseauthentication yes
|
||||
checkhostip no
|
||||
compression no
|
||||
controlmaster false
|
||||
enablesshkeysign no
|
||||
clearallforwardings no
|
||||
exitonforwardfailure no
|
||||
fingerprinthash SHA256
|
||||
forwardx11 no
|
||||
forwardx11trusted yes
|
||||
gatewayports no
|
||||
gssapiauthentication yes
|
||||
gssapikeyexchange no
|
||||
gssapidelegatecredentials no
|
||||
gssapitrustdns no
|
||||
gssapirenewalforcesrekey no
|
||||
gssapikexalgorithms gss-gex-sha1-,gss-group14-sha1-
|
||||
hashknownhosts no
|
||||
hostbasedauthentication no
|
||||
identitiesonly yes
|
||||
kbdinteractiveauthentication yes
|
||||
nohostauthenticationforlocalhost no
|
||||
passwordauthentication yes
|
||||
permitlocalcommand no
|
||||
proxyusefdpass no
|
||||
pubkeyauthentication yes
|
||||
requesttty auto
|
||||
streamlocalbindunlink no
|
||||
stricthostkeychecking ask
|
||||
tcpkeepalive yes
|
||||
tunnel false
|
||||
verifyhostkeydns false
|
||||
visualhostkey no
|
||||
updatehostkeys false
|
||||
canonicalizemaxdots 1
|
||||
connectionattempts 1
|
||||
forwardx11timeout 1200
|
||||
numberofpasswordprompts 3
|
||||
serveralivecountmax 3
|
||||
serveraliveinterval 0
|
||||
ciphers chacha20-poly1305@openssh.com,aes128-ctr,aes192-ctr,aes256-ctr,aes128-gcm@openssh.com,aes256-gcm@openssh.com
|
||||
hostkeyalgorithms ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,sk-ecdsa-sha2-nistp256-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,sk-ecdsa-sha2-nistp256@openssh.com,ssh-ed25519,sk-ssh-ed25519@openssh.com,rsa-sha2-512,rsa-sha2-256,ssh-rsa
|
||||
hostkeyalias vuls
|
||||
hostbasedkeytypes ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,sk-ecdsa-sha2-nistp256-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,sk-ecdsa-sha2-nistp256@openssh.com,ssh-ed25519,sk-ssh-ed25519@openssh.com,rsa-sha2-512,rsa-sha2-256,ssh-rsa
|
||||
kexalgorithms curve25519-sha256,curve25519-sha256@libssh.org,ecdh-sha2-nistp256,ecdh-sha2-nistp384,ecdh-sha2-nistp521,diffie-hellman-group-exchange-sha256,diffie-hellman-group16-sha512,diffie-hellman-group18-sha512,diffie-hellman-group14-sha256,diffie-hellman-group1-sha1
|
||||
casignaturealgorithms ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,sk-ecdsa-sha2-nistp256@openssh.com,ssh-ed25519,sk-ssh-ed25519@openssh.com,rsa-sha2-512,rsa-sha2-256
|
||||
loglevel INFO
|
||||
macs umac-64-etm@openssh.com,umac-128-etm@openssh.com,hmac-sha2-256-etm@openssh.com,hmac-sha2-512-etm@openssh.com,hmac-sha1-etm@openssh.com,umac-64@openssh.com,umac-128@openssh.com,hmac-sha2-256,hmac-sha2-512,hmac-sha1
|
||||
securitykeyprovider internal
|
||||
pubkeyacceptedkeytypes ecdsa-sha2-nistp256-cert-v01@openssh.com,ecdsa-sha2-nistp384-cert-v01@openssh.com,ecdsa-sha2-nistp521-cert-v01@openssh.com,sk-ecdsa-sha2-nistp256-cert-v01@openssh.com,ssh-ed25519-cert-v01@openssh.com,sk-ssh-ed25519-cert-v01@openssh.com,rsa-sha2-512-cert-v01@openssh.com,rsa-sha2-256-cert-v01@openssh.com,ssh-rsa-cert-v01@openssh.com,ecdsa-sha2-nistp256,ecdsa-sha2-nistp384,ecdsa-sha2-nistp521,sk-ecdsa-sha2-nistp256@openssh.com,ssh-ed25519,sk-ssh-ed25519@openssh.com,rsa-sha2-512,rsa-sha2-256,ssh-rsa
|
||||
xauthlocation /usr/bin/xauth
|
||||
identityfile ~/github/github.com/MaineK00n/vuls-targets-docker/.ssh/id_rsa
|
||||
canonicaldomains
|
||||
globalknownhostsfile /etc/ssh/ssh_known_hosts /etc/ssh/ssh_known_hosts2
|
||||
userknownhostsfile ~/.ssh/known_hosts ~/.ssh/known_hosts2
|
||||
sendenv LANG
|
||||
sendenv LC_*
|
||||
forwardagent no
|
||||
connecttimeout none
|
||||
tunneldevice any:any
|
||||
controlpersist no
|
||||
escapechar ~
|
||||
ipqos lowdelay throughput
|
||||
rekeylimit 0 0
|
||||
streamlocalbindmask 0177
|
||||
syslogfacility USER
|
||||
`,
|
||||
expected: sshConfiguration{
|
||||
hostname: "127.0.0.1",
|
||||
hostKeyAlias: "vuls",
|
||||
hashKnownHosts: "no",
|
||||
user: "root",
|
||||
port: "2222",
|
||||
strictHostKeyChecking: "ask",
|
||||
globalKnownHosts: []string{"/etc/ssh/ssh_known_hosts", "/etc/ssh/ssh_known_hosts2"},
|
||||
userKnownHosts: []string{"~/.ssh/known_hosts", "~/.ssh/known_hosts2"},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: `proxycommand ssh -W %h:%p step`,
|
||||
expected: sshConfiguration{
|
||||
proxyCommand: "ssh -W %h:%p step",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: `proxyjump step`,
|
||||
expected: sshConfiguration{
|
||||
proxyJump: "step",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if got := parseSSHConfiguration(tt.in); !reflect.DeepEqual(got, tt.expected) {
|
||||
t.Errorf("expected %v, actual %v", tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSSHScan(t *testing.T) {
|
||||
tests := []struct {
|
||||
in string
|
||||
expected map[string]string
|
||||
}{
|
||||
{
|
||||
in: `# 127.0.0.1:2222 SSH-2.0-OpenSSH_8.8p1 Ubuntu-1
|
||||
# 127.0.0.1:2222 SSH-2.0-OpenSSH_8.8p1 Ubuntu-1
|
||||
[127.0.0.1]:2222 ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIGuUutp6L4whnv5YzyjFuQM8TQF2G01M+OGolSfRnPgD
|
||||
# 127.0.0.1:2222 SSH-2.0-OpenSSH_8.8p1 Ubuntu-1
|
||||
# 127.0.0.1:2222 SSH-2.0-OpenSSH_8.8p1 Ubuntu-1
|
||||
[127.0.0.1]:2222 ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQDDXRr3jhZtTJuqLAhRvxGP4iozmzkWDPXTqbB+xCH79ak4RDll6Z+jCzMfggLEK3U7j4gK/rzs1cjUdLOGRSgf9B78MOGtyAJd86rNUJwhCHxrKeoIe5RiS7CrsugCp4ZTBWiPyB0ORSqYI1o6tfOFVLqV/Zv7WmRs1gwzSn4wcnkhxtEfgeFjy1dV59Z9k0HMlonxsn4g0OcGMqa4IyQh0r/YZ9V1EGMKkHm6YbND9JCFtTv6J0mzFCK2BhMMNPqVF8GUFQqUUAQMlpGSuurxqCbAzbNuTKRfZqwdq/OnNpHJbzzrbTpeUTQX2VxN7z/VmpQfGxxhet+/hFWOjSqUMpALV02UNeFIYm9+Yrvm4c8xsr2SVitsJotA+xtrI4NSDzOjXFe0c4KoQItuq1E6zmhFVtq3NtzdySPPE+269Uy1palVQuJnyqIw7ZUq7Lz+veaLSAlBMNO4LbLLOYIQ7qCRzNA2ZvBpRABs9STpgkuyMrCee7hdsskb5hX6se8=
|
||||
# 127.0.0.1:2222 SSH-2.0-OpenSSH_8.8p1 Ubuntu-1
|
||||
[127.0.0.1]:2222 ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCvonZPuWvVd+qqVaIkC7IMP1GWITccQKCZWZCgbsES5/tzFlhJtcaaeVjnjBCbwAgRyhxyNj2FtyXKtKlaWEeQ=
|
||||
|
||||
`,
|
||||
expected: map[string]string{
|
||||
"ssh-ed25519": "AAAAC3NzaC1lZDI1NTE5AAAAIGuUutp6L4whnv5YzyjFuQM8TQF2G01M+OGolSfRnPgD",
|
||||
"ssh-rsa": "AAAAB3NzaC1yc2EAAAADAQABAAABgQDDXRr3jhZtTJuqLAhRvxGP4iozmzkWDPXTqbB+xCH79ak4RDll6Z+jCzMfggLEK3U7j4gK/rzs1cjUdLOGRSgf9B78MOGtyAJd86rNUJwhCHxrKeoIe5RiS7CrsugCp4ZTBWiPyB0ORSqYI1o6tfOFVLqV/Zv7WmRs1gwzSn4wcnkhxtEfgeFjy1dV59Z9k0HMlonxsn4g0OcGMqa4IyQh0r/YZ9V1EGMKkHm6YbND9JCFtTv6J0mzFCK2BhMMNPqVF8GUFQqUUAQMlpGSuurxqCbAzbNuTKRfZqwdq/OnNpHJbzzrbTpeUTQX2VxN7z/VmpQfGxxhet+/hFWOjSqUMpALV02UNeFIYm9+Yrvm4c8xsr2SVitsJotA+xtrI4NSDzOjXFe0c4KoQItuq1E6zmhFVtq3NtzdySPPE+269Uy1palVQuJnyqIw7ZUq7Lz+veaLSAlBMNO4LbLLOYIQ7qCRzNA2ZvBpRABs9STpgkuyMrCee7hdsskb5hX6se8=",
|
||||
"ecdsa-sha2-nistp256": "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCvonZPuWvVd+qqVaIkC7IMP1GWITccQKCZWZCgbsES5/tzFlhJtcaaeVjnjBCbwAgRyhxyNj2FtyXKtKlaWEeQ=",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
if got := parseSSHScan(tt.in); !reflect.DeepEqual(got, tt.expected) {
|
||||
t.Errorf("expected %v, actual %v", tt.expected, got)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseSSHKeygen(t *testing.T) {
|
||||
type expected struct {
|
||||
keyType string
|
||||
key string
|
||||
wantErr bool
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
in string
|
||||
expected expected
|
||||
}{
|
||||
{
|
||||
in: `# Host [127.0.0.1]:2222 found: line 6
|
||||
|1|hR8ZOXDcB9Q+b2vCvgOjqp4EkSw=|NiNE9zsi2y3WfjA4LxVX0ls37P4= ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCvonZPuWvVd+qqVaIkC7IMP1GWITccQKCZWZCgbsES5/tzFlhJtcaaeVjnjBCbwAgRyhxyNj2FtyXKtKlaWEeQ=
|
||||
|
||||
`,
|
||||
expected: expected{
|
||||
keyType: "ecdsa-sha2-nistp256",
|
||||
key: "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBCvonZPuWvVd+qqVaIkC7IMP1GWITccQKCZWZCgbsES5/tzFlhJtcaaeVjnjBCbwAgRyhxyNj2FtyXKtKlaWEeQ=",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: `# Host vuls found: line 6
|
||||
vuls ecdsa-sha2-nistp256 AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=
|
||||
|
||||
`,
|
||||
expected: expected{
|
||||
keyType: "ecdsa-sha2-nistp256",
|
||||
key: "AAAAE2VjZHNhLXNoYTItbmlzdHAyNTYAAAAIbmlzdHAyNTYAAABBBEmKSENjQEezOmxkZMy7opKgwFB9nkt5YRrYMjNuG5N87uRgg6CLrbo5wAdT/y6v0mKV0U2w0WZ2YB/++Tpockg=",
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "invalid",
|
||||
expected: expected{wantErr: true},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
keyType, key, err := parseSSHKeygen(tt.in)
|
||||
if !tt.expected.wantErr && err != nil {
|
||||
t.Errorf("parseSSHKeygen error: %s", err)
|
||||
continue
|
||||
}
|
||||
if keyType != tt.expected.keyType {
|
||||
t.Errorf("expected keyType %s, actual %s", tt.expected.keyType, keyType)
|
||||
}
|
||||
if key != tt.expected.key {
|
||||
t.Errorf("expected key %s, actual %s", tt.expected.key, key)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,139 +0,0 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/models"
|
||||
)
|
||||
|
||||
func TestViaHTTP(t *testing.T) {
|
||||
r := newRHEL(config.ServerInfo{})
|
||||
r.Distro = config.Distro{Family: constant.RedHat}
|
||||
|
||||
var tests = []struct {
|
||||
header map[string]string
|
||||
body string
|
||||
packages models.Packages
|
||||
expectedResult models.ScanResult
|
||||
wantErr error
|
||||
}{
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Release": "6.9",
|
||||
"X-Vuls-Kernel-Release": "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
wantErr: errOSFamilyHeader,
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "redhat",
|
||||
"X-Vuls-Kernel-Release": "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
wantErr: errOSReleaseHeader,
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "debian",
|
||||
"X-Vuls-OS-Release": "8",
|
||||
"X-Vuls-Kernel-Release": "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
wantErr: errKernelVersionHeader,
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "centos",
|
||||
"X-Vuls-OS-Release": "6.9",
|
||||
"X-Vuls-Kernel-Release": "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
body: `openssl 0 1.0.1e 30.el6.11 x86_64
|
||||
Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x84_64
|
||||
kernel 0 2.6.32 696.20.1.el6 x86_64
|
||||
kernel 0 2.6.32 696.20.3.el6 x86_64
|
||||
kernel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
expectedResult: models.ScanResult{
|
||||
Family: "centos",
|
||||
Release: "6.9",
|
||||
RunningKernel: models.Kernel{
|
||||
Release: "2.6.32-695.20.3.el6.x86_64",
|
||||
},
|
||||
Packages: models.Packages{
|
||||
"openssl": models.Package{
|
||||
Name: "openssl",
|
||||
Version: "1.0.1e",
|
||||
Release: "30.el6.11",
|
||||
},
|
||||
"Percona-Server-shared-56": models.Package{
|
||||
Name: "Percona-Server-shared-56",
|
||||
Version: "1:5.6.19",
|
||||
Release: "rel67.0.el6",
|
||||
},
|
||||
"kernel": models.Package{
|
||||
Name: "kernel",
|
||||
Version: "2.6.32",
|
||||
Release: "695.20.3.el6",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "debian",
|
||||
"X-Vuls-OS-Release": "8.10",
|
||||
"X-Vuls-Kernel-Release": "3.16.0-4-amd64",
|
||||
"X-Vuls-Kernel-Version": "3.16.51-2",
|
||||
},
|
||||
body: "",
|
||||
expectedResult: models.ScanResult{
|
||||
Family: "debian",
|
||||
Release: "8.10",
|
||||
RunningKernel: models.Kernel{
|
||||
Release: "3.16.0-4-amd64",
|
||||
Version: "3.16.51-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
header := http.Header{}
|
||||
for k, v := range tt.header {
|
||||
header.Set(k, v)
|
||||
}
|
||||
|
||||
result, err := ViaHTTP(header, tt.body, false)
|
||||
if err != tt.wantErr {
|
||||
t.Errorf("error: expected %s, actual: %s", tt.wantErr, err)
|
||||
}
|
||||
|
||||
if result.Family != tt.expectedResult.Family {
|
||||
t.Errorf("os family: expected %s, actual %s", tt.expectedResult.Family, result.Family)
|
||||
}
|
||||
if result.Release != tt.expectedResult.Release {
|
||||
t.Errorf("os release: expected %s, actual %s", tt.expectedResult.Release, result.Release)
|
||||
}
|
||||
if result.RunningKernel.Release != tt.expectedResult.RunningKernel.Release {
|
||||
t.Errorf("kernel release: expected %s, actual %s",
|
||||
tt.expectedResult.RunningKernel.Release, result.RunningKernel.Release)
|
||||
}
|
||||
if result.RunningKernel.Version != tt.expectedResult.RunningKernel.Version {
|
||||
t.Errorf("kernel version: expected %s, actual %s",
|
||||
tt.expectedResult.RunningKernel.Version, result.RunningKernel.Version)
|
||||
}
|
||||
|
||||
for name, expectedPack := range tt.expectedResult.Packages {
|
||||
pack := result.Packages[name]
|
||||
if pack.Name != expectedPack.Name {
|
||||
t.Errorf("name: expected %s, actual %s", expectedPack.Name, pack.Name)
|
||||
}
|
||||
if pack.Version != expectedPack.Version {
|
||||
t.Errorf("version: expected %s, actual %s", expectedPack.Version, pack.Version)
|
||||
}
|
||||
if pack.Release != expectedPack.Release {
|
||||
t.Errorf("release: expected %s, actual %s", expectedPack.Release, pack.Release)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
scanner/suse.go (201 changed lines)
@@ -19,7 +19,7 @@ type suse struct {
|
||||
redhatBase
|
||||
}
|
||||
|
||||
// NewRedhat is constructor
|
||||
// newSUSE is constructor
|
||||
func newSUSE(c config.ServerInfo) *suse {
|
||||
r := &suse{
|
||||
redhatBase: redhatBase{
|
||||
@@ -43,6 +43,10 @@ func detectSUSE(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
if r := exec(c, "cat /etc/os-release", noSudo); r.isSuccess() {
|
||||
s := newSUSE(c)
|
||||
name, ver := s.parseOSRelease(r.Stdout)
|
||||
if name == "" || ver == "" {
|
||||
s.setErrs([]error{xerrors.Errorf("Failed to parse /etc/os-release: %s", r.Stdout)})
|
||||
return true, s
|
||||
}
|
||||
s.setDistro(name, ver)
|
||||
return true, s
|
||||
}
|
||||
@@ -50,11 +54,10 @@ func detectSUSE(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
} else if r := exec(c, "ls /etc/SuSE-release", noSudo); r.isSuccess() {
|
||||
if r := exec(c, "zypper -V", noSudo); r.isSuccess() {
|
||||
if r := exec(c, "cat /etc/SuSE-release", noSudo); r.isSuccess() {
|
||||
s := newSUSE(c)
|
||||
re := regexp.MustCompile(`openSUSE (\d+\.\d+|\d+)`)
|
||||
result := re.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) == 2 {
|
||||
//TODO check opensuse or opensuse.leap
|
||||
s := newSUSE(c)
|
||||
s.setDistro(constant.OpenSUSE, result[1])
|
||||
return true, s
|
||||
}
|
||||
@@ -66,14 +69,12 @@ func detectSUSE(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
re = regexp.MustCompile(`PATCHLEVEL = (\d+)`)
|
||||
result = re.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) == 2 {
|
||||
s := newSUSE(c)
|
||||
s.setDistro(constant.SUSEEnterpriseServer,
|
||||
fmt.Sprintf("%s.%s", version, result[1]))
|
||||
s.setDistro(constant.SUSEEnterpriseServer, fmt.Sprintf("%s.%s", version, result[1]))
|
||||
return true, s
|
||||
}
|
||||
}
|
||||
logging.Log.Warnf("Failed to parse SUSE Linux version: %s", r)
|
||||
return true, newSUSE(c)
|
||||
s.setErrs([]error{xerrors.Errorf("Failed to parse /etc/SuSE-release: %s", r.Stdout)})
|
||||
return true, s
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -82,23 +83,24 @@ func detectSUSE(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
}
|
||||
|
||||
func (o *suse) parseOSRelease(content string) (name string, ver string) {
|
||||
if strings.Contains(content, "ID=opensuse") {
|
||||
//TODO check opensuse or opensuse.leap
|
||||
if strings.Contains(content, `CPE_NAME="cpe:/o:opensuse:opensuse`) {
|
||||
name = constant.OpenSUSE
|
||||
} else if strings.Contains(content, `NAME="SLES"`) {
|
||||
name = constant.SUSEEnterpriseServer
|
||||
} else if strings.Contains(content, `NAME="SLES_SAP"`) {
|
||||
} else if strings.Contains(content, `CPE_NAME="cpe:/o:opensuse:tumbleweed`) {
|
||||
return constant.OpenSUSE, "tumbleweed"
|
||||
} else if strings.Contains(content, `CPE_NAME="cpe:/o:opensuse:leap`) {
|
||||
name = constant.OpenSUSELeap
|
||||
} else if strings.Contains(content, `CPE_NAME="cpe:/o:suse:sles`) {
|
||||
name = constant.SUSEEnterpriseServer
|
||||
} else if strings.Contains(content, `CPE_NAME="cpe:/o:suse:sled`) {
|
||||
name = constant.SUSEEnterpriseDesktop
|
||||
} else {
|
||||
logging.Log.Warnf("Failed to parse SUSE edition: %s", content)
|
||||
return "unknown", "unknown"
|
||||
return "", ""
|
||||
}
|
||||
|
||||
re := regexp.MustCompile(`VERSION_ID=\"(.+)\"`)
|
||||
result := re.FindStringSubmatch(strings.TrimSpace(content))
|
||||
if len(result) != 2 {
|
||||
logging.Log.Warnf("Failed to parse SUSE Linux version: %s", content)
|
||||
return "unknown", "unknown"
|
||||
return "", ""
|
||||
}
|
||||
return name, result[1]
|
||||
}
|
||||
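The rewritten parseOSRelease above keys off CPE_NAME rather than NAME/ID, which lets one check distinguish Tumbleweed, Leap, SLES, SLES_SAP, and SLED from a single /etc/os-release. A simplified standalone sketch of the same matching (the returned family strings are placeholders, not the actual values of the vuls constant package):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

// detect is a simplified sketch of the CPE_NAME matching above. Note that the
// "cpe:/o:suse:sles" prefix also covers SLES_SAP.
func detect(osRelease string) (family, version string) {
	switch {
	case strings.Contains(osRelease, `CPE_NAME="cpe:/o:opensuse:tumbleweed`):
		return "openSUSE", "tumbleweed"
	case strings.Contains(osRelease, `CPE_NAME="cpe:/o:opensuse:leap`):
		family = "openSUSE Leap"
	case strings.Contains(osRelease, `CPE_NAME="cpe:/o:suse:sles`):
		family = "SUSE Linux Enterprise Server"
	case strings.Contains(osRelease, `CPE_NAME="cpe:/o:suse:sled`):
		family = "SUSE Linux Enterprise Desktop"
	default:
		return "", ""
	}
	re := regexp.MustCompile(`VERSION_ID="(.+)"`)
	if m := re.FindStringSubmatch(osRelease); len(m) == 2 {
		return family, m[1]
	}
	return "", ""
}

func main() {
	fmt.Println(detect("CPE_NAME=\"cpe:/o:opensuse:leap:42.3\"\nVERSION_ID=\"42.3\""))
}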
@@ -108,14 +110,60 @@ func (o *suse) checkScanMode() error {
|
||||
}
|
||||
|
||||
func (o *suse) checkDeps() error {
|
||||
o.log.Infof("Dependencies... No need")
|
||||
return nil
|
||||
if o.getServerInfo().Mode.IsFast() {
|
||||
return o.execCheckDeps(o.depsFast())
|
||||
} else if o.getServerInfo().Mode.IsFastRoot() {
|
||||
return o.execCheckDeps(o.depsFastRoot())
|
||||
} else if o.getServerInfo().Mode.IsDeep() {
|
||||
return o.execCheckDeps(o.depsDeep())
|
||||
}
|
||||
return xerrors.New("Unknown scan mode")
|
||||
}
|
||||
|
||||
func (o *suse) depsFast() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (o *suse) depsFastRoot() []string {
|
||||
return []string{}
|
||||
}
|
||||
|
||||
func (o *suse) depsDeep() []string {
|
||||
return o.depsFastRoot()
|
||||
}
|
||||
|
||||
func (o *suse) checkIfSudoNoPasswd() error {
|
||||
// SUSE doesn't need root privilege
|
||||
o.log.Infof("sudo ... No need")
|
||||
return nil
|
||||
if o.getServerInfo().Mode.IsFast() {
|
||||
return o.execCheckIfSudoNoPasswd(o.sudoNoPasswdCmdsFast())
|
||||
} else if o.getServerInfo().Mode.IsFastRoot() {
|
||||
return o.execCheckIfSudoNoPasswd(o.sudoNoPasswdCmdsFastRoot())
|
||||
} else {
|
||||
return o.execCheckIfSudoNoPasswd(o.sudoNoPasswdCmdsDeep())
|
||||
}
|
||||
}
|
||||
|
||||
func (o *suse) sudoNoPasswdCmdsFast() []cmd {
|
||||
return []cmd{}
|
||||
}
|
||||
|
||||
func (o *suse) sudoNoPasswdCmdsDeep() []cmd {
|
||||
return o.sudoNoPasswdCmdsFastRoot()
|
||||
}
|
||||
|
||||
func (o *suse) sudoNoPasswdCmdsFastRoot() []cmd {
|
||||
if !o.ServerInfo.IsContainer() {
|
||||
return []cmd{
|
||||
{"zypper ps -s", exitStatusZero},
|
||||
{"which which", exitStatusZero},
|
||||
{"stat /proc/1/exe", exitStatusZero},
|
||||
{"ls -l /proc/1/exe", exitStatusZero},
|
||||
{"cat /proc/1/maps", exitStatusZero},
|
||||
{"lsof -i -P -n", exitStatusZero},
|
||||
}
|
||||
}
|
||||
return []cmd{
|
||||
{"zypper ps -s", exitStatusZero},
|
||||
}
|
||||
}
|
||||
|
||||
func (o *suse) scanPackages() error {
|
||||
@@ -176,13 +224,14 @@ func (o *suse) scanUpdatablePackages() (models.Packages, error) {
|
||||
return o.parseZypperLULines(r.Stdout)
|
||||
}
|
||||
|
||||
var warnRepoPattern = regexp.MustCompile(`Warning: Repository '.+' appears to be outdated\. Consider using a different mirror or server\.`)
|
||||
|
||||
func (o *suse) parseZypperLULines(stdout string) (models.Packages, error) {
|
||||
updatables := models.Packages{}
|
||||
scanner := bufio.NewScanner(strings.NewReader(stdout))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
if strings.Index(line, "S | Repository") != -1 ||
|
||||
strings.Index(line, "--+----------------") != -1 {
|
||||
if strings.Contains(line, "S | Repository") || strings.Contains(line, "--+----------------") || warnRepoPattern.MatchString(line) {
|
||||
continue
|
||||
}
|
||||
pack, err := o.parseZypperLUOneLine(line)
|
||||
@@ -213,3 +262,107 @@ func (o *suse) hasZypperColorOption() bool {
|
||||
r := o.exec(util.PrependProxyEnv(cmd), noSudo)
|
||||
return len(r.Stdout) > 0
|
||||
}
|
||||
|
||||
func (o *suse) postScan() error {
|
||||
if o.isExecYumPS() {
|
||||
if err := o.pkgPs(o.getOwnerPkgs); err != nil {
|
||||
err = xerrors.Errorf("Failed to execute zypper-ps: %w", err)
|
||||
o.log.Warnf("err: %+v", err)
|
||||
o.warns = append(o.warns, err)
|
||||
// Only warning this error
|
||||
}
|
||||
}
|
||||
|
||||
if o.isExecNeedsRestarting() {
|
||||
if err := o.needsRestarting(); err != nil {
|
||||
err = xerrors.Errorf("Failed to execute need-restarting: %w", err)
|
||||
o.log.Warnf("err: %+v", err)
|
||||
o.warns = append(o.warns, err)
|
||||
// Only warning this error
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *suse) needsRestarting() error {
|
||||
initName, err := o.detectInitSystem()
|
||||
if err != nil {
|
||||
o.log.Warn(err)
|
||||
// continue scanning
|
||||
}
|
||||
|
||||
cmd := "LANGUAGE=en_US.UTF-8 zypper ps -s"
|
||||
r := o.exec(cmd, sudo)
|
||||
if !r.isSuccess() {
|
||||
return xerrors.Errorf("Failed to SSH: %w", r)
|
||||
}
|
||||
procs := o.parseNeedsRestarting(r.Stdout)
|
||||
for _, proc := range procs {
|
||||
//TODO refactor
|
||||
fqpn, err := o.procPathToFQPN(proc.Path)
|
||||
if err != nil {
|
||||
o.log.Warnf("Failed to detect a package name of need restarting process from the command path: %s, %s",
|
||||
proc.Path, err)
|
||||
continue
|
||||
}
|
||||
pack, err := o.Packages.FindByFQPN(fqpn)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if initName == systemd {
|
||||
name, err := o.detectServiceName(proc.PID)
|
||||
if err != nil {
|
||||
o.log.Warn(err)
|
||||
// continue scanning
|
||||
}
|
||||
proc.ServiceName = name
|
||||
proc.InitSystem = systemd
|
||||
}
|
||||
pack.NeedRestartProcs = append(pack.NeedRestartProcs, proc)
|
||||
o.Packages[pack.Name] = *pack
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (o *suse) parseNeedsRestarting(stdout string) []models.NeedRestartProcess {
|
||||
procs := []models.NeedRestartProcess{}
|
||||
|
||||
// PID | PPID | UID | User | Command | Service
|
||||
// ----+------+-----+------+---------+-----------
|
||||
// 9 | 7 | 0 | root | bash | containerd
|
||||
// 53 | 9 | 0 | root | zypper | containerd
|
||||
// 55 | 53 | 0 | root | lsof |
|
||||
|
||||
scanner := bufio.NewScanner(strings.NewReader(stdout))
|
||||
for scanner.Scan() {
|
||||
line := scanner.Text()
|
||||
ss := strings.Split(line, " | ")
|
||||
if len(ss) < 6 {
|
||||
continue
|
||||
}
|
||||
pid := strings.TrimSpace(ss[0])
|
||||
if strings.HasPrefix(pid, "PID") {
|
||||
continue
|
||||
}
|
||||
// https://unix.stackexchange.com/a/419375
|
||||
if pid == "1" {
|
||||
continue
|
||||
}
|
||||
|
||||
cmd := strings.TrimSpace(ss[4])
|
||||
whichCmd := fmt.Sprintf("LANGUAGE=en_US.UTF-8 which %s", cmd)
|
||||
r := o.exec(whichCmd, sudo)
|
||||
if !r.isSuccess() {
|
||||
o.log.Debugf("Failed to exec which %s: %s", cmd, r)
|
||||
continue
|
||||
}
|
||||
path := strings.TrimSpace(r.Stdout)
|
||||
|
||||
procs = append(procs, models.NeedRestartProcess{
|
||||
PID: pid,
|
||||
Path: path,
|
||||
HasInit: true,
|
||||
})
|
||||
}
|
||||
return procs
|
||||
}
|
||||
|
||||
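parseNeedsRestarting above walks the pipe-separated table printed by `zypper ps -s`, skipping the header row and PID 1, then resolves each command to a path with which. A small self-contained sketch of splitting one such row (the sample row is illustrative):

package main

import (
	"fmt"
	"strings"
)

func main() {
	// One data row from `zypper ps -s`; columns: PID | PPID | UID | User | Command | Service.
	line := "53   | 9    | 0   | root | zypper  | containerd"
	ss := strings.Split(line, " | ")
	if len(ss) < 6 {
		fmt.Println("not a process row")
		return
	}
	pid := strings.TrimSpace(ss[0])
	command := strings.TrimSpace(ss[4])
	fmt.Printf("process %s (%s) uses deleted files and may need a restart\n", pid, command)
}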
@@ -105,28 +105,41 @@ func TestParseOSRelease(t *testing.T) {
|
||||
ver string
|
||||
}{
|
||||
{
|
||||
in: `NAME="openSUSE Leap"
|
||||
ID=opensuse
|
||||
VERSION_ID="42.3.4"`,
|
||||
in: `CPE_NAME="cpe:/o:opensuse:opensuse:13.2"
|
||||
VERSION_ID="13.2"`,
|
||||
name: constant.OpenSUSE,
|
||||
ver: "13.2",
|
||||
},
|
||||
{
|
||||
in: `CPE_NAME="cpe:/o:opensuse:tumbleweed:20220124"
|
||||
VERSION_ID="20220124"`,
|
||||
name: constant.OpenSUSE,
|
||||
ver: "tumbleweed",
|
||||
},
|
||||
{
|
||||
in: `CPE_NAME="cpe:/o:opensuse:leap:42.3"
|
||||
VERSION_ID="42.3.4"`,
|
||||
name: constant.OpenSUSELeap,
|
||||
ver: "42.3.4",
|
||||
},
|
||||
{
|
||||
in: `NAME="SLES"
|
||||
VERSION="12-SP1"
|
||||
VERSION_ID="12.1"
|
||||
ID="sles"`,
|
||||
in: `CPE_NAME="cpe:/o:suse:sles:12:sp1"
|
||||
VERSION_ID="12.1"`,
|
||||
name: constant.SUSEEnterpriseServer,
|
||||
ver: "12.1",
|
||||
},
|
||||
{
|
||||
in: `NAME="SLES_SAP"
|
||||
VERSION="12-SP1"
|
||||
VERSION_ID="12.1.0.1"
|
||||
ID="sles"`,
|
||||
in: `CPE_NAME="cpe:/o:suse:sles_sap:12:sp1"
|
||||
VERSION_ID="12.1.0.1"`,
|
||||
name: constant.SUSEEnterpriseServer,
|
||||
ver: "12.1.0.1",
|
||||
},
|
||||
{
|
||||
in: `CPE_NAME="cpe:/o:suse:sled:15"
|
||||
VERSION_ID="15"`,
|
||||
name: constant.SUSEEnterpriseDesktop,
|
||||
ver: "15",
|
||||
},
|
||||
}
|
||||
|
||||
r := newSUSE(config.ServerInfo{})
|
||||
|
||||