Compare commits
80 Commits
v0.20.3
...
MaineK00n/
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bbb8fcbb42 | ||
|
|
a23abf48fd | ||
|
|
6e14a2dee6 | ||
|
|
e12fa0ba64 | ||
|
|
fa5b875c34 | ||
|
|
f9276a7ea8 | ||
|
|
457a3a9627 | ||
|
|
4253550c99 | ||
|
|
97cf033ed6 | ||
|
|
5a6980436a | ||
|
|
6271ec522e | ||
|
|
83681ad4f0 | ||
|
|
779833872b | ||
|
|
5c79720f56 | ||
|
|
b2c5b79672 | ||
|
|
b0cc908b73 | ||
|
|
ea3d8a6d0b | ||
|
|
7475b27f6a | ||
|
|
ef80838ddd | ||
|
|
b445b71ca5 | ||
|
|
1ccc5f031a | ||
|
|
8356e976c4 | ||
|
|
3cc7e92ce5 | ||
|
|
046a29467b | ||
|
|
ef5ab8eaf0 | ||
|
|
c8daa5c982 | ||
|
|
9309081b3d | ||
|
|
f541c32d1f | ||
|
|
79a8b62105 | ||
|
|
74c91a5a21 | ||
|
|
6787ab45c5 | ||
|
|
f631e9e603 | ||
|
|
2ab48afe47 | ||
|
|
53ccd61687 | ||
|
|
b91a7b75e2 | ||
|
|
333eae06ea | ||
|
|
93d401c70c | ||
|
|
99dc8e892f | ||
|
|
fb904f0543 | ||
|
|
d4d33fc81d | ||
|
|
a1d3fbf66f | ||
|
|
2cdfbe3bb4 | ||
|
|
ac8290119d | ||
|
|
abdb081af7 | ||
|
|
e506125017 | ||
|
|
8ccaa8c3ef | ||
|
|
de1ed8ecaa | ||
|
|
947d668452 | ||
|
|
db21149f00 | ||
|
|
7f35f4e661 | ||
|
|
6682232b5c | ||
|
|
984debe929 | ||
|
|
a528362663 | ||
|
|
ee97d98c39 | ||
|
|
4e486dae1d | ||
|
|
897fef24a3 | ||
|
|
73f0adad95 | ||
|
|
704492963c | ||
|
|
1927ed344c | ||
|
|
ad2edbb844 | ||
|
|
bfe0db77b4 | ||
|
|
ff3b9cdc16 | ||
|
|
2deb1b9d32 | ||
|
|
ca64d7fc31 | ||
|
|
554ecc437e | ||
|
|
f6cd4d9223 | ||
|
|
03c59866d4 | ||
|
|
1d97e91341 | ||
|
|
96333f38c9 | ||
|
|
8b5d1c8e92 | ||
|
|
dea80f860c | ||
|
|
6eb4c5a5fe | ||
|
|
b219a8495e | ||
|
|
eb87d5d4e1 | ||
|
|
6963442a5e | ||
|
|
f7299b9dba | ||
|
|
379fc8a1a1 | ||
|
|
947fbbb29e | ||
|
|
06d2032c9c | ||
|
|
d055c48827 |
2
.github/workflows/golangci.yml
vendored
2
.github/workflows/golangci.yml
vendored
@@ -19,7 +19,7 @@ jobs:
|
||||
uses: golangci/golangci-lint-action@v3
|
||||
with:
|
||||
# Optional: version of golangci-lint to use in form of v1.2 or v1.2.3 or `latest` to use the latest version
|
||||
version: v1.46
|
||||
version: v1.50.1
|
||||
args: --timeout=10m
|
||||
|
||||
# Optional: working directory, useful for monorepos
|
||||
|
||||
10
.github/workflows/goreleaser.yml
vendored
10
.github/workflows/goreleaser.yml
vendored
@@ -12,9 +12,6 @@ jobs:
|
||||
-
|
||||
name: Checkout
|
||||
uses: actions/checkout@v3
|
||||
-
|
||||
name: install package for cross compile
|
||||
run: sudo apt update && sudo apt install -y gcc-aarch64-linux-gnu
|
||||
-
|
||||
name: Unshallow
|
||||
run: git fetch --prune --unshallow
|
||||
@@ -22,13 +19,14 @@ jobs:
|
||||
name: Set up Go
|
||||
uses: actions/setup-go@v3
|
||||
with:
|
||||
go-version: 1.18
|
||||
go-version-file: go.mod
|
||||
-
|
||||
name: Run GoReleaser
|
||||
uses: goreleaser/goreleaser-action@v2
|
||||
uses: goreleaser/goreleaser-action@v4
|
||||
with:
|
||||
distribution: goreleaser
|
||||
version: latest
|
||||
args: release --rm-dist
|
||||
args: release --clean
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -20,3 +20,5 @@ vuls
|
||||
!cmd/vuls
|
||||
future-vuls
|
||||
trivy-to-vuls
|
||||
snmp2cpe
|
||||
!snmp2cpe/
|
||||
@@ -1,34 +1,18 @@
|
||||
project_name: vuls
|
||||
env:
|
||||
- GO111MODULE=on
|
||||
release:
|
||||
github:
|
||||
owner: future-architect
|
||||
name: vuls
|
||||
builds:
|
||||
- id: vuls-amd64
|
||||
- id: vuls
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
goarch:
|
||||
- amd64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- CC=x86_64-linux-gnu-gcc
|
||||
main: ./cmd/vuls/main.go
|
||||
flags:
|
||||
- -a
|
||||
ldflags:
|
||||
- -s -w -X github.com/future-architect/vuls/config.Version={{.Version}} -X github.com/future-architect/vuls/config.Revision={{.Commit}}-{{ .CommitDate }}
|
||||
binary: vuls
|
||||
|
||||
- id: vuls-arm64
|
||||
goos:
|
||||
- linux
|
||||
goarch:
|
||||
- arm64
|
||||
env:
|
||||
- CGO_ENABLED=1
|
||||
- CC=aarch64-linux-gnu-gcc
|
||||
main: ./cmd/vuls/main.go
|
||||
flags:
|
||||
- -a
|
||||
@@ -41,6 +25,7 @@ builds:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
goarch:
|
||||
- 386
|
||||
- amd64
|
||||
@@ -60,6 +45,7 @@ builds:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
goarch:
|
||||
- 386
|
||||
- amd64
|
||||
@@ -68,6 +54,8 @@ builds:
|
||||
tags:
|
||||
- scanner
|
||||
main: ./contrib/trivy/cmd/main.go
|
||||
ldflags:
|
||||
- -s -w -X github.com/future-architect/vuls/config.Version={{.Version}} -X github.com/future-architect/vuls/config.Revision={{.Commit}}-{{ .CommitDate }}
|
||||
binary: trivy-to-vuls
|
||||
|
||||
- id: future-vuls
|
||||
@@ -75,6 +63,7 @@ builds:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
goarch:
|
||||
- 386
|
||||
- amd64
|
||||
@@ -84,16 +73,37 @@ builds:
|
||||
- -a
|
||||
tags:
|
||||
- scanner
|
||||
ldflags:
|
||||
- -s -w -X github.com/future-architect/vuls/config.Version={{.Version}} -X github.com/future-architect/vuls/config.Revision={{.Commit}}-{{ .CommitDate }}
|
||||
main: ./contrib/future-vuls/cmd/main.go
|
||||
binary: future-vuls
|
||||
|
||||
- id: snmp2cpe
|
||||
env:
|
||||
- CGO_ENABLED=0
|
||||
goos:
|
||||
- linux
|
||||
- windows
|
||||
goarch:
|
||||
- 386
|
||||
- amd64
|
||||
- arm
|
||||
- arm64
|
||||
flags:
|
||||
- -a
|
||||
tags:
|
||||
- scanner
|
||||
ldflags:
|
||||
- -s -w -X github.com/future-architect/vuls/config.Version={{.Version}} -X github.com/future-architect/vuls/config.Revision={{.Commit}}-{{ .CommitDate }}
|
||||
main: ./contrib/snmp2cpe/cmd/main.go
|
||||
binary: snmp2cpe
|
||||
|
||||
archives:
|
||||
|
||||
- id: vuls
|
||||
name_template: '{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}'
|
||||
builds:
|
||||
- vuls-amd64
|
||||
- vuls-arm64
|
||||
- vuls
|
||||
format: tar.gz
|
||||
files:
|
||||
- LICENSE
|
||||
@@ -129,5 +139,16 @@ archives:
|
||||
- LICENSE
|
||||
- README*
|
||||
- CHANGELOG.md
|
||||
|
||||
- id: snmp2cpe
|
||||
name_template: '{{ .Binary }}_{{.Version}}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ .Arm }}{{ end }}'
|
||||
builds:
|
||||
- snmp2cpe
|
||||
format: tar.gz
|
||||
files:
|
||||
- LICENSE
|
||||
- README*
|
||||
- CHANGELOG.md
|
||||
|
||||
snapshot:
|
||||
name_template: SNAPSHOT-{{ .Commit }}
|
||||
|
||||
43
GNUmakefile
43
GNUmakefile
@@ -18,10 +18,7 @@ VERSION := $(shell git describe --tags --abbrev=0)
|
||||
REVISION := $(shell git rev-parse --short HEAD)
|
||||
BUILDTIME := $(shell date "+%Y%m%d_%H%M%S")
|
||||
LDFLAGS := -X 'github.com/future-architect/vuls/config.Version=$(VERSION)' -X 'github.com/future-architect/vuls/config.Revision=build-$(BUILDTIME)_$(REVISION)'
|
||||
GO := GO111MODULE=on go
|
||||
CGO_UNABLED := CGO_ENABLED=0 go
|
||||
GO_OFF := GO111MODULE=off go
|
||||
|
||||
GO := CGO_ENABLED=0 go
|
||||
|
||||
all: build test
|
||||
|
||||
@@ -32,28 +29,25 @@ install: ./cmd/vuls/main.go
|
||||
$(GO) install -ldflags "$(LDFLAGS)" ./cmd/vuls
|
||||
|
||||
build-scanner: ./cmd/scanner/main.go
|
||||
$(CGO_UNABLED) build -tags=scanner -a -ldflags "$(LDFLAGS)" -o vuls ./cmd/scanner
|
||||
$(GO) build -tags=scanner -a -ldflags "$(LDFLAGS)" -o vuls ./cmd/scanner
|
||||
|
||||
install-scanner: ./cmd/scanner/main.go
|
||||
$(CGO_UNABLED) install -tags=scanner -ldflags "$(LDFLAGS)" ./cmd/scanner
|
||||
$(GO) install -tags=scanner -ldflags "$(LDFLAGS)" ./cmd/scanner
|
||||
|
||||
lint:
|
||||
$(GO) install github.com/mgechev/revive@latest
|
||||
go install github.com/mgechev/revive@latest
|
||||
revive -config ./.revive.toml -formatter plain $(PKGS)
|
||||
|
||||
vet:
|
||||
echo $(PKGS) | xargs env $(GO) vet || exit;
|
||||
|
||||
golangci:
|
||||
$(GO) install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
|
||||
go install github.com/golangci/golangci-lint/cmd/golangci-lint@latest
|
||||
golangci-lint run
|
||||
|
||||
fmt:
|
||||
gofmt -s -w $(SRCS)
|
||||
|
||||
mlint:
|
||||
$(foreach file,$(SRCS),gometalinter $(file) || exit;)
|
||||
|
||||
fmtcheck:
|
||||
$(foreach file,$(SRCS),gofmt -s -d $(file);)
|
||||
|
||||
@@ -62,9 +56,6 @@ pretest: lint vet fmtcheck
|
||||
test: pretest
|
||||
$(GO) test -cover -v ./... || exit;
|
||||
|
||||
unused:
|
||||
$(foreach pkg,$(PKGS),unused $(pkg);)
|
||||
|
||||
cov:
|
||||
@ go get -v github.com/axw/gocov/gocov
|
||||
@ go get golang.org/x/tools/cmd/cover
|
||||
@@ -81,14 +72,18 @@ build-trivy-to-vuls: ./contrib/trivy/cmd/main.go
|
||||
build-future-vuls: ./contrib/future-vuls/cmd/main.go
|
||||
$(GO) build -a -ldflags "$(LDFLAGS)" -o future-vuls ./contrib/future-vuls/cmd
|
||||
|
||||
# snmp2cpe
|
||||
build-snmp2cpe: ./contrib/snmp2cpe/cmd/main.go
|
||||
$(GO) build -a -ldflags "$(LDFLAGS)" -o snmp2cpe ./contrib/snmp2cpe/cmd
|
||||
|
||||
# integration-test
|
||||
BASE_DIR := '${PWD}/integration/results'
|
||||
# $(shell mkdir -p ${BASE_DIR})
|
||||
NOW=$(shell date --iso-8601=seconds)
|
||||
CURRENT := `find ${BASE_DIR} -type d -exec basename {} \; | sort -nr | head -n 1`
|
||||
NOW=$(shell date '+%Y-%m-%dT%H-%M-%S%z')
|
||||
NOW_JSON_DIR := '${BASE_DIR}/$(NOW)'
|
||||
ONE_SEC_AFTER=$(shell date -d '+1 second' --iso-8601=seconds)
|
||||
ONE_SEC_AFTER=$(shell date -d '+1 second' '+%Y-%m-%dT%H-%M-%S%z')
|
||||
ONE_SEC_AFTER_JSON_DIR := '${BASE_DIR}/$(ONE_SEC_AFTER)'
|
||||
LIBS := 'bundler' 'pip' 'pipenv' 'poetry' 'composer' 'npm' 'yarn' 'pnpm' 'cargo' 'gomod' 'gosum' 'gobinary' 'jar' 'pom' 'nuget-lock' 'nuget-config' 'dotnet-deps' 'nvd_exact' 'nvd_rough' 'nvd_vendor_product' 'nvd_match_no_jvn' 'jvn_vendor_product' 'jvn_vendor_product_nover'
|
||||
LIBS := 'bundler' 'pip' 'pipenv' 'poetry' 'composer' 'npm' 'yarn' 'pnpm' 'cargo' 'gomod' 'gosum' 'gobinary' 'jar' 'pom' 'gradle' 'nuget-lock' 'nuget-config' 'dotnet-deps' 'conan' 'nvd_exact' 'nvd_rough' 'nvd_vendor_product' 'nvd_match_no_jvn' 'jvn_vendor_product' 'jvn_vendor_product_nover'
|
||||
|
||||
diff:
|
||||
# git clone git@github.com:vulsio/vulsctl.git
|
||||
@@ -106,14 +101,14 @@ endif
|
||||
mkdir -p ${NOW_JSON_DIR}
|
||||
sleep 1
|
||||
./vuls.old scan -config=./integration/int-config.toml --results-dir=${BASE_DIR} ${LIBS}
|
||||
cp ${BASE_DIR}/current/*.json ${NOW_JSON_DIR}
|
||||
cp ${BASE_DIR}/$(CURRENT)/*.json ${NOW_JSON_DIR}
|
||||
- cp integration/data/results/*.json ${NOW_JSON_DIR}
|
||||
./vuls.old report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-config.toml ${NOW}
|
||||
|
||||
mkdir -p ${ONE_SEC_AFTER_JSON_DIR}
|
||||
sleep 1
|
||||
./vuls.new scan -config=./integration/int-config.toml --results-dir=${BASE_DIR} ${LIBS}
|
||||
cp ${BASE_DIR}/current/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
cp ${BASE_DIR}/$(CURRENT)/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
- cp integration/data/results/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
./vuls.new report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-config.toml ${ONE_SEC_AFTER}
|
||||
|
||||
@@ -139,14 +134,14 @@ endif
|
||||
mkdir -p ${NOW_JSON_DIR}
|
||||
sleep 1
|
||||
./vuls.old scan -config=./integration/int-config.toml --results-dir=${BASE_DIR} ${LIBS}
|
||||
cp -f ${BASE_DIR}/current/*.json ${NOW_JSON_DIR}
|
||||
cp -f ${BASE_DIR}/$(CURRENT)/*.json ${NOW_JSON_DIR}
|
||||
- cp integration/data/results/*.json ${NOW_JSON_DIR}
|
||||
./vuls.old report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-redis-config.toml ${NOW}
|
||||
|
||||
mkdir -p ${ONE_SEC_AFTER_JSON_DIR}
|
||||
sleep 1
|
||||
./vuls.new scan -config=./integration/int-config.toml --results-dir=${BASE_DIR} ${LIBS}
|
||||
cp -f ${BASE_DIR}/current/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
cp -f ${BASE_DIR}/$(CURRENT)/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
- cp integration/data/results/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
./vuls.new report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-redis-config.toml ${ONE_SEC_AFTER}
|
||||
|
||||
@@ -163,14 +158,14 @@ endif
|
||||
sleep 1
|
||||
# new vs new
|
||||
./vuls.new scan -config=./integration/int-config.toml --results-dir=${BASE_DIR} ${LIBS}
|
||||
cp -f ${BASE_DIR}/current/*.json ${NOW_JSON_DIR}
|
||||
cp -f ${BASE_DIR}/$(CURRENT)/*.json ${NOW_JSON_DIR}
|
||||
cp integration/data/results/*.json ${NOW_JSON_DIR}
|
||||
./vuls.new report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-config.toml ${NOW}
|
||||
|
||||
mkdir -p ${ONE_SEC_AFTER_JSON_DIR}
|
||||
sleep 1
|
||||
./vuls.new scan -config=./integration/int-config.toml --results-dir=${BASE_DIR} ${LIBS}
|
||||
cp -f ${BASE_DIR}/current/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
cp -f ${BASE_DIR}/$(CURRENT)/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
cp integration/data/results/*.json ${ONE_SEC_AFTER_JSON_DIR}
|
||||
./vuls.new report --format-json --refresh-cve --results-dir=${BASE_DIR} -config=./integration/int-redis-config.toml ${ONE_SEC_AFTER}
|
||||
|
||||
|
||||
15
README.md
15
README.md
@@ -3,14 +3,13 @@
|
||||
|
||||
[](http://goo.gl/forms/xm5KFo35tu)
|
||||
[](https://github.com/future-architect/vuls/blob/master/LICENSE)
|
||||
[](https://travis-ci.org/future-architect/vuls)
|
||||
[](https://goreportcard.com/report/github.com/future-architect/vuls)
|
||||
[](https://github.com/future-architect/vuls/graphs/contributors)
|
||||
|
||||

|
||||
|
||||
Vulnerability scanner for Linux/FreeBSD, agent-less, written in Go.
|
||||
We have a slack team. [Join slack team](http://goo.gl/forms/xm5KFo35tu)
|
||||
Vulnerability scanner for Linux/FreeBSD, agent-less, written in Go.
|
||||
We have a slack team. [Join slack team](https://join.slack.com/t/vuls-github/shared_invite/zt-1fculjwj4-6nex2JNE7DpOSiKZ1ztDFw)
|
||||
Twitter: [@vuls_en](https://twitter.com/vuls_en)
|
||||
|
||||

|
||||
@@ -48,10 +47,11 @@ Vuls is a tool created to solve the problems listed above. It has the following
|
||||
|
||||
### Scan for any vulnerabilities in Linux/FreeBSD Server
|
||||
|
||||
[Supports major Linux/FreeBSD](https://vuls.io/docs/en/supported-os.html)
|
||||
[Supports major Linux/FreeBSD/Windows](https://vuls.io/docs/en/supported-os.html)
|
||||
|
||||
- Alpine, Amazon Linux, CentOS, AlmaLinux, Rocky Linux, Debian, Oracle Linux, Raspbian, RHEL, openSUSE, openSUSE Leap, SUSE Enterprise Linux, Fedora, and Ubuntu
|
||||
- FreeBSD
|
||||
- Windows
|
||||
- Cloud, on-premise, Running Docker Container
|
||||
|
||||
### High-quality scan
|
||||
@@ -72,6 +72,7 @@ Vuls is a tool created to solve the problems listed above. It has the following
|
||||
- [Red Hat Security Advisories](https://access.redhat.com/security/security-updates/)
|
||||
- [Debian Security Bug Tracker](https://security-tracker.debian.org/tracker/)
|
||||
- [Ubuntu CVE Tracker](https://people.canonical.com/~ubuntu-security/cve/)
|
||||
- [Microsoft CVRF](https://api.msrc.microsoft.com/cvrf/v2.0/swagger/index)
|
||||
|
||||
- Commands(yum, zypper, pkg-audit)
|
||||
- RHSA / ALAS / ELSA / FreeBSD-SA
|
||||
@@ -95,11 +96,7 @@ Vuls is a tool created to solve the problems listed above. It has the following
|
||||
- [mitre/cti](https://github.com/mitre/cti)
|
||||
|
||||
- Libraries
|
||||
- [Node.js Security Working Group](https://github.com/nodejs/security-wg)
|
||||
- [Ruby Advisory Database](https://github.com/rubysec/ruby-advisory-db)
|
||||
- [Safety DB(Python)](https://github.com/pyupio/safety-db)
|
||||
- [PHP Security Advisories Database](https://github.com/FriendsOfPHP/security-advisories)
|
||||
- [RustSec Advisory Database](https://github.com/RustSec/advisory-db)
|
||||
- [aquasecurity/vuln-list](https://github.com/aquasecurity/vuln-list)
|
||||
|
||||
- WordPress
|
||||
- [wpscan](https://wpscan.com/api)
|
||||
|
||||
2
cache/bolt.go
vendored
2
cache/bolt.go
vendored
@@ -48,7 +48,7 @@ func (b Bolt) Close() error {
|
||||
return b.db.Close()
|
||||
}
|
||||
|
||||
// CreateBucketIfNotExists creates a bucket that is specified by arg.
|
||||
// CreateBucketIfNotExists creates a bucket that is specified by arg.
|
||||
func (b *Bolt) createBucketIfNotExists(name string) error {
|
||||
return b.db.Update(func(tx *bolt.Tx) error {
|
||||
_, err := tx.CreateBucketIfNotExists([]byte(name))
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !windows
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
@@ -7,9 +9,10 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/asaskevich/govalidator"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// Version of Vuls
|
||||
@@ -21,7 +24,7 @@ var Revision string
|
||||
// Conf has Configuration
|
||||
var Conf Config
|
||||
|
||||
//Config is struct of Configuration
|
||||
// Config is struct of Configuration
|
||||
type Config struct {
|
||||
logging.LogOpts
|
||||
|
||||
@@ -117,6 +120,9 @@ func (c Config) ValidateOnScan() bool {
|
||||
if es := server.PortScan.Validate(); 0 < len(es) {
|
||||
errs = append(errs, es...)
|
||||
}
|
||||
if es := server.Windows.Validate(); 0 < len(es) {
|
||||
errs = append(errs, es...)
|
||||
}
|
||||
}
|
||||
|
||||
for _, err := range errs {
|
||||
@@ -240,10 +246,12 @@ type ServerInfo struct {
|
||||
Optional map[string]interface{} `toml:"optional,omitempty" json:"optional,omitempty"` // Optional key-value set that will be outputted to JSON
|
||||
Lockfiles []string `toml:"lockfiles,omitempty" json:"lockfiles,omitempty"` // ie) path/to/package-lock.json
|
||||
FindLock bool `toml:"findLock,omitempty" json:"findLock,omitempty"`
|
||||
FindLockDirs []string `toml:"findLockDirs,omitempty" json:"findLockDirs,omitempty"`
|
||||
Type string `toml:"type,omitempty" json:"type,omitempty"` // "pseudo" or ""
|
||||
IgnoredJSONKeys []string `toml:"ignoredJSONKeys,omitempty" json:"ignoredJSONKeys,omitempty"`
|
||||
WordPress *WordPressConf `toml:"wordpress,omitempty" json:"wordpress,omitempty"`
|
||||
PortScan *PortScanConf `toml:"portscan,omitempty" json:"portscan,omitempty"`
|
||||
Windows *WindowsConf `toml:"windows,omitempty" json:"windows,omitempty"`
|
||||
|
||||
IPv4Addrs []string `toml:"-" json:"ipv4Addrs,omitempty"`
|
||||
IPv6Addrs []string `toml:"-" json:"ipv6Addrs,omitempty"`
|
||||
@@ -270,6 +278,7 @@ type WordPressConf struct {
|
||||
OSUser string `toml:"osUser,omitempty" json:"osUser,omitempty"`
|
||||
DocRoot string `toml:"docRoot,omitempty" json:"docRoot,omitempty"`
|
||||
CmdPath string `toml:"cmdPath,omitempty" json:"cmdPath,omitempty"`
|
||||
NoSudo bool `toml:"noSudo,omitempty" json:"noSudo,omitempty"`
|
||||
}
|
||||
|
||||
// IsZero return whether this struct is not specified in config.toml
|
||||
|
||||
351
config/config_windows.go
Normal file
351
config/config_windows.go
Normal file
@@ -0,0 +1,351 @@
|
||||
//go:build windows
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/asaskevich/govalidator"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
)
|
||||
|
||||
// Version of Vuls
|
||||
var Version = "`make build` or `make install` will show the version"
|
||||
|
||||
// Revision of Git
|
||||
var Revision string
|
||||
|
||||
// Conf has Configuration
|
||||
var Conf Config
|
||||
|
||||
// Config is struct of Configuration
|
||||
type Config struct {
|
||||
logging.LogOpts
|
||||
|
||||
// scan, report
|
||||
HTTPProxy string `valid:"url" json:"httpProxy,omitempty"`
|
||||
ResultsDir string `json:"resultsDir,omitempty"`
|
||||
Pipe bool `json:"pipe,omitempty"`
|
||||
|
||||
Default ServerInfo `json:"default,omitempty"`
|
||||
Servers map[string]ServerInfo `json:"servers,omitempty"`
|
||||
|
||||
ScanOpts
|
||||
|
||||
// report
|
||||
CveDict GoCveDictConf `json:"cveDict,omitempty"`
|
||||
OvalDict GovalDictConf `json:"ovalDict,omitempty"`
|
||||
Gost GostConf `json:"gost,omitempty"`
|
||||
Exploit ExploitConf `json:"exploit,omitempty"`
|
||||
Metasploit MetasploitConf `json:"metasploit,omitempty"`
|
||||
KEVuln KEVulnConf `json:"kevuln,omitempty"`
|
||||
Cti CtiConf `json:"cti,omitempty"`
|
||||
|
||||
Slack SlackConf `json:"-"`
|
||||
EMail SMTPConf `json:"-"`
|
||||
HTTP HTTPConf `json:"-"`
|
||||
AWS AWSConf `json:"-"`
|
||||
Azure AzureConf `json:"-"`
|
||||
ChatWork ChatWorkConf `json:"-"`
|
||||
GoogleChat GoogleChatConf `json:"-"`
|
||||
Telegram TelegramConf `json:"-"`
|
||||
WpScan WpScanConf `json:"-"`
|
||||
Saas SaasConf `json:"-"`
|
||||
|
||||
ReportOpts
|
||||
}
|
||||
|
||||
// ReportConf is an interface to Validate Report Config
|
||||
type ReportConf interface {
|
||||
Validate() []error
|
||||
}
|
||||
|
||||
// ScanOpts is options for scan
|
||||
type ScanOpts struct {
|
||||
Vvv bool `json:"vvv,omitempty"`
|
||||
}
|
||||
|
||||
// ReportOpts is options for report
|
||||
type ReportOpts struct {
|
||||
CvssScoreOver float64 `json:"cvssScoreOver,omitempty"`
|
||||
ConfidenceScoreOver int `json:"confidenceScoreOver,omitempty"`
|
||||
TrivyCacheDBDir string `json:"trivyCacheDBDir,omitempty"`
|
||||
NoProgress bool `json:"noProgress,omitempty"`
|
||||
RefreshCve bool `json:"refreshCve,omitempty"`
|
||||
IgnoreUnfixed bool `json:"ignoreUnfixed,omitempty"`
|
||||
IgnoreUnscoredCves bool `json:"ignoreUnscoredCves,omitempty"`
|
||||
DiffPlus bool `json:"diffPlus,omitempty"`
|
||||
DiffMinus bool `json:"diffMinus,omitempty"`
|
||||
Diff bool `json:"diff,omitempty"`
|
||||
Lang string `json:"lang,omitempty"`
|
||||
}
|
||||
|
||||
// ValidateOnConfigtest validates
|
||||
func (c Config) ValidateOnConfigtest() bool {
|
||||
errs := c.checkSSHKeyExist()
|
||||
if _, err := govalidator.ValidateStruct(c); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
for _, err := range errs {
|
||||
logging.Log.Error(err)
|
||||
}
|
||||
return len(errs) == 0
|
||||
}
|
||||
|
||||
// ValidateOnScan validates configuration
|
||||
func (c Config) ValidateOnScan() bool {
|
||||
errs := c.checkSSHKeyExist()
|
||||
if len(c.ResultsDir) != 0 {
|
||||
if ok, _ := govalidator.IsFilePath(c.ResultsDir); !ok {
|
||||
errs = append(errs, xerrors.Errorf(
|
||||
"JSON base directory must be a *Absolute* file path. -results-dir: %s", c.ResultsDir))
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := govalidator.ValidateStruct(c); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
for _, server := range c.Servers {
|
||||
if !server.Module.IsScanPort() {
|
||||
continue
|
||||
}
|
||||
if es := server.PortScan.Validate(); 0 < len(es) {
|
||||
errs = append(errs, es...)
|
||||
}
|
||||
if es := server.Windows.Validate(); 0 < len(es) {
|
||||
errs = append(errs, es...)
|
||||
}
|
||||
}
|
||||
|
||||
for _, err := range errs {
|
||||
logging.Log.Error(err)
|
||||
}
|
||||
return len(errs) == 0
|
||||
}
|
||||
|
||||
func (c Config) checkSSHKeyExist() (errs []error) {
|
||||
for serverName, v := range c.Servers {
|
||||
if v.Type == constant.ServerTypePseudo {
|
||||
continue
|
||||
}
|
||||
if v.KeyPath != "" {
|
||||
if _, err := os.Stat(v.KeyPath); err != nil {
|
||||
errs = append(errs, xerrors.Errorf(
|
||||
"%s is invalid. keypath: %s not exists", serverName, v.KeyPath))
|
||||
}
|
||||
}
|
||||
}
|
||||
return errs
|
||||
}
|
||||
|
||||
// ValidateOnReport validates configuration
|
||||
func (c *Config) ValidateOnReport() bool {
|
||||
errs := []error{}
|
||||
|
||||
if len(c.ResultsDir) != 0 {
|
||||
if ok, _ := govalidator.IsFilePath(c.ResultsDir); !ok {
|
||||
errs = append(errs, xerrors.Errorf(
|
||||
"JSON base directory must be a *Absolute* file path. -results-dir: %s", c.ResultsDir))
|
||||
}
|
||||
}
|
||||
|
||||
_, err := govalidator.ValidateStruct(c)
|
||||
if err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
|
||||
for _, rc := range []ReportConf{
|
||||
&c.EMail,
|
||||
&c.Slack,
|
||||
&c.ChatWork,
|
||||
&c.GoogleChat,
|
||||
&c.Telegram,
|
||||
&c.HTTP,
|
||||
&c.AWS,
|
||||
&c.Azure,
|
||||
} {
|
||||
if es := rc.Validate(); 0 < len(es) {
|
||||
errs = append(errs, es...)
|
||||
}
|
||||
}
|
||||
|
||||
for _, cnf := range []VulnDictInterface{
|
||||
&Conf.CveDict,
|
||||
&Conf.OvalDict,
|
||||
&Conf.Gost,
|
||||
&Conf.Exploit,
|
||||
&Conf.Metasploit,
|
||||
&Conf.KEVuln,
|
||||
&Conf.Cti,
|
||||
} {
|
||||
if err := cnf.Validate(); err != nil {
|
||||
errs = append(errs, xerrors.Errorf("Failed to validate %s: %+v", cnf.GetName(), err))
|
||||
}
|
||||
if err := cnf.CheckHTTPHealth(); err != nil {
|
||||
errs = append(errs, xerrors.Errorf("Run %s as server mode before reporting: %+v", cnf.GetName(), err))
|
||||
}
|
||||
}
|
||||
|
||||
for _, err := range errs {
|
||||
logging.Log.Error(err)
|
||||
}
|
||||
|
||||
return len(errs) == 0
|
||||
}
|
||||
|
||||
// ValidateOnSaaS validates configuration
|
||||
func (c Config) ValidateOnSaaS() bool {
|
||||
saaserrs := c.Saas.Validate()
|
||||
for _, err := range saaserrs {
|
||||
logging.Log.Error("Failed to validate SaaS conf: %+w", err)
|
||||
}
|
||||
return len(saaserrs) == 0
|
||||
}
|
||||
|
||||
// WpScanConf is wpscan.com config
|
||||
type WpScanConf struct {
|
||||
Token string `toml:"token,omitempty" json:"-"`
|
||||
DetectInactive bool `toml:"detectInactive,omitempty" json:"detectInactive,omitempty"`
|
||||
}
|
||||
|
||||
// ServerInfo has SSH Info, additional CPE packages to scan.
|
||||
type ServerInfo struct {
|
||||
BaseName string `toml:"-" json:"-"`
|
||||
ServerName string `toml:"-" json:"serverName,omitempty"`
|
||||
User string `toml:"user,omitempty" json:"user,omitempty"`
|
||||
Host string `toml:"host,omitempty" json:"host,omitempty"`
|
||||
IgnoreIPAddresses []string `toml:"ignoreIPAddresses,omitempty" json:"ignoreIPAddresses,omitempty"`
|
||||
JumpServer []string `toml:"jumpServer,omitempty" json:"jumpServer,omitempty"`
|
||||
Port string `toml:"port,omitempty" json:"port,omitempty"`
|
||||
SSHConfigPath string `toml:"sshConfigPath,omitempty" json:"sshConfigPath,omitempty"`
|
||||
KeyPath string `toml:"keyPath,omitempty" json:"keyPath,omitempty"`
|
||||
CpeNames []string `toml:"cpeNames,omitempty" json:"cpeNames,omitempty"`
|
||||
ScanMode []string `toml:"scanMode,omitempty" json:"scanMode,omitempty"`
|
||||
ScanModules []string `toml:"scanModules,omitempty" json:"scanModules,omitempty"`
|
||||
OwaspDCXMLPath string `toml:"owaspDCXMLPath,omitempty" json:"owaspDCXMLPath,omitempty"`
|
||||
ContainersOnly bool `toml:"containersOnly,omitempty" json:"containersOnly,omitempty"`
|
||||
ContainersIncluded []string `toml:"containersIncluded,omitempty" json:"containersIncluded,omitempty"`
|
||||
ContainersExcluded []string `toml:"containersExcluded,omitempty" json:"containersExcluded,omitempty"`
|
||||
ContainerType string `toml:"containerType,omitempty" json:"containerType,omitempty"`
|
||||
Containers map[string]ContainerSetting `toml:"containers,omitempty" json:"containers,omitempty"`
|
||||
IgnoreCves []string `toml:"ignoreCves,omitempty" json:"ignoreCves,omitempty"`
|
||||
IgnorePkgsRegexp []string `toml:"ignorePkgsRegexp,omitempty" json:"ignorePkgsRegexp,omitempty"`
|
||||
GitHubRepos map[string]GitHubConf `toml:"githubs" json:"githubs,omitempty"` // key: owner/repo
|
||||
UUIDs map[string]string `toml:"uuids,omitempty" json:"uuids,omitempty"`
|
||||
Memo string `toml:"memo,omitempty" json:"memo,omitempty"`
|
||||
Enablerepo []string `toml:"enablerepo,omitempty" json:"enablerepo,omitempty"` // For CentOS, Alma, Rocky, RHEL, Amazon
|
||||
Optional map[string]interface{} `toml:"optional,omitempty" json:"optional,omitempty"` // Optional key-value set that will be outputted to JSON
|
||||
Lockfiles []string `toml:"lockfiles,omitempty" json:"lockfiles,omitempty"` // ie) path/to/package-lock.json
|
||||
FindLock bool `toml:"findLock,omitempty" json:"findLock,omitempty"`
|
||||
FindLockDirs []string `toml:"findLockDirs,omitempty" json:"findLockDirs,omitempty"`
|
||||
Type string `toml:"type,omitempty" json:"type,omitempty"` // "pseudo" or ""
|
||||
IgnoredJSONKeys []string `toml:"ignoredJSONKeys,omitempty" json:"ignoredJSONKeys,omitempty"`
|
||||
WordPress *WordPressConf `toml:"wordpress,omitempty" json:"wordpress,omitempty"`
|
||||
PortScan *PortScanConf `toml:"portscan,omitempty" json:"portscan,omitempty"`
|
||||
Windows *WindowsConf `toml:"windows,omitempty" json:"windows,omitempty"`
|
||||
|
||||
IPv4Addrs []string `toml:"-" json:"ipv4Addrs,omitempty"`
|
||||
IPv6Addrs []string `toml:"-" json:"ipv6Addrs,omitempty"`
|
||||
IPSIdentifiers map[string]string `toml:"-" json:"ipsIdentifiers,omitempty"`
|
||||
|
||||
// internal use
|
||||
LogMsgAnsiColor string `toml:"-" json:"-"` // DebugLog Color
|
||||
Container Container `toml:"-" json:"-"`
|
||||
Distro Distro `toml:"-" json:"-"`
|
||||
Mode ScanMode `toml:"-" json:"-"`
|
||||
Module ScanModule `toml:"-" json:"-"`
|
||||
}
|
||||
|
||||
// ContainerSetting is used for loading container setting in config.toml
|
||||
type ContainerSetting struct {
|
||||
Cpes []string `json:"cpes,omitempty"`
|
||||
OwaspDCXMLPath string `json:"owaspDCXMLPath,omitempty"`
|
||||
IgnorePkgsRegexp []string `json:"ignorePkgsRegexp,omitempty"`
|
||||
IgnoreCves []string `json:"ignoreCves,omitempty"`
|
||||
}
|
||||
|
||||
// WordPressConf used for WordPress Scanning
|
||||
type WordPressConf struct {
|
||||
OSUser string `toml:"osUser,omitempty" json:"osUser,omitempty"`
|
||||
DocRoot string `toml:"docRoot,omitempty" json:"docRoot,omitempty"`
|
||||
CmdPath string `toml:"cmdPath,omitempty" json:"cmdPath,omitempty"`
|
||||
NoSudo bool `toml:"noSudo,omitempty" json:"noSudo,omitempty"`
|
||||
}
|
||||
|
||||
// IsZero return whether this struct is not specified in config.toml
|
||||
func (cnf WordPressConf) IsZero() bool {
|
||||
return cnf.OSUser == "" && cnf.DocRoot == "" && cnf.CmdPath == ""
|
||||
}
|
||||
|
||||
// GitHubConf is used for GitHub Security Alerts
|
||||
type GitHubConf struct {
|
||||
Token string `json:"-"`
|
||||
IgnoreGitHubDismissed bool `json:"ignoreGitHubDismissed,omitempty"`
|
||||
}
|
||||
|
||||
// GetServerName returns ServerName if this serverInfo is about host.
|
||||
// If this serverInfo is about a container, returns containerID@ServerName
|
||||
func (s ServerInfo) GetServerName() string {
|
||||
if len(s.Container.ContainerID) == 0 {
|
||||
return s.ServerName
|
||||
}
|
||||
return fmt.Sprintf("%s@%s", s.Container.Name, s.ServerName)
|
||||
}
|
||||
|
||||
// Distro has distribution info
|
||||
type Distro struct {
|
||||
Family string
|
||||
Release string
|
||||
}
|
||||
|
||||
func (l Distro) String() string {
|
||||
return fmt.Sprintf("%s %s", l.Family, l.Release)
|
||||
}
|
||||
|
||||
// MajorVersion returns Major version
|
||||
func (l Distro) MajorVersion() (int, error) {
|
||||
switch l.Family {
|
||||
case constant.Amazon:
|
||||
return strconv.Atoi(getAmazonLinuxVersion(l.Release))
|
||||
case constant.CentOS:
|
||||
if 0 < len(l.Release) {
|
||||
return strconv.Atoi(strings.Split(strings.TrimPrefix(l.Release, "stream"), ".")[0])
|
||||
}
|
||||
case constant.OpenSUSE:
|
||||
if l.Release != "" {
|
||||
if l.Release == "tumbleweed" {
|
||||
return 0, nil
|
||||
}
|
||||
return strconv.Atoi(strings.Split(l.Release, ".")[0])
|
||||
}
|
||||
default:
|
||||
if 0 < len(l.Release) {
|
||||
return strconv.Atoi(strings.Split(l.Release, ".")[0])
|
||||
}
|
||||
}
|
||||
return 0, xerrors.New("Release is empty")
|
||||
}
|
||||
|
||||
// IsContainer returns whether this ServerInfo is about container
|
||||
func (s ServerInfo) IsContainer() bool {
|
||||
return 0 < len(s.Container.ContainerID)
|
||||
}
|
||||
|
||||
// SetContainer set container
|
||||
func (s *ServerInfo) SetContainer(d Container) {
|
||||
s.Container = d
|
||||
}
|
||||
|
||||
// Container has Container information.
|
||||
type Container struct {
|
||||
ContainerID string
|
||||
Name string
|
||||
Image string
|
||||
}
|
||||
158
config/os.go
158
config/os.go
@@ -41,8 +41,12 @@ func GetEOL(family, release string) (eol EOL, found bool) {
|
||||
case constant.Amazon:
|
||||
eol, found = map[string]EOL{
|
||||
"1": {StandardSupportUntil: time.Date(2023, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"2": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"2": {StandardSupportUntil: time.Date(2025, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"2022": {StandardSupportUntil: time.Date(2026, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"2023": {StandardSupportUntil: time.Date(2027, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"2025": {StandardSupportUntil: time.Date(2029, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"2027": {StandardSupportUntil: time.Date(2031, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"2029": {StandardSupportUntil: time.Date(2033, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
}[getAmazonLinuxVersion(release)]
|
||||
case constant.RedHat:
|
||||
// https://access.redhat.com/support/policy/updates/errata
|
||||
@@ -123,6 +127,9 @@ func GetEOL(family, release string) (eol EOL, found bool) {
|
||||
"9": {StandardSupportUntil: time.Date(2022, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"10": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"11": {StandardSupportUntil: time.Date(2026, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"12": {StandardSupportUntil: time.Date(2028, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
// "13": {StandardSupportUntil: time.Date(2030, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
// "14": {StandardSupportUntil: time.Date(2032, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
}[major(release)]
|
||||
case constant.Raspbian:
|
||||
// Not found
|
||||
@@ -130,18 +137,35 @@ func GetEOL(family, release string) (eol EOL, found bool) {
|
||||
case constant.Ubuntu:
|
||||
// https://wiki.ubuntu.com/Releases
|
||||
eol, found = map[string]EOL{
|
||||
"14.10": {Ended: true},
|
||||
"6.06": {Ended: true},
|
||||
"6.10": {Ended: true},
|
||||
"7.04": {Ended: true},
|
||||
"7.10": {Ended: true},
|
||||
"8.04": {Ended: true},
|
||||
"8.10": {Ended: true},
|
||||
"9.04": {Ended: true},
|
||||
"9.10": {Ended: true},
|
||||
"10.04": {Ended: true},
|
||||
"10.10": {Ended: true},
|
||||
"11.04": {Ended: true},
|
||||
"11.10": {Ended: true},
|
||||
"12.04": {Ended: true},
|
||||
"12.10": {Ended: true},
|
||||
"13.04": {Ended: true},
|
||||
"13.10": {Ended: true},
|
||||
"14.04": {
|
||||
ExtendedSupportUntil: time.Date(2022, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"14.10": {Ended: true},
|
||||
"15.04": {Ended: true},
|
||||
"16.10": {Ended: true},
|
||||
"17.04": {Ended: true},
|
||||
"17.10": {Ended: true},
|
||||
"15.10": {Ended: true},
|
||||
"16.04": {
|
||||
StandardSupportUntil: time.Date(2021, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
ExtendedSupportUntil: time.Date(2024, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"16.10": {Ended: true},
|
||||
"17.04": {Ended: true},
|
||||
"17.10": {Ended: true},
|
||||
"18.04": {
|
||||
StandardSupportUntil: time.Date(2023, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
ExtendedSupportUntil: time.Date(2028, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
@@ -166,6 +190,12 @@ func GetEOL(family, release string) (eol EOL, found bool) {
|
||||
StandardSupportUntil: time.Date(2027, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
ExtendedSupportUntil: time.Date(2032, 4, 1, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"22.10": {
|
||||
StandardSupportUntil: time.Date(2023, 7, 20, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
"23.04": {
|
||||
StandardSupportUntil: time.Date(2024, 1, 31, 23, 59, 59, 0, time.UTC),
|
||||
},
|
||||
}[release]
|
||||
case constant.OpenSUSE:
|
||||
// https://en.opensuse.org/Lifetime
|
||||
@@ -264,6 +294,7 @@ func GetEOL(family, release string) (eol EOL, found bool) {
|
||||
"3.14": {StandardSupportUntil: time.Date(2023, 5, 1, 23, 59, 59, 0, time.UTC)},
|
||||
"3.15": {StandardSupportUntil: time.Date(2023, 11, 1, 23, 59, 59, 0, time.UTC)},
|
||||
"3.16": {StandardSupportUntil: time.Date(2024, 5, 23, 23, 59, 59, 0, time.UTC)},
|
||||
"3.17": {StandardSupportUntil: time.Date(2024, 11, 22, 23, 59, 59, 0, time.UTC)},
|
||||
}[majorDotMinor(release)]
|
||||
case constant.FreeBSD:
|
||||
// https://www.freebsd.org/security/
|
||||
@@ -273,18 +304,103 @@ func GetEOL(family, release string) (eol EOL, found bool) {
|
||||
"9": {Ended: true},
|
||||
"10": {Ended: true},
|
||||
"11": {StandardSupportUntil: time.Date(2021, 9, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"12": {StandardSupportUntil: time.Date(2024, 6, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"12": {StandardSupportUntil: time.Date(2023, 12, 31, 23, 59, 59, 0, time.UTC)},
|
||||
"13": {StandardSupportUntil: time.Date(2026, 1, 31, 23, 59, 59, 0, time.UTC)},
|
||||
}[major(release)]
|
||||
case constant.Fedora:
|
||||
// https://docs.fedoraproject.org/en-US/releases/eol/
|
||||
// https://endoflife.date/fedora
|
||||
eol, found = map[string]EOL{
|
||||
"32": {StandardSupportUntil: time.Date(2021, 5, 25, 23, 59, 59, 0, time.UTC)},
|
||||
"33": {StandardSupportUntil: time.Date(2021, 11, 30, 23, 59, 59, 0, time.UTC)},
|
||||
"34": {StandardSupportUntil: time.Date(2022, 5, 17, 23, 59, 59, 0, time.UTC)},
|
||||
"35": {StandardSupportUntil: time.Date(2022, 12, 7, 23, 59, 59, 0, time.UTC)},
|
||||
"32": {StandardSupportUntil: time.Date(2021, 5, 24, 23, 59, 59, 0, time.UTC)},
|
||||
"33": {StandardSupportUntil: time.Date(2021, 11, 29, 23, 59, 59, 0, time.UTC)},
|
||||
"34": {StandardSupportUntil: time.Date(2022, 6, 6, 23, 59, 59, 0, time.UTC)},
|
||||
"35": {StandardSupportUntil: time.Date(2022, 12, 12, 23, 59, 59, 0, time.UTC)},
|
||||
"36": {StandardSupportUntil: time.Date(2023, 5, 16, 23, 59, 59, 0, time.UTC)},
|
||||
"37": {StandardSupportUntil: time.Date(2023, 12, 15, 23, 59, 59, 0, time.UTC)},
|
||||
"38": {StandardSupportUntil: time.Date(2024, 5, 14, 23, 59, 59, 0, time.UTC)},
|
||||
}[major(release)]
|
||||
case constant.Windows:
|
||||
// https://learn.microsoft.com/ja-jp/lifecycle/products/?products=windows
|
||||
|
||||
lhs, rhs, _ := strings.Cut(strings.TrimSuffix(release, "(Server Core installation)"), "for")
|
||||
switch strings.TrimSpace(lhs) {
|
||||
case "Windows 7":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2013, 4, 9, 23, 59, 59, 0, time.UTC)}, true
|
||||
if strings.Contains(rhs, "Service Pack 1") {
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2020, 1, 14, 23, 59, 59, 0, time.UTC)}, true
|
||||
}
|
||||
case "Windows 8":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2016, 1, 12, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 8.1":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2023, 1, 10, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2017, 5, 9, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 1511":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2017, 10, 10, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 1607":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2018, 4, 10, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 1703":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2018, 10, 9, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 1709":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2019, 4, 9, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 1803":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2019, 11, 12, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 1809":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2020, 11, 10, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 1903":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2020, 12, 8, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 1909":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2021, 5, 11, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 2004":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2021, 12, 14, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 20H2":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2022, 5, 10, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 21H1":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2022, 12, 13, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 21H2":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2023, 6, 13, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 10 Version 22H2":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2024, 5, 14, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 11 Version 21H2":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2024, 10, 8, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows 11 Version 22H2":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2025, 10, 14, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server 2008":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2011, 7, 12, 23, 59, 59, 0, time.UTC)}, true
|
||||
if strings.Contains(rhs, "Service Pack 2") {
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2020, 1, 14, 23, 59, 59, 0, time.UTC)}, true
|
||||
}
|
||||
case "Windows Server 2008 R2":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2013, 4, 9, 23, 59, 59, 0, time.UTC)}, true
|
||||
if strings.Contains(rhs, "Service Pack 1") {
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2020, 1, 14, 23, 59, 59, 0, time.UTC)}, true
|
||||
}
|
||||
case "Windows Server 2012":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2023, 10, 10, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server 2012 R2":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2023, 10, 10, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server 2016":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2027, 1, 12, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server, Version 1709":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2019, 4, 9, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server, Version 1803":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2019, 11, 12, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server, Version 1809":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2020, 11, 10, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server 2019":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2029, 1, 9, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server, Version 1903":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2020, 12, 8, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server, Version 1909":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2021, 5, 11, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server, Version 2004":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2021, 12, 14, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server, Version 20H2":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2022, 8, 9, 23, 59, 59, 0, time.UTC)}, true
|
||||
case "Windows Server 2022":
|
||||
eol, found = EOL{StandardSupportUntil: time.Date(2031, 10, 14, 23, 59, 59, 0, time.UTC)}, true
|
||||
default:
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -302,9 +418,25 @@ func majorDotMinor(osVer string) (majorDotMinor string) {
|
||||
}
|
||||
|
||||
func getAmazonLinuxVersion(osRelease string) string {
|
||||
ss := strings.Fields(osRelease)
|
||||
if len(ss) == 1 {
|
||||
switch s := strings.Fields(osRelease)[0]; s {
|
||||
case "1":
|
||||
return "1"
|
||||
case "2":
|
||||
return "2"
|
||||
case "2022":
|
||||
return "2022"
|
||||
case "2023":
|
||||
return "2023"
|
||||
case "2025":
|
||||
return "2025"
|
||||
case "2027":
|
||||
return "2027"
|
||||
case "2029":
|
||||
return "2029"
|
||||
default:
|
||||
if _, err := time.Parse("2006.01", s); err == nil {
|
||||
return "1"
|
||||
}
|
||||
return "unknown"
|
||||
}
|
||||
return ss[0]
|
||||
}
|
||||
|
||||
@@ -54,8 +54,16 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "amazon linux 2024 not found",
|
||||
fields: fields{family: Amazon, release: "2024 (Amazon Linux)"},
|
||||
name: "amazon linux 2023 supported",
|
||||
fields: fields{family: Amazon, release: "2023"},
|
||||
now: time.Date(2023, 7, 1, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "amazon linux 2031 not found",
|
||||
fields: fields{family: Amazon, release: "2031"},
|
||||
now: time.Date(2023, 7, 1, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
@@ -244,8 +252,8 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
},
|
||||
//Ubuntu
|
||||
{
|
||||
name: "Ubuntu 12.10 not found",
|
||||
fields: fields{family: Ubuntu, release: "12.10"},
|
||||
name: "Ubuntu 5.10 not found",
|
||||
fields: fields{family: Ubuntu, release: "5.10"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
found: false,
|
||||
stdEnded: false,
|
||||
@@ -339,7 +347,31 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
},
|
||||
{
|
||||
name: "Ubuntu 22.10 supported",
|
||||
fields: fields{family: Ubuntu, release: "22.10"},
|
||||
now: time.Date(2022, 5, 1, 23, 59, 59, 0, time.UTC),
|
||||
found: true,
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
},
|
||||
{
|
||||
name: "Ubuntu 23.04 supported",
|
||||
fields: fields{family: Ubuntu, release: "23.04"},
|
||||
now: time.Date(2023, 3, 16, 23, 59, 59, 0, time.UTC),
|
||||
found: true,
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
},
|
||||
//Debian
|
||||
{
|
||||
name: "Debian 8 supported",
|
||||
fields: fields{family: Debian, release: "8"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Debian 9 supported",
|
||||
fields: fields{family: Debian, release: "9"},
|
||||
@@ -356,14 +388,6 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Debian 8 supported",
|
||||
fields: fields{family: Debian, release: "8"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Debian 11 supported",
|
||||
fields: fields{family: Debian, release: "11"},
|
||||
@@ -373,8 +397,16 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Debian 12 is not supported yet",
|
||||
name: "Debian 12 supported",
|
||||
fields: fields{family: Debian, release: "12"},
|
||||
now: time.Date(2023, 6, 10, 0, 0, 0, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Debian 13 is not supported yet",
|
||||
fields: fields{family: Debian, release: "13"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
@@ -438,14 +470,30 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Alpine 3.17 not found",
|
||||
name: "Alpine 3.17 supported",
|
||||
fields: fields{family: Alpine, release: "3.17"},
|
||||
now: time.Date(2022, 1, 14, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Alpine 3.18 not found",
|
||||
fields: fields{family: Alpine, release: "3.18"},
|
||||
now: time.Date(2022, 1, 14, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: false,
|
||||
},
|
||||
// freebsd
|
||||
{
|
||||
name: "freebsd 10 eol",
|
||||
fields: fields{family: FreeBSD, release: "10"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "freebsd 11 supported",
|
||||
fields: fields{family: FreeBSD, release: "11"},
|
||||
@@ -478,27 +526,19 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "freebsd 10 eol",
|
||||
fields: fields{family: FreeBSD, release: "10"},
|
||||
now: time.Date(2021, 1, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
// Fedora
|
||||
{
|
||||
name: "Fedora 32 supported",
|
||||
fields: fields{family: Fedora, release: "32"},
|
||||
now: time.Date(2021, 5, 25, 23, 59, 59, 0, time.UTC),
|
||||
now: time.Date(2021, 5, 24, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 32 eol on 2021-5-25",
|
||||
name: "Fedora 32 eol since 2021-5-25",
|
||||
fields: fields{family: Fedora, release: "32"},
|
||||
now: time.Date(2021, 5, 26, 23, 59, 59, 0, time.UTC),
|
||||
now: time.Date(2021, 5, 25, 0, 0, 0, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
@@ -506,15 +546,15 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
{
|
||||
name: "Fedora 33 supported",
|
||||
fields: fields{family: Fedora, release: "33"},
|
||||
now: time.Date(2021, 11, 30, 23, 59, 59, 0, time.UTC),
|
||||
now: time.Date(2021, 11, 29, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 33 eol on 2021-5-26",
|
||||
name: "Fedora 33 eol since 2021-11-30",
|
||||
fields: fields{family: Fedora, release: "32"},
|
||||
now: time.Date(2021, 5, 27, 23, 59, 59, 0, time.UTC),
|
||||
now: time.Date(2021, 11, 30, 0, 0, 0, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
@@ -522,15 +562,15 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
{
|
||||
name: "Fedora 34 supported",
|
||||
fields: fields{family: Fedora, release: "34"},
|
||||
now: time.Date(2022, 5, 17, 23, 59, 59, 0, time.UTC),
|
||||
now: time.Date(2022, 6, 6, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 32 eol on 2022-5-17",
|
||||
name: "Fedora 34 eol since 2022-6-7",
|
||||
fields: fields{family: Fedora, release: "34"},
|
||||
now: time.Date(2022, 5, 18, 23, 59, 59, 0, time.UTC),
|
||||
now: time.Date(2022, 6, 7, 0, 0, 0, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
@@ -538,19 +578,91 @@ func TestEOL_IsStandardSupportEnded(t *testing.T) {
|
||||
{
|
||||
name: "Fedora 35 supported",
|
||||
fields: fields{family: Fedora, release: "35"},
|
||||
now: time.Date(2022, 12, 7, 23, 59, 59, 0, time.UTC),
|
||||
now: time.Date(2022, 12, 12, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 35 eol on 2022-12-7",
|
||||
name: "Fedora 35 eol since 2022-12-13",
|
||||
fields: fields{family: Fedora, release: "35"},
|
||||
now: time.Date(2022, 12, 13, 0, 0, 0, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 36 supported",
|
||||
fields: fields{family: Fedora, release: "36"},
|
||||
now: time.Date(2023, 5, 16, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 36 eol since 2023-05-17",
|
||||
fields: fields{family: Fedora, release: "36"},
|
||||
now: time.Date(2023, 5, 17, 0, 0, 0, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 37 supported",
|
||||
fields: fields{family: Fedora, release: "37"},
|
||||
now: time.Date(2023, 12, 15, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 37 eol since 2023-12-16",
|
||||
fields: fields{family: Fedora, release: "37"},
|
||||
now: time.Date(2023, 12, 16, 0, 0, 0, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 38 supported",
|
||||
fields: fields{family: Fedora, release: "38"},
|
||||
now: time.Date(2024, 5, 14, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 38 eol since 2024-05-15",
|
||||
fields: fields{family: Fedora, release: "38"},
|
||||
now: time.Date(2024, 5, 15, 0, 0, 0, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Fedora 39 not found",
|
||||
fields: fields{family: Fedora, release: "39"},
|
||||
now: time.Date(2024, 5, 14, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: false,
|
||||
},
|
||||
{
|
||||
name: "Windows 10 EOL",
|
||||
fields: fields{family: Windows, release: "Windows 10 for x64-based Systems"},
|
||||
now: time.Date(2022, 12, 8, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: true,
|
||||
extEnded: true,
|
||||
found: true,
|
||||
},
|
||||
{
|
||||
name: "Windows 10 Version 22H2 supported",
|
||||
fields: fields{family: Windows, release: "Windows 10 Version 22H2 for x64-based Systems"},
|
||||
now: time.Date(2022, 12, 8, 23, 59, 59, 0, time.UTC),
|
||||
stdEnded: false,
|
||||
extEnded: false,
|
||||
found: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
@@ -616,3 +728,58 @@ func Test_majorDotMinor(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_getAmazonLinuxVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
release string
|
||||
want string
|
||||
}{
|
||||
{
|
||||
release: "2017.09",
|
||||
want: "1",
|
||||
},
|
||||
{
|
||||
release: "2018.03",
|
||||
want: "1",
|
||||
},
|
||||
{
|
||||
release: "1",
|
||||
want: "1",
|
||||
},
|
||||
{
|
||||
release: "2",
|
||||
want: "2",
|
||||
},
|
||||
{
|
||||
release: "2022",
|
||||
want: "2022",
|
||||
},
|
||||
{
|
||||
release: "2023",
|
||||
want: "2023",
|
||||
},
|
||||
{
|
||||
release: "2025",
|
||||
want: "2025",
|
||||
},
|
||||
{
|
||||
release: "2027",
|
||||
want: "2027",
|
||||
},
|
||||
{
|
||||
release: "2029",
|
||||
want: "2029",
|
||||
},
|
||||
{
|
||||
release: "2031",
|
||||
want: "unknown",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.release, func(t *testing.T) {
|
||||
if got := getAmazonLinuxVersion(tt.release); got != tt.want {
|
||||
t.Errorf("getAmazonLinuxVersion() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !windows
|
||||
|
||||
package config
|
||||
|
||||
import (
|
||||
|
||||
@@ -294,6 +294,13 @@ func setDefaultIfEmpty(server *ServerInfo) error {
|
||||
}
|
||||
}
|
||||
|
||||
if server.Windows == nil {
|
||||
server.Windows = Conf.Default.Windows
|
||||
if server.Windows == nil {
|
||||
server.Windows = &WindowsConf{}
|
||||
}
|
||||
}
|
||||
|
||||
if len(server.IgnoredJSONKeys) == 0 {
|
||||
server.IgnoredJSONKeys = Conf.Default.IgnoredJSONKeys
|
||||
}
|
||||
|
||||
27
config/windows.go
Normal file
27
config/windows.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// WindowsConf used for Windows Update Setting
|
||||
type WindowsConf struct {
|
||||
ServerSelection int `toml:"serverSelection,omitempty" json:"serverSelection,omitempty"`
|
||||
CabPath string `toml:"cabPath,omitempty" json:"cabPath,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates configuration
|
||||
func (c *WindowsConf) Validate() []error {
|
||||
switch c.ServerSelection {
|
||||
case 0, 1, 2:
|
||||
case 3:
|
||||
if _, err := os.Stat(c.CabPath); err != nil {
|
||||
return []error{xerrors.Errorf("%s does not exist. err: %w", c.CabPath, err)}
|
||||
}
|
||||
default:
|
||||
return []error{xerrors.Errorf("ServerSelection: %d does not support . Reference: https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-uamg/07e2bfa4-6795-4189-b007-cc50b476181a", c.ServerSelection)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -11,7 +11,8 @@ COPY . $GOPATH/src/$REPOSITORY
|
||||
RUN cd $GOPATH/src/$REPOSITORY && \
|
||||
make build-scanner && mv vuls $GOPATH/bin && \
|
||||
make build-trivy-to-vuls && mv trivy-to-vuls $GOPATH/bin && \
|
||||
make build-future-vuls && mv future-vuls $GOPATH/bin
|
||||
make build-future-vuls && mv future-vuls $GOPATH/bin && \
|
||||
make build-snmp2cpe && mv snmp2cpe $GOPATH/bin
|
||||
|
||||
FROM alpine:3.15
|
||||
|
||||
@@ -25,7 +26,7 @@ RUN apk add --no-cache \
|
||||
nmap \
|
||||
&& mkdir -p $WORKDIR $LOGDIR
|
||||
|
||||
COPY --from=builder /go/bin/vuls /go/bin/trivy-to-vuls /go/bin/future-vuls /usr/local/bin/
|
||||
COPY --from=builder /go/bin/vuls /go/bin/trivy-to-vuls /go/bin/future-vuls /go/bin/snmp2cpe /usr/local/bin/
|
||||
COPY --from=aquasec/trivy:latest /usr/local/bin/trivy /usr/local/bin/trivy
|
||||
|
||||
VOLUME ["$WORKDIR", "$LOGDIR"]
|
||||
|
||||
50
contrib/snmp2cpe/README.md
Normal file
50
contrib/snmp2cpe/README.md
Normal file
@@ -0,0 +1,50 @@
|
||||
# snmp2cpe
|
||||
|
||||
## Main Features
|
||||
|
||||
- Estimate hardware and OS CPE from SNMP reply of network devices
|
||||
|
||||
## Installation
|
||||
|
||||
```console
|
||||
$ git clone https://github.com/future-architect/vuls.git
|
||||
$ make build-snmp2cpe
|
||||
```
|
||||
|
||||
## Command Reference
|
||||
|
||||
```console
|
||||
$ snmp2cpe help
|
||||
snmp2cpe: SNMP reply To CPE
|
||||
|
||||
Usage:
|
||||
snmp2cpe [command]
|
||||
|
||||
Available Commands:
|
||||
completion Generate the autocompletion script for the specified shell
|
||||
convert snmpget reply to CPE
|
||||
help Help about any command
|
||||
v1 snmpget with SNMPv1
|
||||
v2c snmpget with SNMPv2c
|
||||
v3 snmpget with SNMPv3
|
||||
version Print the version
|
||||
|
||||
Flags:
|
||||
-h, --help help for snmp2cpe
|
||||
|
||||
Use "snmp2cpe [command] --help" for more information about a command.
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
```console
|
||||
$ snmp2cpe v2c --debug 192.168.1.99 public
|
||||
2023/03/28 14:16:54 DEBUG: .1.3.6.1.2.1.1.1.0 ->
|
||||
2023/03/28 14:16:54 DEBUG: .1.3.6.1.2.1.47.1.1.1.1.12.1 -> Fortinet
|
||||
2023/03/28 14:16:54 DEBUG: .1.3.6.1.2.1.47.1.1.1.1.7.1 -> FGT_50E
|
||||
2023/03/28 14:16:54 DEBUG: .1.3.6.1.2.1.47.1.1.1.1.10.1 -> FortiGate-50E v5.4.6,build1165b1165,171018 (GA)
|
||||
{"192.168.1.99":{"entPhysicalTables":{"1":{"entPhysicalMfgName":"Fortinet","entPhysicalName":"FGT_50E","entPhysicalSoftwareRev":"FortiGate-50E v5.4.6,build1165b1165,171018 (GA)"}}}}
|
||||
|
||||
$ snmp2cpe v2c 192.168.1.99 public | snmp2cpe convert
|
||||
{"192.168.1.99":["cpe:2.3:h:fortinet:fortigate-50e:-:*:*:*:*:*:*:*","cpe:2.3:o:fortinet:fortios:5.4.6:*:*:*:*:*:*:*"]}
|
||||
```
|
||||
15
contrib/snmp2cpe/cmd/main.go
Normal file
15
contrib/snmp2cpe/cmd/main.go
Normal file
@@ -0,0 +1,15 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
rootCmd "github.com/future-architect/vuls/contrib/snmp2cpe/pkg/cmd/root"
|
||||
)
|
||||
|
||||
func main() {
|
||||
if err := rootCmd.NewCmdRoot().Execute(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to exec snmp2cpe: %s\n", fmt.Sprintf("%+v", err))
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
52
contrib/snmp2cpe/pkg/cmd/convert/convert.go
Normal file
52
contrib/snmp2cpe/pkg/cmd/convert/convert.go
Normal file
@@ -0,0 +1,52 @@
|
||||
package convert
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/cpe"
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/snmp"
|
||||
)
|
||||
|
||||
// NewCmdConvert ...
|
||||
func NewCmdConvert() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "convert",
|
||||
Short: "snmpget reply to CPE",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
Example: `$ snmp2cpe v2c 192.168.11.11 public | snmp2cpe convert
|
||||
$ snmp2cpe v2c 192.168.11.11 public | snmp2cpe convert -
|
||||
$ snmp2cpe v2c 192.168.11.11 public > v2c.json && snmp2cpe convert v2c.json`,
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
r := os.Stdin
|
||||
if len(args) == 1 && args[0] != "-" {
|
||||
f, err := os.Open(args[0])
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to open %s", args[0])
|
||||
}
|
||||
defer f.Close()
|
||||
r = f
|
||||
}
|
||||
|
||||
var reply map[string]snmp.Result
|
||||
if err := json.NewDecoder(r).Decode(&reply); err != nil {
|
||||
return errors.Wrap(err, "failed to decode")
|
||||
}
|
||||
|
||||
converted := map[string][]string{}
|
||||
for ipaddr, res := range reply {
|
||||
converted[ipaddr] = cpe.Convert(res)
|
||||
}
|
||||
|
||||
if err := json.NewEncoder(os.Stdout).Encode(converted); err != nil {
|
||||
return errors.Wrap(err, "failed to encode")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
30
contrib/snmp2cpe/pkg/cmd/root/root.go
Normal file
30
contrib/snmp2cpe/pkg/cmd/root/root.go
Normal file
@@ -0,0 +1,30 @@
|
||||
package root
|
||||
|
||||
import (
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
convertCmd "github.com/future-architect/vuls/contrib/snmp2cpe/pkg/cmd/convert"
|
||||
v1Cmd "github.com/future-architect/vuls/contrib/snmp2cpe/pkg/cmd/v1"
|
||||
v2cCmd "github.com/future-architect/vuls/contrib/snmp2cpe/pkg/cmd/v2c"
|
||||
v3Cmd "github.com/future-architect/vuls/contrib/snmp2cpe/pkg/cmd/v3"
|
||||
versionCmd "github.com/future-architect/vuls/contrib/snmp2cpe/pkg/cmd/version"
|
||||
)
|
||||
|
||||
// NewCmdRoot ...
|
||||
func NewCmdRoot() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "snmp2cpe <command>",
|
||||
Short: "snmp2cpe",
|
||||
Long: "snmp2cpe: SNMP reply To CPE",
|
||||
SilenceErrors: true,
|
||||
SilenceUsage: true,
|
||||
}
|
||||
|
||||
cmd.AddCommand(v1Cmd.NewCmdV1())
|
||||
cmd.AddCommand(v2cCmd.NewCmdV2c())
|
||||
cmd.AddCommand(v3Cmd.NewCmdV3())
|
||||
cmd.AddCommand(convertCmd.NewCmdConvert())
|
||||
cmd.AddCommand(versionCmd.NewCmdVersion())
|
||||
|
||||
return cmd
|
||||
}
|
||||
47
contrib/snmp2cpe/pkg/cmd/v1/v1.go
Normal file
47
contrib/snmp2cpe/pkg/cmd/v1/v1.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package v1
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
"github.com/gosnmp/gosnmp"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/snmp"
|
||||
)
|
||||
|
||||
// SNMPv1Options ...
|
||||
type SNMPv1Options struct {
|
||||
Debug bool
|
||||
}
|
||||
|
||||
// NewCmdV1 ...
|
||||
func NewCmdV1() *cobra.Command {
|
||||
opts := &SNMPv1Options{
|
||||
Debug: false,
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "v1 <IP Address> <Community>",
|
||||
Short: "snmpget with SNMPv1",
|
||||
Example: "$ snmp2cpe v1 192.168.100.1 public",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
r, err := snmp.Get(gosnmp.Version1, args[0], snmp.WithCommunity(args[1]), snmp.WithDebug(opts.Debug))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to snmpget")
|
||||
}
|
||||
|
||||
if err := json.NewEncoder(os.Stdout).Encode(map[string]snmp.Result{args[0]: r}); err != nil {
|
||||
return errors.Wrap(err, "failed to encode")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVarP(&opts.Debug, "debug", "", false, "debug mode")
|
||||
|
||||
return cmd
|
||||
}
|
||||
47
contrib/snmp2cpe/pkg/cmd/v2c/v2c.go
Normal file
47
contrib/snmp2cpe/pkg/cmd/v2c/v2c.go
Normal file
@@ -0,0 +1,47 @@
|
||||
package v2c
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"os"
|
||||
|
||||
"github.com/gosnmp/gosnmp"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/snmp"
|
||||
)
|
||||
|
||||
// SNMPv2cOptions ...
|
||||
type SNMPv2cOptions struct {
|
||||
Debug bool
|
||||
}
|
||||
|
||||
// NewCmdV2c ...
|
||||
func NewCmdV2c() *cobra.Command {
|
||||
opts := &SNMPv2cOptions{
|
||||
Debug: false,
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "v2c <IP Address> <Community>",
|
||||
Short: "snmpget with SNMPv2c",
|
||||
Example: "$ snmp2cpe v2c 192.168.100.1 public",
|
||||
Args: cobra.ExactArgs(2),
|
||||
RunE: func(_ *cobra.Command, args []string) error {
|
||||
r, err := snmp.Get(gosnmp.Version2c, args[0], snmp.WithCommunity(args[1]), snmp.WithDebug(opts.Debug))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to snmpget")
|
||||
}
|
||||
|
||||
if err := json.NewEncoder(os.Stdout).Encode(map[string]snmp.Result{args[0]: r}); err != nil {
|
||||
return errors.Wrap(err, "failed to encode")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVarP(&opts.Debug, "debug", "", false, "debug mode")
|
||||
|
||||
return cmd
|
||||
}
|
||||
39
contrib/snmp2cpe/pkg/cmd/v3/v3.go
Normal file
39
contrib/snmp2cpe/pkg/cmd/v3/v3.go
Normal file
@@ -0,0 +1,39 @@
|
||||
package v3
|
||||
|
||||
import (
|
||||
"github.com/gosnmp/gosnmp"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/snmp"
|
||||
)
|
||||
|
||||
// SNMPv3Options ...
|
||||
type SNMPv3Options struct {
|
||||
Debug bool
|
||||
}
|
||||
|
||||
// NewCmdV3 ...
|
||||
func NewCmdV3() *cobra.Command {
|
||||
opts := &SNMPv3Options{
|
||||
Debug: false,
|
||||
}
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "v3 <args>",
|
||||
Short: "snmpget with SNMPv3",
|
||||
Example: "$ snmp2cpe v3",
|
||||
RunE: func(_ *cobra.Command, _ []string) error {
|
||||
_, err := snmp.Get(gosnmp.Version3, "", snmp.WithDebug(opts.Debug))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to snmpget")
|
||||
}
|
||||
|
||||
return nil
|
||||
},
|
||||
}
|
||||
|
||||
cmd.Flags().BoolVarP(&opts.Debug, "debug", "", false, "debug mode")
|
||||
|
||||
return cmd
|
||||
}
|
||||
23
contrib/snmp2cpe/pkg/cmd/version/version.go
Normal file
23
contrib/snmp2cpe/pkg/cmd/version/version.go
Normal file
@@ -0,0 +1,23 @@
|
||||
package version
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
)
|
||||
|
||||
// NewCmdVersion ...
|
||||
func NewCmdVersion() *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "version",
|
||||
Short: "Print the version",
|
||||
Args: cobra.NoArgs,
|
||||
Run: func(_ *cobra.Command, _ []string) {
|
||||
fmt.Fprintf(os.Stdout, "snmp2cpe %s %s\n", config.Version, config.Revision)
|
||||
},
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
212
contrib/snmp2cpe/pkg/cpe/cpe.go
Normal file
212
contrib/snmp2cpe/pkg/cpe/cpe.go
Normal file
@@ -0,0 +1,212 @@
|
||||
package cpe
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/hashicorp/go-version"
|
||||
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/snmp"
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/util"
|
||||
)
|
||||
|
||||
// Convert ...
|
||||
func Convert(result snmp.Result) []string {
|
||||
var cpes []string
|
||||
|
||||
switch detectVendor(result) {
|
||||
case "Cisco":
|
||||
var p, v string
|
||||
lhs, _, _ := strings.Cut(result.SysDescr0, " RELEASE SOFTWARE")
|
||||
for _, s := range strings.Split(lhs, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
switch {
|
||||
case strings.Contains(s, "Cisco NX-OS"):
|
||||
p = "nx-os"
|
||||
case strings.Contains(s, "Cisco IOS Software"), strings.Contains(s, "Cisco Internetwork Operating System Software IOS"):
|
||||
p = "ios"
|
||||
if strings.Contains(lhs, "IOSXE") || strings.Contains(lhs, "IOS-XE") {
|
||||
p = "ios_xe"
|
||||
}
|
||||
case strings.HasPrefix(s, "Version "):
|
||||
v = strings.ToLower(strings.TrimPrefix(s, "Version "))
|
||||
}
|
||||
}
|
||||
if p != "" && v != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:cisco:%s:%s:*:*:*:*:*:*:*", p, v))
|
||||
}
|
||||
|
||||
if t, ok := result.EntPhysicalTables[1]; ok {
|
||||
if t.EntPhysicalName != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:h:cisco:%s:-:*:*:*:*:*:*:*", strings.ToLower(t.EntPhysicalName)))
|
||||
}
|
||||
if p != "" && t.EntPhysicalSoftwareRev != "" {
|
||||
s, _, _ := strings.Cut(t.EntPhysicalSoftwareRev, " RELEASE SOFTWARE")
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:cisco:%s:%s:*:*:*:*:*:*:*", p, strings.ToLower(strings.TrimSuffix(s, ","))))
|
||||
}
|
||||
}
|
||||
case "Juniper Networks":
|
||||
if strings.HasPrefix(result.SysDescr0, "Juniper Networks, Inc.") {
|
||||
for _, s := range strings.Split(strings.TrimPrefix(result.SysDescr0, "Juniper Networks, Inc. "), ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
switch {
|
||||
case strings.HasPrefix(s, "qfx"), strings.HasPrefix(s, "ex"), strings.HasPrefix(s, "mx"), strings.HasPrefix(s, "ptx"), strings.HasPrefix(s, "acx"), strings.HasPrefix(s, "bti"), strings.HasPrefix(s, "srx"):
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:h:juniper:%s:-:*:*:*:*:*:*:*", strings.Fields(s)[0]))
|
||||
case strings.HasPrefix(s, "kernel JUNOS "):
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:juniper:junos:%s:*:*:*:*:*:*:*", strings.ToLower(strings.Fields(strings.TrimPrefix(s, "kernel JUNOS "))[0])))
|
||||
}
|
||||
}
|
||||
|
||||
if t, ok := result.EntPhysicalTables[1]; ok {
|
||||
if t.EntPhysicalSoftwareRev != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:juniper:junos:%s:*:*:*:*:*:*:*", strings.ToLower(t.EntPhysicalSoftwareRev)))
|
||||
}
|
||||
}
|
||||
} else {
|
||||
h, v, ok := strings.Cut(result.SysDescr0, " version ")
|
||||
if ok {
|
||||
cpes = append(cpes,
|
||||
fmt.Sprintf("cpe:2.3:h:juniper:%s:-:*:*:*:*:*:*:*", strings.ToLower(h)),
|
||||
fmt.Sprintf("cpe:2.3:o:juniper:screenos:%s:*:*:*:*:*:*:*", strings.ToLower(strings.Fields(v)[0])),
|
||||
)
|
||||
}
|
||||
}
|
||||
case "Arista Networks":
|
||||
v, h, ok := strings.Cut(result.SysDescr0, " running on an ")
|
||||
if ok {
|
||||
if strings.HasPrefix(v, "Arista Networks EOS version ") {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:arista:eos:%s:*:*:*:*:*:*:*", strings.ToLower(strings.TrimPrefix(v, "Arista Networks EOS version "))))
|
||||
}
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:/h:arista:%s:-:*:*:*:*:*:*:*", strings.ToLower(strings.TrimPrefix(h, "Arista Networks "))))
|
||||
}
|
||||
if t, ok := result.EntPhysicalTables[1]; ok {
|
||||
if t.EntPhysicalSoftwareRev != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:arista:eos:%s:*:*:*:*:*:*:*", strings.ToLower(t.EntPhysicalSoftwareRev)))
|
||||
}
|
||||
}
|
||||
case "Fortinet":
|
||||
if t, ok := result.EntPhysicalTables[1]; ok {
|
||||
if strings.HasPrefix(t.EntPhysicalName, "FGT_") {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:h:fortinet:fortigate-%s:-:*:*:*:*:*:*:*", strings.ToLower(strings.TrimPrefix(t.EntPhysicalName, "FGT_"))))
|
||||
}
|
||||
for _, s := range strings.Fields(t.EntPhysicalSoftwareRev) {
|
||||
switch {
|
||||
case strings.HasPrefix(s, "FortiGate-"):
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:h:fortinet:%s:-:*:*:*:*:*:*:*", strings.ToLower(s)))
|
||||
case strings.HasPrefix(s, "v") && strings.Contains(s, "build"):
|
||||
if v, _, found := strings.Cut(strings.TrimPrefix(s, "v"), ",build"); found {
|
||||
if _, err := version.NewVersion(v); err == nil {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:fortinet:fortios:%s:*:*:*:*:*:*:*", v))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
case "YAMAHA":
|
||||
var h, v string
|
||||
for _, s := range strings.Fields(result.SysDescr0) {
|
||||
switch {
|
||||
case strings.HasPrefix(s, "RTX"), strings.HasPrefix(s, "NVR"), strings.HasPrefix(s, "RTV"), strings.HasPrefix(s, "RT"),
|
||||
strings.HasPrefix(s, "SRT"), strings.HasPrefix(s, "FWX"), strings.HasPrefix(s, "YSL-V810"):
|
||||
h = strings.ToLower(s)
|
||||
case strings.HasPrefix(s, "Rev."):
|
||||
if _, err := version.NewVersion(strings.TrimPrefix(s, "Rev.")); err == nil {
|
||||
v = strings.TrimPrefix(s, "Rev.")
|
||||
}
|
||||
}
|
||||
}
|
||||
if h != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:h:yamaha:%s:-:*:*:*:*:*:*:*", h))
|
||||
if v != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:yamaha:%s:%s:*:*:*:*:*:*:*", h, v))
|
||||
}
|
||||
}
|
||||
case "NEC":
|
||||
var h, v string
|
||||
for _, s := range strings.Split(result.SysDescr0, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
switch {
|
||||
case strings.HasPrefix(s, "IX Series "):
|
||||
h = strings.ToLower(strings.TrimSuffix(strings.TrimPrefix(s, "IX Series "), " (magellan-sec) Software"))
|
||||
case strings.HasPrefix(s, "Version "):
|
||||
if _, err := version.NewVersion(strings.TrimSpace(strings.TrimPrefix(s, "Version "))); err == nil {
|
||||
v = strings.TrimSpace(strings.TrimPrefix(s, "Version "))
|
||||
}
|
||||
}
|
||||
}
|
||||
if h != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:h:nec:%s:-:*:*:*:*:*:*:*", h))
|
||||
if v != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:nec:%s:%s:*:*:*:*:*:*:*", h, v))
|
||||
}
|
||||
}
|
||||
case "Palo Alto Networks":
|
||||
if t, ok := result.EntPhysicalTables[1]; ok {
|
||||
if t.EntPhysicalName != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:h:paloaltonetworks:%s:-:*:*:*:*:*:*:*", strings.ToLower(t.EntPhysicalName)))
|
||||
}
|
||||
if t.EntPhysicalSoftwareRev != "" {
|
||||
cpes = append(cpes, fmt.Sprintf("cpe:2.3:o:paloaltonetworks:pan-os:%s:*:*:*:*:*:*:*", t.EntPhysicalSoftwareRev))
|
||||
}
|
||||
}
|
||||
default:
|
||||
return []string{}
|
||||
}
|
||||
|
||||
return util.Unique(cpes)
|
||||
}
|
||||
|
||||
func detectVendor(r snmp.Result) string {
|
||||
if t, ok := r.EntPhysicalTables[1]; ok {
|
||||
switch t.EntPhysicalMfgName {
|
||||
case "Cisco":
|
||||
return "Cisco"
|
||||
case "Juniper Networks":
|
||||
return "Juniper Networks"
|
||||
case "Arista Networks":
|
||||
return "Arista Networks"
|
||||
case "Fortinet":
|
||||
return "Fortinet"
|
||||
case "YAMAHA":
|
||||
return "YAMAHA"
|
||||
case "NEC":
|
||||
return "NEC"
|
||||
case "Palo Alto Networks":
|
||||
return "Palo Alto Networks"
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case strings.Contains(r.SysDescr0, "Cisco"):
|
||||
return "Cisco"
|
||||
case strings.Contains(r.SysDescr0, "Juniper Networks"),
|
||||
strings.Contains(r.SysDescr0, "SSG5"), strings.Contains(r.SysDescr0, "SSG20"), strings.Contains(r.SysDescr0, "SSG140"),
|
||||
strings.Contains(r.SysDescr0, "SSG320"), strings.Contains(r.SysDescr0, "SSG350"), strings.Contains(r.SysDescr0, "SSG520"),
|
||||
strings.Contains(r.SysDescr0, "SSG550"):
|
||||
return "Juniper Networks"
|
||||
case strings.Contains(r.SysDescr0, "Arista Networks"):
|
||||
return "Arista Networks"
|
||||
case strings.Contains(r.SysDescr0, "Fortinet"), strings.Contains(r.SysDescr0, "FortiGate"):
|
||||
return "Fortinet"
|
||||
case strings.Contains(r.SysDescr0, "YAMAHA"),
|
||||
strings.Contains(r.SysDescr0, "RTX810"), strings.Contains(r.SysDescr0, "RTX830"),
|
||||
strings.Contains(r.SysDescr0, "RTX1000"), strings.Contains(r.SysDescr0, "RTX1100"),
|
||||
strings.Contains(r.SysDescr0, "RTX1200"), strings.Contains(r.SysDescr0, "RTX1210"), strings.Contains(r.SysDescr0, "RTX1220"),
|
||||
strings.Contains(r.SysDescr0, "RTX1300"), strings.Contains(r.SysDescr0, "RTX1500"), strings.Contains(r.SysDescr0, "RTX2000"),
|
||||
strings.Contains(r.SysDescr0, "RTX3000"), strings.Contains(r.SysDescr0, "RTX3500"), strings.Contains(r.SysDescr0, "RTX5000"),
|
||||
strings.Contains(r.SysDescr0, "NVR500"), strings.Contains(r.SysDescr0, "NVR510"), strings.Contains(r.SysDescr0, "NVR700W"),
|
||||
strings.Contains(r.SysDescr0, "RTV01"), strings.Contains(r.SysDescr0, "RTV700"),
|
||||
strings.Contains(r.SysDescr0, "RT105i"), strings.Contains(r.SysDescr0, "RT105p"), strings.Contains(r.SysDescr0, "RT105e"),
|
||||
strings.Contains(r.SysDescr0, "RT107e"), strings.Contains(r.SysDescr0, "RT250i"), strings.Contains(r.SysDescr0, "RT300i"),
|
||||
strings.Contains(r.SysDescr0, "SRT100"),
|
||||
strings.Contains(r.SysDescr0, "FWX100"),
|
||||
strings.Contains(r.SysDescr0, "YSL-V810"):
|
||||
return "YAMAHA"
|
||||
case strings.Contains(r.SysDescr0, "NEC"):
|
||||
return "NEC"
|
||||
case strings.Contains(r.SysDescr0, "Palo Alto Networks"):
|
||||
return "Palo Alto Networks"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
}
|
||||
244
contrib/snmp2cpe/pkg/cpe/cpe_test.go
Normal file
244
contrib/snmp2cpe/pkg/cpe/cpe_test.go
Normal file
@@ -0,0 +1,244 @@
|
||||
package cpe_test
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"github.com/google/go-cmp/cmp/cmpopts"
|
||||
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/cpe"
|
||||
"github.com/future-architect/vuls/contrib/snmp2cpe/pkg/snmp"
|
||||
)
|
||||
|
||||
func TestConvert(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args snmp.Result
|
||||
want []string
|
||||
}{
|
||||
{
|
||||
name: "Cisco NX-OS Version 7.1(4)N1(1)",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "Cisco NX-OS(tm) n6000, Software (n6000-uk9), Version 7.1(4)N1(1), RELEASE SOFTWARE Copyright (c) 2002-2012 by Cisco Systems, Inc. Device Manager Version 6.0(2)N1(1),Compiled 9/2/2016 10:00:00",
|
||||
},
|
||||
want: []string{"cpe:2.3:o:cisco:nx-os:7.1(4)n1(1):*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Cisco IOS Version 15.1(4)M3",
|
||||
args: snmp.Result{
|
||||
SysDescr0: `Cisco IOS Software, 2800 Software (C2800NM-ADVENTERPRISEK9-M), Version 15.1(4)M3, RELEASE SOFTWARE (fc1)
|
||||
Technical Support: http://www.cisco.com/techsupport
|
||||
Copyright (c) 1986-2011 by Cisco Systems, Inc.
|
||||
Compiled Tue 06-Dec-11 16:21 by prod_rel_team`,
|
||||
},
|
||||
want: []string{"cpe:2.3:o:cisco:ios:15.1(4)m3:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Cisco IOS Version 15.1(4)M4",
|
||||
args: snmp.Result{
|
||||
SysDescr0: `Cisco IOS Software, C181X Software (C181X-ADVENTERPRISEK9-M), Version 15.1(4)M4, RELEASE SOFTWARE (fc1)
|
||||
Technical Support: http://www.cisco.com/techsupport
|
||||
Copyright (c) 1986-2012 by Cisco Systems, Inc.
|
||||
Compiled Tue 20-Mar-12 23:34 by prod_rel_team`,
|
||||
},
|
||||
want: []string{"cpe:2.3:o:cisco:ios:15.1(4)m4:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Cisco IOS Vresion 15.5(3)M on Cisco 892J-K9-V02",
|
||||
args: snmp.Result{
|
||||
SysDescr0: `Cisco IOS Software, C890 Software (C890-UNIVERSALK9-M), Version 15.5(3)M, RELEASE SOFTWARE (fc1)
|
||||
Technical Support: http://www.cisco.com/techsupport
|
||||
Copyright (c) 1986-2015 by Cisco Systems, Inc.
|
||||
Compiled Thu 23-Jul-15 03:08 by prod_rel_team`,
|
||||
EntPhysicalTables: map[int]snmp.EntPhysicalTable{1: {
|
||||
EntPhysicalMfgName: "Cisco",
|
||||
EntPhysicalName: "892",
|
||||
EntPhysicalSoftwareRev: "15.5(3)M, RELEASE SOFTWARE (fc1)",
|
||||
}},
|
||||
},
|
||||
want: []string{"cpe:2.3:h:cisco:892:-:*:*:*:*:*:*:*", "cpe:2.3:o:cisco:ios:15.5(3)m:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Cisco IOS Version 15.4(3)M5 on Cisco C892FSP-K9-V02",
|
||||
args: snmp.Result{
|
||||
SysDescr0: `Cisco IOS Software, C800 Software (C800-UNIVERSALK9-M), Version 15.4(3)M5, RELEASE SOFTWARE (fc1)
|
||||
Technical Support: http://www.cisco.com/techsupport
|
||||
Copyright (c) 1986-2016 by Cisco Systems, Inc.
|
||||
Compiled Tue 09-Feb-16 06:15 by prod_rel_team`,
|
||||
EntPhysicalTables: map[int]snmp.EntPhysicalTable{1: {
|
||||
EntPhysicalMfgName: "Cisco",
|
||||
EntPhysicalName: "C892FSP-K9",
|
||||
EntPhysicalSoftwareRev: "15.4(3)M5, RELEASE SOFTWARE (fc1)",
|
||||
}},
|
||||
},
|
||||
want: []string{"cpe:2.3:h:cisco:c892fsp-k9:-:*:*:*:*:*:*:*", "cpe:2.3:o:cisco:ios:15.4(3)m5:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Cisco IOS Version 12.2(17d)SXB11",
|
||||
args: snmp.Result{
|
||||
SysDescr0: `Cisco Internetwork Operating System Software IOS (tm) s72033_rp Software (s72033_rp-JK9SV-M), Version 12.2(17d)SXB11, RELEASE SOFTWARE (fc1)
|
||||
Technical Support: http://www.cisco.com/techsupport
|
||||
Copyright (c) 1986-2005 by cisco Systems, Inc.`,
|
||||
},
|
||||
want: []string{"cpe:2.3:o:cisco:ios:12.2(17d)sxb11:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Cisco IOX-XE Version 16.12.4",
|
||||
args: snmp.Result{
|
||||
SysDescr0: `Cisco IOS Software [Gibraltar], Catalyst L3 Switch Software (CAT9K_LITE_IOSXE), Version 16.12.4, RELEASE SOFTWARE (fc5)
|
||||
Technical Support: http://www.cisco.com/techsupport
|
||||
Copyright (c) 1986-2020 by Cisco Systems, Inc.
|
||||
Compiled Thu 09-Jul-20 19:31 by m`,
|
||||
},
|
||||
want: []string{"cpe:2.3:o:cisco:ios_xe:16.12.4:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Cisco IOX-XE Version 03.06.07.E",
|
||||
args: snmp.Result{
|
||||
SysDescr0: `Cisco IOS Software, IOS-XE Software, Catalyst 4500 L3 Switch Software (cat4500es8-UNIVERSALK9-M), Version 03.06.07.E RELEASE SOFTWARE (fc3)
|
||||
Technical Support: http://www.cisco.com/techsupport
|
||||
Copyright (c) 1986-2017 by Cisco Systems, Inc.
|
||||
Compiled Wed`,
|
||||
},
|
||||
want: []string{"cpe:2.3:o:cisco:ios_xe:03.06.07.e:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Juniper SSG-5-SH-BT",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "SSG5-ISDN version 6.3.0r14.0 (SN: 0000000000000001, Firewall+VPN)",
|
||||
},
|
||||
want: []string{"cpe:2.3:h:juniper:ssg5-isdn:-:*:*:*:*:*:*:*", "cpe:2.3:o:juniper:screenos:6.3.0r14.0:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "JUNOS 20.4R3-S4.8 on Juniper MX240",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "Juniper Networks, Inc. mx240 internet router, kernel JUNOS 20.4R3-S4.8, Build date: 2022-08-16 20:42:11 UTC Copyright (c) 1996-2022 Juniper Networks, Inc.",
|
||||
EntPhysicalTables: map[int]snmp.EntPhysicalTable{1: {
|
||||
EntPhysicalMfgName: "Juniper Networks",
|
||||
EntPhysicalName: "CHAS-BP3-MX240-S",
|
||||
EntPhysicalSoftwareRev: "20.4R3-S4.8",
|
||||
}},
|
||||
},
|
||||
want: []string{"cpe:2.3:h:juniper:mx240:-:*:*:*:*:*:*:*", "cpe:2.3:o:juniper:junos:20.4r3-s4.8:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "JUNOS 12.1X46-D65.4 on Juniper SRX220H",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "Juniper Networks, Inc. srx220h internet router, kernel JUNOS 12.1X46-D65.4 #0: 2016-12-30 01:34:30 UTC builder@quoarth.juniper.net:/volume/build/junos/12.1/service/12.1X46-D65.4/obj-octeon/junos/bsd/kernels/JSRXNLE/kernel Build date: 2016-12-30 02:59",
|
||||
},
|
||||
want: []string{"cpe:2.3:h:juniper:srx220h:-:*:*:*:*:*:*:*", "cpe:2.3:o:juniper:junos:12.1x46-d65.4:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "JUNOS 12.3X48-D30.7 on Juniper SRX220H2",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "Juniper Networks, Inc. srx220h2 internet router, kernel JUNOS 12.3X48-D30.7, Build date: 2016-04-29 00:01:04 UTC Copyright (c) 1996-2016 Juniper Networks, Inc.",
|
||||
},
|
||||
want: []string{"cpe:2.3:h:juniper:srx220h2:-:*:*:*:*:*:*:*", "cpe:2.3:o:juniper:junos:12.3x48-d30.7:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "JUNOS 20.4R3-S4.8 on Juniper SRX4600",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "Juniper Networks, Inc. srx4600 internet router, kernel JUNOS 20.4R3-S4.8, Build date: 2022-08-16 20:42:11 UTC Copyright (c) 1996-2022 Juniper Networks, Inc.",
|
||||
},
|
||||
want: []string{"cpe:2.3:h:juniper:srx4600:-:*:*:*:*:*:*:*", "cpe:2.3:o:juniper:junos:20.4r3-s4.8:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "cpe:2.3:o:juniper:junos:20.4:r2-s2.2:*:*:*:*:*:*",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "Juniper Networks, Inc. ex4300-32f Ethernet Switch, kernel JUNOS 20.4R3-S4.8, Build date: 2022-08-16 21:10:45 UTC Copyright (c) 1996-2022 Juniper Networks, Inc.",
|
||||
EntPhysicalTables: map[int]snmp.EntPhysicalTable{1: {
|
||||
EntPhysicalMfgName: "Juniper Networks",
|
||||
EntPhysicalName: "",
|
||||
EntPhysicalSoftwareRev: "20.4R3-S4.8",
|
||||
}},
|
||||
},
|
||||
want: []string{"cpe:2.3:h:juniper:ex4300-32f:-:*:*:*:*:*:*:*", "cpe:2.3:o:juniper:junos:20.4r3-s4.8:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Arista Networks EOS version 4.28.4M on DCS-7050TX-64",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "Arista Networks EOS version 4.28.4M running on an Arista Networks DCS-7050TX-64",
|
||||
EntPhysicalTables: map[int]snmp.EntPhysicalTable{1: {
|
||||
EntPhysicalMfgName: "Arista Networks",
|
||||
EntPhysicalName: "",
|
||||
EntPhysicalSoftwareRev: "4.28.4M",
|
||||
}},
|
||||
},
|
||||
want: []string{"cpe:/h:arista:dcs-7050tx-64:-:*:*:*:*:*:*:*", "cpe:2.3:o:arista:eos:4.28.4m:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "FortiGate-50E",
|
||||
args: snmp.Result{
|
||||
EntPhysicalTables: map[int]snmp.EntPhysicalTable{1: {
|
||||
EntPhysicalMfgName: "Fortinet",
|
||||
EntPhysicalName: "FGT_50E",
|
||||
EntPhysicalSoftwareRev: "FortiGate-50E v5.4.6,build1165b1165,171018 (GA)",
|
||||
}},
|
||||
},
|
||||
want: []string{"cpe:2.3:h:fortinet:fortigate-50e:-:*:*:*:*:*:*:*", "cpe:2.3:o:fortinet:fortios:5.4.6:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "FortiGate-60F",
|
||||
args: snmp.Result{
|
||||
EntPhysicalTables: map[int]snmp.EntPhysicalTable{1: {
|
||||
EntPhysicalMfgName: "Fortinet",
|
||||
EntPhysicalName: "FGT_60F",
|
||||
EntPhysicalSoftwareRev: "FortiGate-60F v6.4.11,build2030,221031 (GA.M)",
|
||||
}},
|
||||
},
|
||||
want: []string{"cpe:2.3:h:fortinet:fortigate-60f:-:*:*:*:*:*:*:*", "cpe:2.3:o:fortinet:fortios:6.4.11:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "YAMAHA RTX1000",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "RTX1000 Rev.8.01.29 (Fri Apr 15 11:50:44 2011)",
|
||||
},
|
||||
want: []string{"cpe:2.3:h:yamaha:rtx1000:-:*:*:*:*:*:*:*", "cpe:2.3:o:yamaha:rtx1000:8.01.29:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "YAMAHA RTX810",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "RTX810 Rev.11.01.34 (Tue Nov 26 18:39:12 2019)",
|
||||
},
|
||||
want: []string{"cpe:2.3:h:yamaha:rtx810:-:*:*:*:*:*:*:*", "cpe:2.3:o:yamaha:rtx810:11.01.34:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "NEC IX2105",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "NEC Portable Internetwork Core Operating System Software, IX Series IX2105 (magellan-sec) Software, Version 8.8.22, RELEASE SOFTWARE, Compiled Jul 04-Wed-2012 14:18:46 JST #2, IX2105",
|
||||
},
|
||||
want: []string{"cpe:2.3:h:nec:ix2105:-:*:*:*:*:*:*:*", "cpe:2.3:o:nec:ix2105:8.8.22:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "NEC IX2235",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "NEC Portable Internetwork Core Operating System Software, IX Series IX2235 (magellan-sec) Software, Version 10.6.21, RELEASE SOFTWARE, Compiled Dec 15-Fri-YYYY HH:MM:SS JST #2, IX2235",
|
||||
},
|
||||
want: []string{"cpe:2.3:h:nec:ix2235:-:*:*:*:*:*:*:*", "cpe:2.3:o:nec:ix2235:10.6.21:*:*:*:*:*:*:*"},
|
||||
},
|
||||
{
|
||||
name: "Palo Alto Networks PAN-OS 10.0.0 on PA-220",
|
||||
args: snmp.Result{
|
||||
SysDescr0: "Palo Alto Networks PA-220 series firewall",
|
||||
EntPhysicalTables: map[int]snmp.EntPhysicalTable{1: {
|
||||
EntPhysicalMfgName: "Palo Alto Networks",
|
||||
EntPhysicalName: "PA-220",
|
||||
EntPhysicalSoftwareRev: "10.0.0",
|
||||
}},
|
||||
},
|
||||
want: []string{"cpe:2.3:h:paloaltonetworks:pa-220:-:*:*:*:*:*:*:*", "cpe:2.3:o:paloaltonetworks:pan-os:10.0.0:*:*:*:*:*:*:*"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
opts := []cmp.Option{
|
||||
cmpopts.SortSlices(func(i, j string) bool {
|
||||
return i < j
|
||||
}),
|
||||
}
|
||||
if diff := cmp.Diff(cpe.Convert(tt.args), tt.want, opts...); diff != "" {
|
||||
t.Errorf("Convert() value is mismatch (-got +want):%s\n", diff)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
131
contrib/snmp2cpe/pkg/snmp/snmp.go
Normal file
131
contrib/snmp2cpe/pkg/snmp/snmp.go
Normal file
@@ -0,0 +1,131 @@
|
||||
package snmp
|
||||
|
||||
import (
|
||||
"log"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gosnmp/gosnmp"
|
||||
"github.com/pkg/errors"
|
||||
)
|
||||
|
||||
type options struct {
|
||||
community string
|
||||
debug bool
|
||||
}
|
||||
|
||||
// Option ...
|
||||
type Option interface {
|
||||
apply(*options)
|
||||
}
|
||||
|
||||
type communityOption string
|
||||
|
||||
func (c communityOption) apply(opts *options) {
|
||||
opts.community = string(c)
|
||||
}
|
||||
|
||||
// WithCommunity ...
|
||||
func WithCommunity(c string) Option {
|
||||
return communityOption(c)
|
||||
}
|
||||
|
||||
type debugOption bool
|
||||
|
||||
func (d debugOption) apply(opts *options) {
|
||||
opts.debug = bool(d)
|
||||
}
|
||||
|
||||
// WithDebug ...
|
||||
func WithDebug(d bool) Option {
|
||||
return debugOption(d)
|
||||
}
|
||||
|
||||
// Get ...
|
||||
func Get(version gosnmp.SnmpVersion, ipaddr string, opts ...Option) (Result, error) {
|
||||
var options options
|
||||
for _, o := range opts {
|
||||
o.apply(&options)
|
||||
}
|
||||
|
||||
r := Result{SysDescr0: "", EntPhysicalTables: map[int]EntPhysicalTable{}}
|
||||
|
||||
params := &gosnmp.GoSNMP{
|
||||
Target: ipaddr,
|
||||
Port: 161,
|
||||
Version: version,
|
||||
Timeout: time.Duration(2) * time.Second,
|
||||
Retries: 3,
|
||||
ExponentialTimeout: true,
|
||||
MaxOids: gosnmp.MaxOids,
|
||||
}
|
||||
|
||||
switch version {
|
||||
case gosnmp.Version1, gosnmp.Version2c:
|
||||
params.Community = options.community
|
||||
case gosnmp.Version3:
|
||||
return Result{}, errors.New("not implemented")
|
||||
}
|
||||
|
||||
if err := params.Connect(); err != nil {
|
||||
return Result{}, errors.Wrap(err, "failed to connect")
|
||||
}
|
||||
defer params.Conn.Close()
|
||||
|
||||
for _, oid := range []string{"1.3.6.1.2.1.1.1.0", "1.3.6.1.2.1.47.1.1.1.1.12.1", "1.3.6.1.2.1.47.1.1.1.1.7.1", "1.3.6.1.2.1.47.1.1.1.1.10.1"} {
|
||||
resp, err := params.Get([]string{oid})
|
||||
if err != nil {
|
||||
return Result{}, errors.Wrap(err, "send SNMP GET request")
|
||||
}
|
||||
for _, v := range resp.Variables {
|
||||
if options.debug {
|
||||
switch v.Type {
|
||||
case gosnmp.OctetString:
|
||||
log.Printf("DEBUG: %s -> %s", v.Name, string(v.Value.([]byte)))
|
||||
default:
|
||||
log.Printf("DEBUG: %s -> %v", v.Name, v.Value)
|
||||
}
|
||||
}
|
||||
|
||||
switch {
|
||||
case v.Name == ".1.3.6.1.2.1.1.1.0":
|
||||
if v.Type == gosnmp.OctetString {
|
||||
r.SysDescr0 = string(v.Value.([]byte))
|
||||
}
|
||||
case strings.HasPrefix(v.Name, ".1.3.6.1.2.1.47.1.1.1.1.12."):
|
||||
i, err := strconv.Atoi(strings.TrimPrefix(v.Name, ".1.3.6.1.2.1.47.1.1.1.1.12."))
|
||||
if err != nil {
|
||||
return Result{}, errors.Wrap(err, "failed to get index")
|
||||
}
|
||||
if v.Type == gosnmp.OctetString {
|
||||
b := r.EntPhysicalTables[i]
|
||||
b.EntPhysicalMfgName = string(v.Value.([]byte))
|
||||
r.EntPhysicalTables[i] = b
|
||||
}
|
||||
case strings.HasPrefix(v.Name, ".1.3.6.1.2.1.47.1.1.1.1.7."):
|
||||
i, err := strconv.Atoi(strings.TrimPrefix(v.Name, ".1.3.6.1.2.1.47.1.1.1.1.7."))
|
||||
if err != nil {
|
||||
return Result{}, errors.Wrap(err, "failed to get index")
|
||||
}
|
||||
if v.Type == gosnmp.OctetString {
|
||||
b := r.EntPhysicalTables[i]
|
||||
b.EntPhysicalName = string(v.Value.([]byte))
|
||||
r.EntPhysicalTables[i] = b
|
||||
}
|
||||
case strings.HasPrefix(v.Name, ".1.3.6.1.2.1.47.1.1.1.1.10."):
|
||||
i, err := strconv.Atoi(strings.TrimPrefix(v.Name, ".1.3.6.1.2.1.47.1.1.1.1.10."))
|
||||
if err != nil {
|
||||
return Result{}, errors.Wrap(err, "failed to get index")
|
||||
}
|
||||
if v.Type == gosnmp.OctetString {
|
||||
b := r.EntPhysicalTables[i]
|
||||
b.EntPhysicalSoftwareRev = string(v.Value.([]byte))
|
||||
r.EntPhysicalTables[i] = b
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return r, nil
|
||||
}
|
||||
14
contrib/snmp2cpe/pkg/snmp/types.go
Normal file
14
contrib/snmp2cpe/pkg/snmp/types.go
Normal file
@@ -0,0 +1,14 @@
|
||||
package snmp
|
||||
|
||||
// Result ...
|
||||
type Result struct {
|
||||
SysDescr0 string `json:"sysDescr0,omitempty"`
|
||||
EntPhysicalTables map[int]EntPhysicalTable `json:"entPhysicalTables,omitempty"`
|
||||
}
|
||||
|
||||
// EntPhysicalTable ...
|
||||
type EntPhysicalTable struct {
|
||||
EntPhysicalMfgName string `json:"entPhysicalMfgName,omitempty"`
|
||||
EntPhysicalName string `json:"entPhysicalName,omitempty"`
|
||||
EntPhysicalSoftwareRev string `json:"entPhysicalSoftwareRev,omitempty"`
|
||||
}
|
||||
12
contrib/snmp2cpe/pkg/util/util.go
Normal file
12
contrib/snmp2cpe/pkg/util/util.go
Normal file
@@ -0,0 +1,12 @@
|
||||
package util
|
||||
|
||||
import "golang.org/x/exp/maps"
|
||||
|
||||
// Unique return unique elements
|
||||
func Unique[T comparable](s []T) []T {
|
||||
m := map[T]struct{}{}
|
||||
for _, v := range s {
|
||||
m[v] = struct{}{}
|
||||
}
|
||||
return maps.Keys(m)
|
||||
}
|
||||
139
cti/cti.go
139
cti/cti.go
@@ -660,7 +660,7 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "CAPEC-35: Leverage Executable Code in Non-Executable Files",
|
||||
},
|
||||
"CAPEC-36": {
|
||||
Name: "CAPEC-36: Using Unpublished Interfaces",
|
||||
Name: "CAPEC-36: Using Unpublished Interfaces or Functionality",
|
||||
},
|
||||
"CAPEC-37": {
|
||||
Name: "CAPEC-37: Retrieve Embedded Sensitive Data",
|
||||
@@ -831,7 +831,7 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "CAPEC-442: Infected Software",
|
||||
},
|
||||
"CAPEC-443": {
|
||||
Name: "CAPEC-443: Malicious Logic Inserted Into Product Software by Authorized Developer",
|
||||
Name: "CAPEC-443: Malicious Logic Inserted Into Product by Authorized Developer",
|
||||
},
|
||||
"CAPEC-444": {
|
||||
Name: "CAPEC-444: Development Alteration",
|
||||
@@ -840,7 +840,7 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "CAPEC-445: Malicious Logic Insertion into Product Software via Configuration Management Manipulation",
|
||||
},
|
||||
"CAPEC-446": {
|
||||
Name: "CAPEC-446: Malicious Logic Insertion into Product Software via Inclusion of 3rd Party Component Dependency",
|
||||
Name: "CAPEC-446: Malicious Logic Insertion into Product via Inclusion of Third-Party Component",
|
||||
},
|
||||
"CAPEC-447": {
|
||||
Name: "CAPEC-447: Design Alteration",
|
||||
@@ -1382,9 +1382,6 @@ var TechniqueDict = map[string]Technique{
|
||||
"CAPEC-628": {
|
||||
Name: "CAPEC-628: Carry-Off GPS Attack",
|
||||
},
|
||||
"CAPEC-629": {
|
||||
Name: "CAPEC-629: Unauthorized Use of Device Resources",
|
||||
},
|
||||
"CAPEC-63": {
|
||||
Name: "CAPEC-63: Cross-Site Scripting (XSS)",
|
||||
},
|
||||
@@ -1464,7 +1461,7 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "CAPEC-652: Use of Known Kerberos Credentials",
|
||||
},
|
||||
"CAPEC-653": {
|
||||
Name: "CAPEC-653: Use of Known Windows Credentials",
|
||||
Name: "CAPEC-653: Use of Known Operating System Credentials",
|
||||
},
|
||||
"CAPEC-654": {
|
||||
Name: "CAPEC-654: Credential Prompt Impersonation",
|
||||
@@ -1553,9 +1550,39 @@ var TechniqueDict = map[string]Technique{
|
||||
"CAPEC-681": {
|
||||
Name: "CAPEC-681: Exploitation of Improperly Controlled Hardware Security Identifiers",
|
||||
},
|
||||
"CAPEC-682": {
|
||||
Name: "CAPEC-682: Exploitation of Firmware or ROM Code with Unpatchable Vulnerabilities",
|
||||
},
|
||||
"CAPEC-69": {
|
||||
Name: "CAPEC-69: Target Programs with Elevated Privileges",
|
||||
},
|
||||
"CAPEC-690": {
|
||||
Name: "CAPEC-690: Metadata Spoofing",
|
||||
},
|
||||
"CAPEC-691": {
|
||||
Name: "CAPEC-691: Spoof Open-Source Software Metadata",
|
||||
},
|
||||
"CAPEC-692": {
|
||||
Name: "CAPEC-692: Spoof Version Control System Commit Metadata",
|
||||
},
|
||||
"CAPEC-693": {
|
||||
Name: "CAPEC-693: StarJacking",
|
||||
},
|
||||
"CAPEC-694": {
|
||||
Name: "CAPEC-694: System Location Discovery",
|
||||
},
|
||||
"CAPEC-695": {
|
||||
Name: "CAPEC-695: Repo Jacking",
|
||||
},
|
||||
"CAPEC-696": {
|
||||
Name: "CAPEC-696: Load Value Injection",
|
||||
},
|
||||
"CAPEC-697": {
|
||||
Name: "CAPEC-697: DHCP Spoofing",
|
||||
},
|
||||
"CAPEC-698": {
|
||||
Name: "CAPEC-698: Install Malicious Extension",
|
||||
},
|
||||
"CAPEC-7": {
|
||||
Name: "CAPEC-7: Blind SQL Injection",
|
||||
},
|
||||
@@ -1596,7 +1623,7 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "CAPEC-80: Using UTF-8 Encoding to Bypass Validation Logic",
|
||||
},
|
||||
"CAPEC-81": {
|
||||
Name: "CAPEC-81: Web Logs Tampering",
|
||||
Name: "CAPEC-81: Web Server Logs Tampering",
|
||||
},
|
||||
"CAPEC-83": {
|
||||
Name: "CAPEC-83: XPath Injection",
|
||||
@@ -1814,6 +1841,18 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0005: Defense Evasion => T1027.006: HTML Smuggling",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
},
|
||||
"T1027.007": {
|
||||
Name: "TA0005: Defense Evasion => T1027.007: Dynamic API Resolution",
|
||||
Platforms: []string{"Windows"},
|
||||
},
|
||||
"T1027.008": {
|
||||
Name: "TA0005: Defense Evasion => T1027.008: Stripped Payloads",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
},
|
||||
"T1027.009": {
|
||||
Name: "TA0005: Defense Evasion => T1027.009: Embedded Payloads",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
},
|
||||
"T1029": {
|
||||
Name: "TA0010: Exfiltration => T1029: Scheduled Transfer",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
@@ -2087,8 +2126,8 @@ var TechniqueDict = map[string]Technique{
|
||||
Platforms: []string{"Azure AD", "Google Workspace", "IaaS", "Office 365", "SaaS"},
|
||||
},
|
||||
"T1070": {
|
||||
Name: "TA0005: Defense Evasion => T1070: Indicator Removal on Host",
|
||||
Platforms: []string{"Containers", "Linux", "Network", "Windows", "macOS"},
|
||||
Name: "TA0005: Defense Evasion => T1070: Indicator Removal",
|
||||
Platforms: []string{"Containers", "Google Workspace", "Linux", "Network", "Office 365", "Windows", "macOS"},
|
||||
},
|
||||
"T1070.001": {
|
||||
Name: "TA0005: Defense Evasion => T1070.001: Clear Windows Event Logs",
|
||||
@@ -2114,6 +2153,18 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0005: Defense Evasion => T1070.006: Timestomp",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
},
|
||||
"T1070.007": {
|
||||
Name: "TA0005: Defense Evasion => T1070.007: Clear Network Connection History and Configurations",
|
||||
Platforms: []string{"Linux", "Network", "Windows", "macOS"},
|
||||
},
|
||||
"T1070.008": {
|
||||
Name: "TA0005: Defense Evasion => T1070.008: Clear Mailbox Data",
|
||||
Platforms: []string{"Google Workspace", "Linux", "Office 365", "Windows", "macOS"},
|
||||
},
|
||||
"T1070.009": {
|
||||
Name: "TA0005: Defense Evasion => T1070.009: Clear Persistence",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
},
|
||||
"T1071": {
|
||||
Name: "TA0011: Command and Control => T1071: Application Layer Protocol",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
@@ -2152,7 +2203,7 @@ var TechniqueDict = map[string]Technique{
|
||||
},
|
||||
"T1078": {
|
||||
Name: "TA0001: Initial Access, TA0003: Persistence, TA0004: Privilege Escalation, TA0005: Defense Evasion => T1078: Valid Accounts",
|
||||
Platforms: []string{"Azure AD", "Containers", "Google Workspace", "IaaS", "Linux", "Office 365", "SaaS", "Windows", "macOS"},
|
||||
Platforms: []string{"Azure AD", "Containers", "Google Workspace", "IaaS", "Linux", "Network", "Office 365", "SaaS", "Windows", "macOS"},
|
||||
},
|
||||
"T1078.001": {
|
||||
Name: "TA0001: Initial Access, TA0003: Persistence, TA0004: Privilege Escalation, TA0005: Defense Evasion => T1078.001: Default Accounts",
|
||||
@@ -2504,7 +2555,7 @@ var TechniqueDict = map[string]Technique{
|
||||
},
|
||||
"T1199": {
|
||||
Name: "TA0001: Initial Access => T1199: Trusted Relationship",
|
||||
Platforms: []string{"IaaS", "Linux", "SaaS", "Windows", "macOS"},
|
||||
Platforms: []string{"IaaS", "Linux", "Office 365", "SaaS", "Windows", "macOS"},
|
||||
},
|
||||
"T1200": {
|
||||
Name: "TA0001: Initial Access => T1200: Hardware Additions",
|
||||
@@ -2546,6 +2597,10 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0003: Persistence, TA0005: Defense Evasion, TA0011: Command and Control => T1205.001: Port Knocking",
|
||||
Platforms: []string{"Linux", "Network", "Windows", "macOS"},
|
||||
},
|
||||
"T1205.002": {
|
||||
Name: "TA0003: Persistence, TA0005: Defense Evasion, TA0011: Command and Control => T1205.002: Socket Filters",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
},
|
||||
"T1207": {
|
||||
Name: "TA0005: Defense Evasion => T1207: Rogue Domain Controller",
|
||||
Platforms: []string{"Windows"},
|
||||
@@ -2780,7 +2835,7 @@ var TechniqueDict = map[string]Technique{
|
||||
},
|
||||
"T1505": {
|
||||
Name: "TA0003: Persistence => T1505: Server Software Component",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
Platforms: []string{"Linux", "Network", "Windows", "macOS"},
|
||||
},
|
||||
"T1505.001": {
|
||||
Name: "TA0003: Persistence => T1505.001: SQL Stored Procedures",
|
||||
@@ -2792,7 +2847,7 @@ var TechniqueDict = map[string]Technique{
|
||||
},
|
||||
"T1505.003": {
|
||||
Name: "TA0003: Persistence => T1505.003: Web Shell",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
Platforms: []string{"Linux", "Network", "Windows", "macOS"},
|
||||
},
|
||||
"T1505.004": {
|
||||
Name: "TA0003: Persistence => T1505.004: IIS Components",
|
||||
@@ -2827,8 +2882,8 @@ var TechniqueDict = map[string]Technique{
|
||||
Platforms: []string{"Linux", "Network", "Windows", "macOS"},
|
||||
},
|
||||
"T1530": {
|
||||
Name: "TA0009: Collection => T1530: Data from Cloud Storage Object",
|
||||
Platforms: []string{"IaaS"},
|
||||
Name: "TA0009: Collection => T1530: Data from Cloud Storage",
|
||||
Platforms: []string{"IaaS", "SaaS"},
|
||||
},
|
||||
"T1531": {
|
||||
Name: "TA0040: Impact => T1531: Account Access Removal",
|
||||
@@ -2900,7 +2955,7 @@ var TechniqueDict = map[string]Technique{
|
||||
},
|
||||
"T1546": {
|
||||
Name: "TA0003: Persistence, TA0004: Privilege Escalation => T1546: Event Triggered Execution",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
Platforms: []string{"IaaS", "Linux", "Office 365", "SaaS", "Windows", "macOS"},
|
||||
},
|
||||
"T1546.001": {
|
||||
Name: "TA0003: Persistence, TA0004: Privilege Escalation => T1546.001: Change Default File Association",
|
||||
@@ -2962,6 +3017,10 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0003: Persistence, TA0004: Privilege Escalation => T1546.015: Component Object Model Hijacking",
|
||||
Platforms: []string{"Windows"},
|
||||
},
|
||||
"T1546.016": {
|
||||
Name: "TA0003: Persistence, TA0004: Privilege Escalation => T1546.016: Installer Packages",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
},
|
||||
"T1547": {
|
||||
Name: "TA0003: Persistence, TA0004: Privilege Escalation => T1547: Boot or Logon Autostart Execution",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
@@ -3048,7 +3107,7 @@ var TechniqueDict = map[string]Technique{
|
||||
},
|
||||
"T1550.001": {
|
||||
Name: "TA0005: Defense Evasion, TA0008: Lateral Movement => T1550.001: Application Access Token",
|
||||
Platforms: []string{"Containers", "Google Workspace", "Office 365", "SaaS"},
|
||||
Platforms: []string{"Azure AD", "Containers", "Google Workspace", "IaaS", "Office 365", "SaaS"},
|
||||
},
|
||||
"T1550.002": {
|
||||
Name: "TA0005: Defense Evasion, TA0008: Lateral Movement => T1550.002: Pass the Hash",
|
||||
@@ -3152,7 +3211,7 @@ var TechniqueDict = map[string]Technique{
|
||||
},
|
||||
"T1556": {
|
||||
Name: "TA0003: Persistence, TA0005: Defense Evasion, TA0006: Credential Access => T1556: Modify Authentication Process",
|
||||
Platforms: []string{"Linux", "Network", "Windows", "macOS"},
|
||||
Platforms: []string{"Azure AD", "Google Workspace", "IaaS", "Linux", "Network", "Office 365", "SaaS", "Windows", "macOS"},
|
||||
},
|
||||
"T1556.001": {
|
||||
Name: "TA0003: Persistence, TA0005: Defense Evasion, TA0006: Credential Access => T1556.001: Domain Controller Authentication",
|
||||
@@ -3174,9 +3233,17 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0003: Persistence, TA0005: Defense Evasion, TA0006: Credential Access => T1556.005: Reversible Encryption",
|
||||
Platforms: []string{"Windows"},
|
||||
},
|
||||
"T1556.006": {
|
||||
Name: "TA0003: Persistence, TA0005: Defense Evasion, TA0006: Credential Access => T1556.006: Multi-Factor Authentication",
|
||||
Platforms: []string{"Azure AD", "Google Workspace", "IaaS", "Linux", "Office 365", "SaaS", "Windows", "macOS"},
|
||||
},
|
||||
"T1556.007": {
|
||||
Name: "TA0003: Persistence, TA0005: Defense Evasion, TA0006: Credential Access => T1556.007: Hybrid Identity",
|
||||
Platforms: []string{"Azure AD", "Google Workspace", "IaaS", "Office 365", "SaaS", "Windows"},
|
||||
},
|
||||
"T1557": {
|
||||
Name: "TA0006: Credential Access, TA0009: Collection => T1557: Adversary-in-the-Middle",
|
||||
Platforms: []string{"Linux", "Windows", "macOS"},
|
||||
Platforms: []string{"Linux", "Network", "Windows", "macOS"},
|
||||
},
|
||||
"T1557.001": {
|
||||
Name: "TA0006: Credential Access, TA0009: Collection => T1557.001: LLMNR/NBT-NS Poisoning and SMB Relay",
|
||||
@@ -3550,6 +3617,10 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0042: Resource Development => T1583.006: Web Services",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1583.007": {
|
||||
Name: "TA0042: Resource Development => T1583.007: Serverless",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1584": {
|
||||
Name: "TA0042: Resource Development => T1584: Compromise Infrastructure",
|
||||
Platforms: []string{"PRE"},
|
||||
@@ -3578,6 +3649,10 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0042: Resource Development => T1584.006: Web Services",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1584.007": {
|
||||
Name: "TA0042: Resource Development => T1584.007: Serverless",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1585": {
|
||||
Name: "TA0042: Resource Development => T1585: Establish Accounts",
|
||||
Platforms: []string{"PRE"},
|
||||
@@ -3590,6 +3665,10 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0042: Resource Development => T1585.002: Email Accounts",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1585.003": {
|
||||
Name: "TA0042: Resource Development => T1585.003: Cloud Accounts",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1586": {
|
||||
Name: "TA0042: Resource Development => T1586: Compromise Accounts",
|
||||
Platforms: []string{"PRE"},
|
||||
@@ -3602,6 +3681,10 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0042: Resource Development => T1586.002: Email Accounts",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1586.003": {
|
||||
Name: "TA0042: Resource Development => T1586.003: Cloud Accounts",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1587": {
|
||||
Name: "TA0042: Resource Development => T1587: Develop Capabilities",
|
||||
Platforms: []string{"PRE"},
|
||||
@@ -3746,6 +3829,10 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0043: Reconnaissance => T1593.002: Search Engines",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1593.003": {
|
||||
Name: "TA0043: Reconnaissance => T1593.003: Code Repositories",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1594": {
|
||||
Name: "TA0043: Reconnaissance => T1594: Search Victim-Owned Websites",
|
||||
Platforms: []string{"PRE"},
|
||||
@@ -3898,6 +3985,10 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0042: Resource Development => T1608.005: Link Target",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1608.006": {
|
||||
Name: "TA0042: Resource Development => T1608.006: SEO Poisoning",
|
||||
Platforms: []string{"PRE"},
|
||||
},
|
||||
"T1609": {
|
||||
Name: "TA0002: Execution => T1609: Container Administration Command",
|
||||
Platforms: []string{"Containers"},
|
||||
@@ -3950,4 +4041,12 @@ var TechniqueDict = map[string]Technique{
|
||||
Name: "TA0005: Defense Evasion => T1647: Plist File Modification",
|
||||
Platforms: []string{"macOS"},
|
||||
},
|
||||
"T1648": {
|
||||
Name: "TA0002: Execution => T1648: Serverless Execution",
|
||||
Platforms: []string{"IaaS", "Office 365", "SaaS"},
|
||||
},
|
||||
"T1649": {
|
||||
Name: "TA0006: Credential Access => T1649: Steal or Forge Authentication Certificates",
|
||||
Platforms: []string{"Azure AD", "Linux", "Windows", "macOS"},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -211,9 +211,9 @@ func newCTIDB(cnf config.VulnDictInterface) (ctidb.DB, error) {
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err := ctidb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), ctidb.Option{})
|
||||
driver, err := ctidb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), ctidb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
if xerrors.Is(err, ctidb.ErrDBLocked) {
|
||||
return nil, xerrors.Errorf("Failed to init cti DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init cti DB. DB Path: %s, err: %w", path, err)
|
||||
|
||||
@@ -213,9 +213,9 @@ func newCveDB(cnf config.VulnDictInterface) (cvedb.DB, error) {
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err := cvedb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), cvedb.Option{})
|
||||
driver, err := cvedb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), cvedb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
if xerrors.Is(err, cvedb.ErrDBLocked) {
|
||||
return nil, xerrors.Errorf("Failed to init CVE DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init CVE DB. DB Path: %s, err: %w", path, err)
|
||||
|
||||
@@ -261,21 +261,23 @@ func DetectPkgCves(r *models.ScanResult, ovalCnf config.GovalDictConf, gostCnf c
|
||||
|
||||
// isPkgCvesDetactable checks whether CVEs is detactable with gost and oval from the result
|
||||
func isPkgCvesDetactable(r *models.ScanResult) bool {
|
||||
if r.Release == "" {
|
||||
logging.Log.Infof("r.Release is empty. Skip OVAL and gost detection")
|
||||
return false
|
||||
}
|
||||
|
||||
if r.ScannedVia == "trivy" {
|
||||
logging.Log.Infof("r.ScannedVia is trivy. Skip OVAL and gost detection")
|
||||
return false
|
||||
}
|
||||
|
||||
switch r.Family {
|
||||
case constant.FreeBSD, constant.ServerTypePseudo:
|
||||
logging.Log.Infof("%s type. Skip OVAL and gost detection", r.Family)
|
||||
return false
|
||||
case constant.Windows:
|
||||
return true
|
||||
default:
|
||||
if r.ScannedVia == "trivy" {
|
||||
logging.Log.Infof("r.ScannedVia is trivy. Skip OVAL and gost detection")
|
||||
return false
|
||||
}
|
||||
|
||||
if r.Release == "" {
|
||||
logging.Log.Infof("r.Release is empty. Skip OVAL and gost detection")
|
||||
return false
|
||||
}
|
||||
|
||||
if len(r.Packages)+len(r.SrcPackages) == 0 {
|
||||
logging.Log.Infof("Number of packages is 0. Skip OVAL and gost detection")
|
||||
return false
|
||||
@@ -289,6 +291,8 @@ func DetectGitHubCves(r *models.ScanResult, githubConfs map[string]config.GitHub
|
||||
if len(githubConfs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
r.GitHubManifests = models.DependencyGraphManifests{}
|
||||
for ownerRepo, setting := range githubConfs {
|
||||
ss := strings.Split(ownerRepo, "/")
|
||||
if len(ss) != 2 {
|
||||
@@ -301,6 +305,10 @@ func DetectGitHubCves(r *models.ScanResult, githubConfs map[string]config.GitHub
|
||||
}
|
||||
logging.Log.Infof("%s: %d CVEs detected with GHSA %s/%s",
|
||||
r.FormatServerName(), n, owner, repo)
|
||||
|
||||
if err = DetectGitHubDependencyGraph(r, owner, repo, setting.Token); err != nil {
|
||||
return xerrors.Errorf("Failed to access GitHub Dependency graph: %w", err)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -417,20 +425,20 @@ func detectPkgsCvesWithOval(cnf config.GovalDictConf, r *models.ScanResult, logO
|
||||
}
|
||||
}()
|
||||
|
||||
logging.Log.Debugf("Check if oval fetched: %s %s", r.Family, r.Release)
|
||||
ok, err := client.CheckIfOvalFetched(r.Family, r.Release)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
switch r.Family {
|
||||
case constant.Debian:
|
||||
logging.Log.Infof("Skip OVAL and Scan with gost alone.")
|
||||
logging.Log.Infof("%s: %d CVEs are detected with OVAL", r.FormatServerName(), 0)
|
||||
return nil
|
||||
case constant.Windows, constant.FreeBSD, constant.ServerTypePseudo:
|
||||
return nil
|
||||
default:
|
||||
switch r.Family {
|
||||
case constant.Debian, constant.Raspbian, constant.Ubuntu:
|
||||
logging.Log.Infof("Skip OVAL and Scan with gost alone.")
|
||||
logging.Log.Infof("%s: %d CVEs are detected with OVAL", r.FormatServerName(), 0)
|
||||
return nil
|
||||
case constant.Windows, constant.FreeBSD, constant.ServerTypePseudo:
|
||||
return nil
|
||||
default:
|
||||
logging.Log.Debugf("Check if oval fetched: %s %s", r.Family, r.Release)
|
||||
ok, err := client.CheckIfOvalFetched(r.Family, r.Release)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !ok {
|
||||
return xerrors.Errorf("OVAL entries of %s %s are not found. Fetch OVAL before reporting. For details, see `https://github.com/vulsio/goval-dictionary#usage`", r.Family, r.Release)
|
||||
}
|
||||
}
|
||||
@@ -464,19 +472,21 @@ func detectPkgsCvesWithGost(cnf config.GostConf, r *models.ScanResult, logOpts l
|
||||
|
||||
nCVEs, err := client.DetectCVEs(r, true)
|
||||
if err != nil {
|
||||
if r.Family == constant.Debian {
|
||||
switch r.Family {
|
||||
case constant.Debian, constant.Raspbian, constant.Ubuntu, constant.Windows:
|
||||
return xerrors.Errorf("Failed to detect CVEs with gost: %w", err)
|
||||
default:
|
||||
return xerrors.Errorf("Failed to detect unfixed CVEs with gost: %w", err)
|
||||
}
|
||||
return xerrors.Errorf("Failed to detect unfixed CVEs with gost: %w", err)
|
||||
}
|
||||
|
||||
if r.Family == constant.Debian {
|
||||
logging.Log.Infof("%s: %d CVEs are detected with gost",
|
||||
r.FormatServerName(), nCVEs)
|
||||
} else {
|
||||
logging.Log.Infof("%s: %d unfixed CVEs are detected with gost",
|
||||
r.FormatServerName(), nCVEs)
|
||||
switch r.Family {
|
||||
case constant.Debian, constant.Raspbian, constant.Ubuntu, constant.Windows:
|
||||
logging.Log.Infof("%s: %d CVEs are detected with gost", r.FormatServerName(), nCVEs)
|
||||
default:
|
||||
logging.Log.Infof("%s: %d unfixed CVEs are detected with gost", r.FormatServerName(), nCVEs)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -109,14 +109,20 @@ func FillWithExploit(r *models.ScanResult, cnf config.ExploitConf, logOpts loggi
|
||||
// ConvertToModelsExploit converts exploit model to vuls model
|
||||
func ConvertToModelsExploit(es []exploitmodels.Exploit) (exploits []models.Exploit) {
|
||||
for _, e := range es {
|
||||
var documentURL, shellURL *string
|
||||
var documentURL, shellURL, paperURL, ghdbURL *string
|
||||
if e.OffensiveSecurity != nil {
|
||||
os := e.OffensiveSecurity
|
||||
if os.Document != nil {
|
||||
documentURL = &os.Document.DocumentURL
|
||||
documentURL = &os.Document.FileURL
|
||||
}
|
||||
if os.ShellCode != nil {
|
||||
shellURL = &os.ShellCode.ShellCodeURL
|
||||
shellURL = &os.ShellCode.FileURL
|
||||
}
|
||||
if os.Paper != nil {
|
||||
paperURL = &os.Paper.FileURL
|
||||
}
|
||||
if os.GHDB != nil {
|
||||
ghdbURL = &os.GHDB.Link
|
||||
}
|
||||
}
|
||||
exploit := models.Exploit{
|
||||
@@ -126,6 +132,8 @@ func ConvertToModelsExploit(es []exploitmodels.Exploit) (exploits []models.Explo
|
||||
Description: e.Description,
|
||||
DocumentURL: documentURL,
|
||||
ShellCodeURL: shellURL,
|
||||
PaperURL: paperURL,
|
||||
GHDBURL: ghdbURL,
|
||||
}
|
||||
exploits = append(exploits, exploit)
|
||||
}
|
||||
@@ -231,7 +239,7 @@ func httpGetExploit(url string, req exploitRequest, resChan chan<- exploitRespon
|
||||
}
|
||||
}
|
||||
|
||||
func newExploitDB(cnf config.VulnDictInterface) (driver exploitdb.DB, err error) {
|
||||
func newExploitDB(cnf config.VulnDictInterface) (exploitdb.DB, error) {
|
||||
if cnf.IsFetchViaHTTP() {
|
||||
return nil, nil
|
||||
}
|
||||
@@ -239,9 +247,9 @@ func newExploitDB(cnf config.VulnDictInterface) (driver exploitdb.DB, err error)
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err := exploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), exploitdb.Option{})
|
||||
driver, err := exploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), exploitdb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
if xerrors.Is(err, exploitdb.ErrDBLocked) {
|
||||
return nil, xerrors.Errorf("Failed to init exploit DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init exploit DB. DB Path: %s, err: %w", path, err)
|
||||
|
||||
@@ -10,9 +10,12 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/future-architect/vuls/errof"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
@@ -29,7 +32,7 @@ func DetectGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string,
|
||||
// TODO Use `https://github.com/shurcooL/githubv4` if the tool supports vulnerabilityAlerts Endpoint
|
||||
// Memo : https://developer.github.com/v4/explorer/
|
||||
const jsonfmt = `{"query":
|
||||
"query { repository(owner:\"%s\", name:\"%s\") { url vulnerabilityAlerts(first: %d, %s) { pageInfo { endCursor hasNextPage startCursor } edges { node { id dismissReason dismissedAt securityVulnerability{ package { name ecosystem } severity vulnerableVersionRange firstPatchedVersion { identifier } } securityAdvisory { description ghsaId permalink publishedAt summary updatedAt withdrawnAt origin severity references { url } identifiers { type value } } } } } } } "}`
|
||||
"query { repository(owner:\"%s\", name:\"%s\") { url vulnerabilityAlerts(first: %d, states:[OPEN], %s) { pageInfo { endCursor hasNextPage startCursor } edges { node { id dismissReason dismissedAt securityVulnerability{ package { name ecosystem } severity vulnerableVersionRange firstPatchedVersion { identifier } } vulnerableManifestFilename vulnerableManifestPath vulnerableRequirements securityAdvisory { description ghsaId permalink publishedAt summary updatedAt withdrawnAt origin severity references { url } identifiers { type value } } } } } } } "}`
|
||||
after := ""
|
||||
|
||||
for {
|
||||
@@ -79,11 +82,15 @@ func DetectGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string,
|
||||
continue
|
||||
}
|
||||
|
||||
pkgName := fmt.Sprintf("%s %s",
|
||||
alerts.Data.Repository.URL, v.Node.SecurityVulnerability.Package.Name)
|
||||
|
||||
m := models.GitHubSecurityAlert{
|
||||
PackageName: pkgName,
|
||||
Repository: alerts.Data.Repository.URL,
|
||||
Package: models.GSAVulnerablePackage{
|
||||
Name: v.Node.SecurityVulnerability.Package.Name,
|
||||
Ecosystem: v.Node.SecurityVulnerability.Package.Ecosystem,
|
||||
ManifestFilename: v.Node.VulnerableManifestFilename,
|
||||
ManifestPath: v.Node.VulnerableManifestPath,
|
||||
Requirements: v.Node.VulnerableRequirements,
|
||||
},
|
||||
FixedIn: v.Node.SecurityVulnerability.FirstPatchedVersion.Identifier,
|
||||
AffectedRange: v.Node.SecurityVulnerability.VulnerableVersionRange,
|
||||
Dismissed: len(v.Node.DismissReason) != 0,
|
||||
@@ -148,7 +155,7 @@ func DetectGitHubSecurityAlerts(r *models.ScanResult, owner, repo, token string,
|
||||
return nCVEs, err
|
||||
}
|
||||
|
||||
//SecurityAlerts has detected CVE-IDs, PackageNames, Refs
|
||||
// SecurityAlerts has detected CVE-IDs, PackageNames, Refs
|
||||
type SecurityAlerts struct {
|
||||
Data struct {
|
||||
Repository struct {
|
||||
@@ -175,7 +182,10 @@ type SecurityAlerts struct {
|
||||
Identifier string `json:"identifier"`
|
||||
} `json:"firstPatchedVersion"`
|
||||
} `json:"securityVulnerability"`
|
||||
SecurityAdvisory struct {
|
||||
VulnerableManifestFilename string `json:"vulnerableManifestFilename"`
|
||||
VulnerableManifestPath string `json:"vulnerableManifestPath"`
|
||||
VulnerableRequirements string `json:"vulnerableRequirements"`
|
||||
SecurityAdvisory struct {
|
||||
Description string `json:"description"`
|
||||
GhsaID string `json:"ghsaId"`
|
||||
Permalink string `json:"permalink"`
|
||||
@@ -199,3 +209,187 @@ type SecurityAlerts struct {
|
||||
} `json:"repository"`
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// DetectGitHubDependencyGraph access to owner/repo on GitHub and fetch dependency graph of the repository via GitHub API v4 GraphQL and then set to the given ScanResult.
|
||||
// https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-the-dependency-graph
|
||||
func DetectGitHubDependencyGraph(r *models.ScanResult, owner, repo, token string) (err error) {
|
||||
src := oauth2.StaticTokenSource(
|
||||
&oauth2.Token{AccessToken: token},
|
||||
)
|
||||
//TODO Proxy
|
||||
httpClient := oauth2.NewClient(context.Background(), src)
|
||||
|
||||
return fetchDependencyGraph(r, httpClient, owner, repo, "", "", 10, 100)
|
||||
}
|
||||
|
||||
// recursive function
|
||||
func fetchDependencyGraph(r *models.ScanResult, httpClient *http.Client, owner, repo, after, dependenciesAfter string, first, dependenciesFirst int) (err error) {
|
||||
const queryFmt = `{"query":
|
||||
"query { repository(owner:\"%s\", name:\"%s\") { url dependencyGraphManifests(first: %d, withDependencies: true%s) { pageInfo { endCursor hasNextPage } edges { node { blobPath filename repository { url } parseable exceedsMaxSize dependenciesCount dependencies(first: %d%s) { pageInfo { endCursor hasNextPage } edges { node { packageName packageManager repository { url } requirements hasDependencies } } } } } } } }"}`
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
var graph DependencyGraph
|
||||
rateLimitRemaining := 5000
|
||||
count, retryMax := 0, 10
|
||||
retryCheck := func(err error) error {
|
||||
if count == retryMax {
|
||||
return backoff.Permanent(err)
|
||||
}
|
||||
if rateLimitRemaining == 0 {
|
||||
// The GraphQL API rate limit is 5,000 points per hour.
|
||||
// Terminate with an error on rate limit reached.
|
||||
return backoff.Permanent(errof.New(errof.ErrFailedToAccessGithubAPI,
|
||||
fmt.Sprintf("rate limit exceeded. error: %s", err.Error())))
|
||||
}
|
||||
return err
|
||||
}
|
||||
operation := func() error {
|
||||
count++
|
||||
queryStr := fmt.Sprintf(queryFmt, owner, repo, first, after, dependenciesFirst, dependenciesAfter)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost,
|
||||
"https://api.github.com/graphql",
|
||||
bytes.NewBuffer([]byte(queryStr)),
|
||||
)
|
||||
if err != nil {
|
||||
return retryCheck(err)
|
||||
}
|
||||
|
||||
// https://docs.github.com/en/graphql/overview/schema-previews#access-to-a-repository-s-dependency-graph-preview
|
||||
// TODO remove this header if it is no longer preview status in the future.
|
||||
req.Header.Set("Accept", "application/vnd.github.hawkgirl-preview+json")
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
|
||||
resp, err := httpClient.Do(req)
|
||||
if err != nil {
|
||||
return retryCheck(err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// https://docs.github.com/en/graphql/overview/resource-limitations#rate-limit
|
||||
if rateLimitRemaining, err = strconv.Atoi(resp.Header.Get("X-RateLimit-Remaining")); err != nil {
|
||||
// If the header retrieval fails, rateLimitRemaining will be set to 0,
|
||||
// preventing further retries. To enable retry, we reset it to 5000.
|
||||
rateLimitRemaining = 5000
|
||||
return retryCheck(errof.New(errof.ErrFailedToAccessGithubAPI, "Failed to get X-RateLimit-Remaining header"))
|
||||
}
|
||||
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return retryCheck(err)
|
||||
}
|
||||
|
||||
graph = DependencyGraph{}
|
||||
if err := json.Unmarshal(body, &graph); err != nil {
|
||||
return retryCheck(err)
|
||||
}
|
||||
|
||||
if len(graph.Errors) > 0 || graph.Data.Repository.URL == "" {
|
||||
// this mainly occurs on timeout
|
||||
// reduce the number of dependencies to be fetched for the next retry
|
||||
if dependenciesFirst > 50 {
|
||||
dependenciesFirst -= 5
|
||||
}
|
||||
return retryCheck(errof.New(errof.ErrFailedToAccessGithubAPI,
|
||||
fmt.Sprintf("Failed to access to GitHub API. Repository: %s/%s; Response: %s", owner, repo, string(body))))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, t time.Duration) {
|
||||
logging.Log.Warnf("Failed attempts (count: %d). retrying in %s. err: %+v", count, t, err)
|
||||
}
|
||||
|
||||
if err = backoff.RetryNotify(operation, backoff.NewExponentialBackOff(), notify); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dependenciesAfter = ""
|
||||
for _, m := range graph.Data.Repository.DependencyGraphManifests.Edges {
|
||||
manifest, ok := r.GitHubManifests[m.Node.BlobPath]
|
||||
if !ok {
|
||||
manifest = models.DependencyGraphManifest{
|
||||
BlobPath: m.Node.BlobPath,
|
||||
Filename: m.Node.Filename,
|
||||
Repository: m.Node.Repository.URL,
|
||||
Dependencies: []models.Dependency{},
|
||||
}
|
||||
}
|
||||
for _, d := range m.Node.Dependencies.Edges {
|
||||
manifest.Dependencies = append(manifest.Dependencies, models.Dependency{
|
||||
PackageName: d.Node.PackageName,
|
||||
PackageManager: d.Node.PackageManager,
|
||||
Repository: d.Node.Repository.URL,
|
||||
Requirements: d.Node.Requirements,
|
||||
})
|
||||
}
|
||||
r.GitHubManifests[m.Node.BlobPath] = manifest
|
||||
|
||||
if m.Node.Dependencies.PageInfo.HasNextPage {
|
||||
dependenciesAfter = fmt.Sprintf(`, after: \"%s\"`, m.Node.Dependencies.PageInfo.EndCursor)
|
||||
}
|
||||
}
|
||||
if dependenciesAfter != "" {
|
||||
return fetchDependencyGraph(r, httpClient, owner, repo, after, dependenciesAfter, first, dependenciesFirst)
|
||||
}
|
||||
|
||||
if graph.Data.Repository.DependencyGraphManifests.PageInfo.HasNextPage {
|
||||
after = fmt.Sprintf(`, after: \"%s\"`, graph.Data.Repository.DependencyGraphManifests.PageInfo.EndCursor)
|
||||
return fetchDependencyGraph(r, httpClient, owner, repo, after, dependenciesAfter, first, dependenciesFirst)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// DependencyGraph is a GitHub API response
|
||||
type DependencyGraph struct {
|
||||
Data struct {
|
||||
Repository struct {
|
||||
URL string `json:"url"`
|
||||
DependencyGraphManifests struct {
|
||||
PageInfo struct {
|
||||
EndCursor string `json:"endCursor"`
|
||||
HasNextPage bool `json:"hasNextPage"`
|
||||
} `json:"pageInfo"`
|
||||
Edges []struct {
|
||||
Node struct {
|
||||
BlobPath string `json:"blobPath"`
|
||||
Filename string `json:"filename"`
|
||||
Repository struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
Parseable bool `json:"parseable"`
|
||||
ExceedsMaxSize bool `json:"exceedsMaxSize"`
|
||||
DependenciesCount int `json:"dependenciesCount"`
|
||||
Dependencies struct {
|
||||
PageInfo struct {
|
||||
EndCursor string `json:"endCursor"`
|
||||
HasNextPage bool `json:"hasNextPage"`
|
||||
} `json:"pageInfo"`
|
||||
Edges []struct {
|
||||
Node struct {
|
||||
PackageName string `json:"packageName"`
|
||||
PackageManager string `json:"packageManager"`
|
||||
Repository struct {
|
||||
URL string `json:"url"`
|
||||
}
|
||||
Requirements string `json:"requirements"`
|
||||
HasDependencies bool `json:"hasDependencies"`
|
||||
} `json:"node"`
|
||||
} `json:"edges"`
|
||||
} `json:"dependencies"`
|
||||
} `json:"node"`
|
||||
} `json:"edges"`
|
||||
} `json:"dependencyGraphManifests"`
|
||||
} `json:"repository"`
|
||||
} `json:"data"`
|
||||
Errors []struct {
|
||||
Type string `json:"type,omitempty"`
|
||||
Path []interface{} `json:"path,omitempty"`
|
||||
Locations []struct {
|
||||
Line int `json:"line"`
|
||||
Column int `json:"column"`
|
||||
} `json:"locations,omitempty"`
|
||||
Message string `json:"message"`
|
||||
} `json:"errors,omitempty"`
|
||||
}
|
||||
|
||||
@@ -234,9 +234,9 @@ func newKEVulnDB(cnf config.VulnDictInterface) (kevulndb.DB, error) {
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err := kevulndb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), kevulndb.Option{})
|
||||
driver, err := kevulndb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), kevulndb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
if xerrors.Is(err, kevulndb.ErrDBLocked) {
|
||||
return nil, xerrors.Errorf("Failed to init kevuln DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init kevuln DB. DB Path: %s, err: %w", path, err)
|
||||
|
||||
@@ -233,9 +233,9 @@ func newMetasploitDB(cnf config.VulnDictInterface) (metasploitdb.DB, error) {
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err := metasploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), metasploitdb.Option{})
|
||||
driver, err := metasploitdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), metasploitdb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
if xerrors.Is(err, metasploitdb.ErrDBLocked) {
|
||||
return nil, xerrors.Errorf("Failed to init metasploit DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init metasploit DB. DB Path: %s, err: %w", path, err)
|
||||
|
||||
@@ -6,11 +6,9 @@ package detector
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/fs"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
@@ -183,11 +181,7 @@ func getMinusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
}
|
||||
|
||||
func isCveInfoUpdated(cveID string, previous, current models.ScanResult) bool {
|
||||
cTypes := []models.CveContentType{
|
||||
models.Nvd,
|
||||
models.Jvn,
|
||||
models.NewCveContentType(current.Family),
|
||||
}
|
||||
cTypes := append([]models.CveContentType{models.Nvd, models.Jvn}, models.GetCveContentTypes(current.Family)...)
|
||||
|
||||
prevLastModified := map[models.CveContentType][]time.Time{}
|
||||
preVinfo, ok := previous.ScannedCves[cveID]
|
||||
@@ -225,25 +219,23 @@ func isCveInfoUpdated(cveID string, previous, current models.ScanResult) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// jsonDirPattern is file name pattern of JSON directory
|
||||
// 2016-11-16T10:43:28+09:00
|
||||
// 2016-11-16T10:43:28Z
|
||||
var jsonDirPattern = regexp.MustCompile(
|
||||
`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:Z|[+-]\d{2}:\d{2})$`)
|
||||
|
||||
// ListValidJSONDirs returns valid json directory as array
|
||||
// Returned array is sorted so that recent directories are at the head
|
||||
func ListValidJSONDirs(resultsDir string) (dirs []string, err error) {
|
||||
var dirInfo []fs.DirEntry
|
||||
if dirInfo, err = os.ReadDir(resultsDir); err != nil {
|
||||
err = xerrors.Errorf("Failed to read %s: %w",
|
||||
config.Conf.ResultsDir, err)
|
||||
return
|
||||
dirInfo, err := os.ReadDir(resultsDir)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to read %s: %w", config.Conf.ResultsDir, err)
|
||||
}
|
||||
for _, d := range dirInfo {
|
||||
if d.IsDir() && jsonDirPattern.MatchString(d.Name()) {
|
||||
jsonDir := filepath.Join(resultsDir, d.Name())
|
||||
dirs = append(dirs, jsonDir)
|
||||
if !d.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, layout := range []string{"2006-01-02T15:04:05Z", "2006-01-02T15:04:05-07:00", "2006-01-02T15-04-05-0700"} {
|
||||
if _, err := time.Parse(layout, d.Name()); err == nil {
|
||||
dirs = append(dirs, filepath.Join(resultsDir, d.Name()))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Slice(dirs, func(i, j int) bool {
|
||||
|
||||
@@ -21,7 +21,7 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
//WpCveInfos is for wpscan json
|
||||
// WpCveInfos is for wpscan json
|
||||
type WpCveInfos struct {
|
||||
ReleaseDate string `json:"release_date"`
|
||||
ChangelogURL string `json:"changelog_url"`
|
||||
@@ -33,7 +33,7 @@ type WpCveInfos struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
|
||||
//WpCveInfo is for wpscan json
|
||||
// WpCveInfo is for wpscan json
|
||||
type WpCveInfo struct {
|
||||
ID string `json:"id"`
|
||||
Title string `json:"title"`
|
||||
@@ -44,7 +44,7 @@ type WpCveInfo struct {
|
||||
FixedIn string `json:"fixed_in"`
|
||||
}
|
||||
|
||||
//References is for wpscan json
|
||||
// References is for wpscan json
|
||||
type References struct {
|
||||
URL []string `json:"url"`
|
||||
Cve []string `json:"cve"`
|
||||
|
||||
237
go.mod
237
go.mod
@@ -1,22 +1,26 @@
|
||||
module github.com/future-architect/vuls
|
||||
|
||||
go 1.18
|
||||
go 1.20
|
||||
|
||||
require (
|
||||
github.com/Azure/azure-sdk-for-go v66.0.0+incompatible
|
||||
github.com/BurntSushi/toml v1.1.0
|
||||
github.com/Ullaakut/nmap/v2 v2.1.2-0.20210406060955-59a52fe80a4f
|
||||
github.com/aquasecurity/go-dep-parser v0.0.0-20220626060741-179d0b167e5f
|
||||
github.com/aquasecurity/trivy v0.30.4
|
||||
github.com/Azure/azure-sdk-for-go v68.0.0+incompatible
|
||||
github.com/BurntSushi/toml v1.3.2
|
||||
github.com/CycloneDX/cyclonedx-go v0.7.1
|
||||
github.com/Ullaakut/nmap/v2 v2.2.2
|
||||
github.com/aquasecurity/go-dep-parser v0.0.0-20221114145626-35ef808901e8
|
||||
github.com/aquasecurity/trivy v0.35.0
|
||||
github.com/aquasecurity/trivy-db v0.0.0-20220627104749-930461748b63
|
||||
github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d
|
||||
github.com/aws/aws-sdk-go v1.44.46
|
||||
github.com/c-robinson/iplib v1.0.3
|
||||
github.com/aws/aws-sdk-go v1.44.300
|
||||
github.com/c-robinson/iplib v1.0.6
|
||||
github.com/cenkalti/backoff v2.2.1+incompatible
|
||||
github.com/d4l3k/messagediff v1.2.2-0.20190829033028-7e0a312ae40b
|
||||
github.com/emersion/go-sasl v0.0.0-20200509203442-7bfe0ed36a21
|
||||
github.com/emersion/go-smtp v0.14.0
|
||||
github.com/emersion/go-smtp v0.16.0
|
||||
github.com/google/go-cmp v0.5.9
|
||||
github.com/google/subcommands v1.2.0
|
||||
github.com/google/uuid v1.3.0
|
||||
github.com/gosnmp/gosnmp v1.35.0
|
||||
github.com/gosuri/uitable v0.0.4
|
||||
github.com/hashicorp/go-uuid v1.0.3
|
||||
github.com/hashicorp/go-version v1.6.0
|
||||
@@ -31,166 +35,155 @@ require (
|
||||
github.com/mitchellh/go-homedir v1.1.0
|
||||
github.com/nlopes/slack v0.6.0
|
||||
github.com/olekukonko/tablewriter v0.0.5
|
||||
github.com/package-url/packageurl-go v0.1.1-0.20220203205134-d70459300c8a
|
||||
github.com/parnurzeal/gorequest v0.2.16
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/rifflock/lfshook v0.0.0-20180920164130-b9218ef580f5
|
||||
github.com/sirupsen/logrus v1.9.0
|
||||
github.com/spf13/cobra v1.5.0
|
||||
github.com/vulsio/go-cti v0.0.2-0.20220613013115-8c7e57a6aa86
|
||||
github.com/vulsio/go-cve-dictionary v0.8.2-0.20211028094424-0a854f8e8f85
|
||||
github.com/vulsio/go-exploitdb v0.4.2
|
||||
github.com/vulsio/go-kev v0.1.1-0.20220118062020-5f69b364106f
|
||||
github.com/vulsio/go-msfdb v0.2.1-0.20211028071756-4a9759bd9f14
|
||||
github.com/vulsio/gost v0.4.2-0.20220630181607-2ed593791ec3
|
||||
github.com/vulsio/goval-dictionary v0.8.0
|
||||
go.etcd.io/bbolt v1.3.6
|
||||
golang.org/x/exp v0.0.0-20220613132600-b0d781184e0d
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5
|
||||
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f
|
||||
github.com/saintfish/chardet v0.0.0-20230101081208-5e3ef4b5456d
|
||||
github.com/sirupsen/logrus v1.9.3
|
||||
github.com/spf13/cobra v1.7.0
|
||||
github.com/vulsio/go-cti v0.0.3
|
||||
github.com/vulsio/go-cve-dictionary v0.8.4
|
||||
github.com/vulsio/go-exploitdb v0.4.5
|
||||
github.com/vulsio/go-kev v0.1.2
|
||||
github.com/vulsio/go-msfdb v0.2.2
|
||||
github.com/vulsio/gost v0.4.4
|
||||
github.com/vulsio/goval-dictionary v0.9.2
|
||||
go.etcd.io/bbolt v1.3.7
|
||||
golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53
|
||||
golang.org/x/oauth2 v0.8.0
|
||||
golang.org/x/sync v0.2.0
|
||||
golang.org/x/text v0.9.0
|
||||
golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.100.2 // indirect
|
||||
cloud.google.com/go/compute v1.6.1 // indirect
|
||||
cloud.google.com/go/iam v0.3.0 // indirect
|
||||
cloud.google.com/go/storage v1.14.0 // indirect
|
||||
cloud.google.com/go v0.107.0 // indirect
|
||||
cloud.google.com/go/compute v1.15.1 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.2.3 // indirect
|
||||
cloud.google.com/go/iam v0.8.0 // indirect
|
||||
cloud.google.com/go/storage v1.27.0 // indirect
|
||||
github.com/Azure/go-autorest v14.2.0+incompatible // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.27 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.20 // indirect
|
||||
github.com/Azure/go-autorest/autorest v0.11.28 // indirect
|
||||
github.com/Azure/go-autorest/autorest/adal v0.9.21 // indirect
|
||||
github.com/Azure/go-autorest/autorest/date v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/autorest/to v0.3.0 // indirect
|
||||
github.com/Azure/go-autorest/logger v0.2.1 // indirect
|
||||
github.com/Azure/go-autorest/tracing v0.6.0 // indirect
|
||||
github.com/Microsoft/go-winio v0.5.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.6.1 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/PuerkitoBio/goquery v1.8.1 // indirect
|
||||
github.com/VividCortex/ewma v1.2.0 // indirect
|
||||
github.com/acomagu/bufpipe v1.0.3 // indirect
|
||||
github.com/andybalholm/cascadia v1.2.0 // indirect
|
||||
github.com/andybalholm/cascadia v1.3.2 // indirect
|
||||
github.com/aquasecurity/go-gem-version v0.0.0-20201115065557-8eed6fe000ce // indirect
|
||||
github.com/aquasecurity/go-npm-version v0.0.0-20201110091526-0b796d180798 // indirect
|
||||
github.com/aquasecurity/go-pep440-version v0.0.0-20210121094942-22b2f8951d46 // indirect
|
||||
github.com/aquasecurity/go-version v0.0.0-20210121072130-637058cfe492 // indirect
|
||||
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d // indirect
|
||||
github.com/briandowns/spinner v1.18.1 // indirect
|
||||
github.com/caarlos0/env/v6 v6.9.3 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/cheggaaa/pb/v3 v3.1.0 // indirect
|
||||
github.com/briandowns/spinner v1.23.0 // indirect
|
||||
github.com/caarlos0/env/v6 v6.10.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/cheggaaa/pb/v3 v3.1.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/dgryski/go-minhash v0.0.0-20170608043002-7fe510aff544 // indirect
|
||||
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
|
||||
github.com/docker/cli v20.10.17+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.1+incompatible // indirect
|
||||
github.com/docker/docker v20.10.17+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.6.4 // indirect
|
||||
github.com/ekzhu/minhash-lsh v0.0.0-20171225071031-5c06ee8586a1 // indirect
|
||||
github.com/emirpasic/gods v1.12.0 // indirect
|
||||
github.com/fatih/color v1.13.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.5.4 // indirect
|
||||
github.com/go-enry/go-license-detector/v4 v4.3.0 // indirect
|
||||
github.com/go-git/gcfg v1.5.0 // indirect
|
||||
github.com/go-git/go-billy/v5 v5.3.1 // indirect
|
||||
github.com/go-git/go-git/v5 v5.4.2 // indirect
|
||||
github.com/dnaeon/go-vcr v1.2.0 // indirect
|
||||
github.com/docker/cli v20.10.20+incompatible // indirect
|
||||
github.com/docker/distribution v2.8.2+incompatible // indirect
|
||||
github.com/docker/docker v23.0.4+incompatible // indirect
|
||||
github.com/docker/docker-credential-helpers v0.7.0 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/emirpasic/gods v1.18.1 // indirect
|
||||
github.com/fatih/color v1.15.0 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/glebarez/go-sqlite v1.21.1 // indirect
|
||||
github.com/glebarez/sqlite v1.8.1-0.20230417114740-1accfe103bf2 // indirect
|
||||
github.com/go-redis/redis/v8 v8.11.5 // indirect
|
||||
github.com/go-sql-driver/mysql v1.6.0 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/gofrs/uuid v4.0.0+incompatible // indirect
|
||||
github.com/golang-jwt/jwt/v4 v4.2.0 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-containerregistry v0.8.0 // indirect
|
||||
github.com/google/licenseclassifier/v2 v2.0.0-pre5 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.4.0 // indirect
|
||||
github.com/google/go-containerregistry v0.12.0 // indirect
|
||||
github.com/google/licenseclassifier/v2 v2.0.0-pre6 // indirect
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.2.1 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.7.0 // indirect
|
||||
github.com/gopherjs/gopherjs v1.17.2 // indirect
|
||||
github.com/gorilla/websocket v1.4.2 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-getter v1.6.2 // indirect
|
||||
github.com/hashicorp/go-getter v1.7.0 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/hashicorp/go-retryablehttp v0.7.1 // indirect
|
||||
github.com/hashicorp/go-safetemp v1.0.0 // indirect
|
||||
github.com/hashicorp/hcl v1.0.0 // indirect
|
||||
github.com/hhatto/gorst v0.0.0-20181029133204-ca9f730cac5b // indirect
|
||||
github.com/imdario/mergo v0.3.13 // indirect
|
||||
github.com/inconshreveable/log15 v0.0.0-20201112154412-8562bdadbbac // indirect
|
||||
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgconn v1.12.1 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
github.com/inconshreveable/log15 v3.0.0-testing.5+incompatible // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/jackc/pgpassfile v1.0.0 // indirect
|
||||
github.com/jackc/pgproto3/v2 v2.3.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b // indirect
|
||||
github.com/jackc/pgtype v1.11.0 // indirect
|
||||
github.com/jackc/pgx/v4 v4.16.1 // indirect
|
||||
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
|
||||
github.com/jdkato/prose v1.1.0 // indirect
|
||||
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
|
||||
github.com/jackc/pgx/v5 v5.3.1 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/kevinburke/ssh_config v1.1.0 // indirect
|
||||
github.com/klauspost/compress v1.15.6 // indirect
|
||||
github.com/lib/pq v1.10.5 // indirect
|
||||
github.com/magiconair/properties v1.8.6 // indirect
|
||||
github.com/jtolds/gls v4.20.0+incompatible // indirect
|
||||
github.com/kevinburke/ssh_config v1.2.0 // indirect
|
||||
github.com/klauspost/compress v1.15.11 // indirect
|
||||
github.com/liamg/jfather v0.0.7 // indirect
|
||||
github.com/magiconair/properties v1.8.7 // indirect
|
||||
github.com/masahiro331/go-mvn-version v0.0.0-20210429150710-d3157d602a08 // indirect
|
||||
github.com/mattn/go-colorable v0.1.12 // indirect
|
||||
github.com/mattn/go-isatty v0.0.14 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.13 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.14 // indirect
|
||||
github.com/masahiro331/go-xfs-filesystem v0.0.0-20221127135739-051c25f1becd // indirect
|
||||
github.com/mattn/go-colorable v0.1.13 // indirect
|
||||
github.com/mattn/go-isatty v0.0.18 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.0.0 // indirect
|
||||
github.com/mitchellh/go-testing-interface v1.14.1 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/montanaflynn/stats v0.0.0-20151014174947-eeaced052adb // indirect
|
||||
github.com/nsf/termbox-go v1.1.1 // indirect
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.0.3-0.20220303224323-02efb9a75ee1 // indirect
|
||||
github.com/pelletier/go-toml v1.9.5 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.2 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc2 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.7 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/rivo/uniseg v0.3.1 // indirect
|
||||
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rogpeppe/go-internal v1.8.1 // indirect
|
||||
github.com/russross/blackfriday/v2 v2.1.0 // indirect
|
||||
github.com/sergi/go-diff v1.2.0 // indirect
|
||||
github.com/shogo82148/go-shuffle v0.0.0-20170808115208-59829097ff3b // indirect
|
||||
github.com/spf13/afero v1.9.2 // indirect
|
||||
github.com/samber/lo v1.33.0 // indirect
|
||||
github.com/sergi/go-diff v1.3.1 // indirect
|
||||
github.com/smartystreets/assertions v1.13.0 // indirect
|
||||
github.com/spdx/tools-golang v0.3.0 // indirect
|
||||
github.com/spf13/afero v1.9.5 // indirect
|
||||
github.com/spf13/cast v1.5.0 // indirect
|
||||
github.com/spf13/jwalterweatherman v1.1.0 // indirect
|
||||
github.com/spf13/pflag v1.0.5 // indirect
|
||||
github.com/spf13/viper v1.12.0 // indirect
|
||||
github.com/stretchr/objx v0.4.0 // indirect
|
||||
github.com/stretchr/testify v1.8.0 // indirect
|
||||
github.com/subosito/gotenv v1.4.0 // indirect
|
||||
github.com/ulikunitz/xz v0.5.10 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.0 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.uber.org/atomic v1.7.0 // indirect
|
||||
github.com/spf13/viper v1.15.0 // indirect
|
||||
github.com/stretchr/objx v0.5.0 // indirect
|
||||
github.com/stretchr/testify v1.8.2 // indirect
|
||||
github.com/subosito/gotenv v1.4.2 // indirect
|
||||
github.com/ulikunitz/xz v0.5.11 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.3 // indirect
|
||||
go.opencensus.io v0.24.0 // indirect
|
||||
go.uber.org/atomic v1.10.0 // indirect
|
||||
go.uber.org/goleak v1.1.12 // indirect
|
||||
go.uber.org/multierr v1.7.0 // indirect
|
||||
go.uber.org/zap v1.21.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa // indirect
|
||||
golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 // indirect
|
||||
golang.org/x/net v0.0.0-20220802222814-0bcc04d9c69b // indirect
|
||||
golang.org/x/sys v0.0.0-20220731174439-a90be440212d // indirect
|
||||
golang.org/x/term v0.0.0-20220526004731-065cf7ba2467 // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/time v0.0.0-20220722155302-e5dcc9cfc0b9 // indirect
|
||||
gonum.org/v1/gonum v0.7.0 // indirect
|
||||
google.golang.org/api v0.81.0 // indirect
|
||||
go.uber.org/multierr v1.8.0 // indirect
|
||||
go.uber.org/zap v1.23.0 // indirect
|
||||
golang.org/x/crypto v0.9.0 // indirect
|
||||
golang.org/x/mod v0.10.0 // indirect
|
||||
golang.org/x/net v0.10.0 // indirect
|
||||
golang.org/x/sys v0.8.0 // indirect
|
||||
golang.org/x/term v0.8.0 // indirect
|
||||
golang.org/x/tools v0.9.1 // indirect
|
||||
google.golang.org/api v0.107.0 // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220624142145-8cd45d7dbd1f // indirect
|
||||
google.golang.org/grpc v1.47.0 // indirect
|
||||
google.golang.org/protobuf v1.28.0 // indirect
|
||||
gopkg.in/ini.v1 v1.66.6 // indirect
|
||||
gopkg.in/neurosnap/sentences.v1 v1.0.6 // indirect
|
||||
gopkg.in/warnings.v0 v0.1.2 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
google.golang.org/genproto v0.0.0-20230110181048-76db0878b65f // indirect
|
||||
google.golang.org/grpc v1.53.0 // indirect
|
||||
google.golang.org/protobuf v1.28.1 // indirect
|
||||
gopkg.in/ini.v1 v1.67.0 // indirect
|
||||
gopkg.in/yaml.v3 v3.0.1 // indirect
|
||||
gorm.io/driver/mysql v1.3.5 // indirect
|
||||
gorm.io/driver/postgres v1.3.8 // indirect
|
||||
gorm.io/driver/sqlite v1.3.6 // indirect
|
||||
gorm.io/gorm v1.23.8 // indirect
|
||||
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
|
||||
gorm.io/driver/mysql v1.5.0 // indirect
|
||||
gorm.io/driver/postgres v1.5.0 // indirect
|
||||
gorm.io/gorm v1.25.0 // indirect
|
||||
k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed // indirect
|
||||
modernc.org/libc v1.22.6 // indirect
|
||||
modernc.org/mathutil v1.5.0 // indirect
|
||||
modernc.org/memory v1.5.0 // indirect
|
||||
modernc.org/sqlite v1.22.1 // indirect
|
||||
moul.io/http2curl v1.0.0 // indirect
|
||||
)
|
||||
|
||||
// See https://github.com/moby/moby/issues/42939#issuecomment-1114255529
|
||||
replace github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible
|
||||
|
||||
390
gost/debian.go
390
gost/debian.go
@@ -5,8 +5,12 @@ package gost
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
debver "github.com/knqyf263/go-deb-version"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
@@ -20,19 +24,16 @@ type Debian struct {
|
||||
Base
|
||||
}
|
||||
|
||||
type packCves struct {
|
||||
packName string
|
||||
isSrcPack bool
|
||||
cves []models.CveContent
|
||||
fixes models.PackageFixStatuses
|
||||
}
|
||||
|
||||
func (deb Debian) supported(major string) bool {
|
||||
_, ok := map[string]string{
|
||||
"7": "wheezy",
|
||||
"8": "jessie",
|
||||
"9": "stretch",
|
||||
"10": "buster",
|
||||
"11": "bullseye",
|
||||
"12": "bookworm",
|
||||
// "13": "trixie",
|
||||
// "14": "forky",
|
||||
}[major]
|
||||
return ok
|
||||
}
|
||||
@@ -45,199 +46,218 @@ func (deb Debian) DetectCVEs(r *models.ScanResult, _ bool) (nCVEs int, err error
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Add linux and set the version of running kernel to search Gost.
|
||||
if r.Container.ContainerID == "" {
|
||||
if r.RunningKernel.Version != "" {
|
||||
newVer := ""
|
||||
if p, ok := r.Packages["linux-image-"+r.RunningKernel.Release]; ok {
|
||||
newVer = p.NewVersion
|
||||
}
|
||||
r.Packages["linux"] = models.Package{
|
||||
Name: "linux",
|
||||
Version: r.RunningKernel.Version,
|
||||
NewVersion: newVer,
|
||||
}
|
||||
} else {
|
||||
logging.Log.Warnf("Since the exact kernel version is not available, the vulnerability in the linux package is not detected.")
|
||||
if r.RunningKernel.Release == "" {
|
||||
logging.Log.Warnf("Since the exact kernel release is not available, the vulnerability in the kernel package is not detected.")
|
||||
}
|
||||
}
|
||||
|
||||
var stashLinuxPackage models.Package
|
||||
if linux, ok := r.Packages["linux"]; ok {
|
||||
stashLinuxPackage = linux
|
||||
}
|
||||
nFixedCVEs, err := deb.detectCVEsWithFixState(r, "resolved")
|
||||
fixedCVEs, err := deb.detectCVEsWithFixState(r, true)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to detect fixed CVEs. err: %w", err)
|
||||
}
|
||||
|
||||
if stashLinuxPackage.Name != "" {
|
||||
r.Packages["linux"] = stashLinuxPackage
|
||||
}
|
||||
nUnfixedCVEs, err := deb.detectCVEsWithFixState(r, "open")
|
||||
unfixedCVEs, err := deb.detectCVEsWithFixState(r, false)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to detect unfixed CVEs. err: %w", err)
|
||||
}
|
||||
|
||||
return (nFixedCVEs + nUnfixedCVEs), nil
|
||||
return len(unique(append(fixedCVEs, unfixedCVEs...))), nil
|
||||
}
|
||||
|
||||
func (deb Debian) detectCVEsWithFixState(r *models.ScanResult, fixStatus string) (nCVEs int, err error) {
|
||||
if fixStatus != "resolved" && fixStatus != "open" {
|
||||
return 0, xerrors.Errorf(`Failed to detectCVEsWithFixState. fixStatus is not allowed except "open" and "resolved"(actual: fixStatus -> %s).`, fixStatus)
|
||||
}
|
||||
|
||||
packCvesList := []packCves{}
|
||||
func (deb Debian) detectCVEsWithFixState(r *models.ScanResult, fixed bool) ([]string, error) {
|
||||
detects := map[string]cveContent{}
|
||||
if deb.driver == nil {
|
||||
url, err := util.URLPathJoin(deb.baseURL, "debian", major(r.Release), "pkgs")
|
||||
urlPrefix, err := util.URLPathJoin(deb.baseURL, "debian", major(r.Release), "pkgs")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
|
||||
s := "unfixed-cves"
|
||||
if s == "resolved" {
|
||||
s = "fixed-cves"
|
||||
s := "fixed-cves"
|
||||
if !fixed {
|
||||
s = "unfixed-cves"
|
||||
}
|
||||
responses, err := getCvesWithFixStateViaHTTP(r, url, s)
|
||||
responses, err := getCvesWithFixStateViaHTTP(r, urlPrefix, s)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get CVEs via HTTP. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to get CVEs via HTTP. err: %w", err)
|
||||
}
|
||||
|
||||
for _, res := range responses {
|
||||
debCves := map[string]gostmodels.DebianCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &debCves); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
if !res.request.isSrcPack {
|
||||
continue
|
||||
}
|
||||
cves := []models.CveContent{}
|
||||
fixes := []models.PackageFixStatus{}
|
||||
for _, debcve := range debCves {
|
||||
cves = append(cves, *deb.ConvertToModel(&debcve))
|
||||
fixes = append(fixes, checkPackageFixStatus(&debcve)...)
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: res.request.packName,
|
||||
isSrcPack: res.request.isSrcPack,
|
||||
cves: cves,
|
||||
fixes: fixes,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
for _, pack := range r.Packages {
|
||||
cves, fixes, err := deb.getCvesDebianWithfixStatus(fixStatus, major(r.Release), pack.Name)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get CVEs for Package. err: %w", err)
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: pack.Name,
|
||||
isSrcPack: false,
|
||||
cves: cves,
|
||||
fixes: fixes,
|
||||
})
|
||||
}
|
||||
|
||||
// SrcPack
|
||||
for _, pack := range r.SrcPackages {
|
||||
cves, fixes, err := deb.getCvesDebianWithfixStatus(fixStatus, major(r.Release), pack.Name)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get CVEs for SrcPackage. err: %w", err)
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: pack.Name,
|
||||
isSrcPack: true,
|
||||
cves: cves,
|
||||
fixes: fixes,
|
||||
})
|
||||
}
|
||||
}
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-latest", "linux", "-amd64", "", "-arm64", "", "-i386", "").Replace(res.request.packName)
|
||||
|
||||
delete(r.Packages, "linux")
|
||||
|
||||
for _, p := range packCvesList {
|
||||
for i, cve := range p.cves {
|
||||
v, ok := r.ScannedCves[cve.CveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(cve)
|
||||
} else {
|
||||
v.CveContents[models.DebianSecurityTracker] = []models.CveContent{cve}
|
||||
v.Confidences = models.Confidences{models.DebianSecurityTrackerMatch}
|
||||
}
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: cve.CveID,
|
||||
CveContents: models.NewCveContents(cve),
|
||||
Confidences: models.Confidences{models.DebianSecurityTrackerMatch},
|
||||
}
|
||||
|
||||
if fixStatus == "resolved" {
|
||||
versionRelease := ""
|
||||
if p.isSrcPack {
|
||||
versionRelease = r.SrcPackages[p.packName].Version
|
||||
} else {
|
||||
versionRelease = r.Packages[p.packName].FormatVer()
|
||||
}
|
||||
|
||||
if versionRelease == "" {
|
||||
if deb.isKernelSourcePackage(n) {
|
||||
isRunning := false
|
||||
for _, bn := range r.SrcPackages[res.request.packName].BinaryNames {
|
||||
if bn == fmt.Sprintf("linux-image-%s", r.RunningKernel.Release) {
|
||||
isRunning = true
|
||||
break
|
||||
}
|
||||
|
||||
affected, err := isGostDefAffected(versionRelease, p.fixes[i].FixedIn)
|
||||
if err != nil {
|
||||
logging.Log.Debugf("Failed to parse versions: %s, Ver: %s, Gost: %s",
|
||||
err, versionRelease, p.fixes[i].FixedIn)
|
||||
continue
|
||||
}
|
||||
|
||||
if !affected {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
nCVEs++
|
||||
}
|
||||
|
||||
names := []string{}
|
||||
if p.isSrcPack {
|
||||
if srcPack, ok := r.SrcPackages[p.packName]; ok {
|
||||
for _, binName := range srcPack.BinaryNames {
|
||||
if _, ok := r.Packages[binName]; ok {
|
||||
names = append(names, binName)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if p.packName == "linux" {
|
||||
names = append(names, "linux-image-"+r.RunningKernel.Release)
|
||||
} else {
|
||||
names = append(names, p.packName)
|
||||
// To detect vulnerabilities in running kernels only, skip if the kernel is not running.
|
||||
if !isRunning {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if fixStatus == "resolved" {
|
||||
for _, name := range names {
|
||||
v.AffectedPackages = v.AffectedPackages.Store(models.PackageFixStatus{
|
||||
Name: name,
|
||||
FixedIn: p.fixes[i].FixedIn,
|
||||
})
|
||||
cs := map[string]gostmodels.DebianCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &cs); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
}
|
||||
for _, content := range deb.detect(cs, models.SrcPackage{Name: res.request.packName, Version: r.SrcPackages[res.request.packName].Version, BinaryNames: r.SrcPackages[res.request.packName].BinaryNames}, models.Kernel{Release: r.RunningKernel.Release, Version: r.Packages[fmt.Sprintf("linux-image-%s", r.RunningKernel.Release)].Version}) {
|
||||
c, ok := detects[content.cveContent.CveID]
|
||||
if ok {
|
||||
content.fixStatuses = append(content.fixStatuses, c.fixStatuses...)
|
||||
}
|
||||
} else {
|
||||
for _, name := range names {
|
||||
v.AffectedPackages = v.AffectedPackages.Store(models.PackageFixStatus{
|
||||
Name: name,
|
||||
FixState: "open",
|
||||
NotFixedYet: true,
|
||||
})
|
||||
detects[content.cveContent.CveID] = content
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for _, p := range r.SrcPackages {
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-latest", "linux", "-amd64", "", "-arm64", "", "-i386", "").Replace(p.Name)
|
||||
|
||||
if deb.isKernelSourcePackage(n) {
|
||||
isRunning := false
|
||||
for _, bn := range p.BinaryNames {
|
||||
if bn == fmt.Sprintf("linux-image-%s", r.RunningKernel.Release) {
|
||||
isRunning = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// To detect vulnerabilities in running kernels only, skip if the kernel is not running.
|
||||
if !isRunning {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
r.ScannedCves[cve.CveID] = v
|
||||
var f func(string, string) (map[string]gostmodels.DebianCVE, error) = deb.driver.GetFixedCvesDebian
|
||||
if !fixed {
|
||||
f = deb.driver.GetUnfixedCvesDebian
|
||||
}
|
||||
cs, err := f(major(r.Release), n)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to get CVEs. release: %s, src package: %s, err: %w", major(r.Release), p.Name, err)
|
||||
}
|
||||
for _, content := range deb.detect(cs, p, models.Kernel{Release: r.RunningKernel.Release, Version: r.Packages[fmt.Sprintf("linux-image-%s", r.RunningKernel.Release)].Version}) {
|
||||
c, ok := detects[content.cveContent.CveID]
|
||||
if ok {
|
||||
content.fixStatuses = append(content.fixStatuses, c.fixStatuses...)
|
||||
}
|
||||
detects[content.cveContent.CveID] = content
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nCVEs, nil
|
||||
for _, content := range detects {
|
||||
v, ok := r.ScannedCves[content.cveContent.CveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(content.cveContent)
|
||||
} else {
|
||||
v.CveContents[models.DebianSecurityTracker] = []models.CveContent{content.cveContent}
|
||||
}
|
||||
v.Confidences.AppendIfMissing(models.DebianSecurityTrackerMatch)
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: content.cveContent.CveID,
|
||||
CveContents: models.NewCveContents(content.cveContent),
|
||||
Confidences: models.Confidences{models.DebianSecurityTrackerMatch},
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range content.fixStatuses {
|
||||
v.AffectedPackages = v.AffectedPackages.Store(s)
|
||||
}
|
||||
r.ScannedCves[content.cveContent.CveID] = v
|
||||
}
|
||||
|
||||
return maps.Keys(detects), nil
|
||||
}
|
||||
|
||||
func isGostDefAffected(versionRelease, gostVersion string) (affected bool, err error) {
|
||||
func (deb Debian) isKernelSourcePackage(pkgname string) bool {
|
||||
switch ss := strings.Split(pkgname, "-"); len(ss) {
|
||||
case 1:
|
||||
return pkgname == "linux"
|
||||
case 2:
|
||||
if ss[0] != "linux" {
|
||||
return false
|
||||
}
|
||||
switch ss[1] {
|
||||
case "grsec":
|
||||
return true
|
||||
default:
|
||||
_, err := strconv.ParseFloat(ss[1], 64)
|
||||
return err == nil
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (deb Debian) detect(cves map[string]gostmodels.DebianCVE, srcPkg models.SrcPackage, runningKernel models.Kernel) []cveContent {
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-latest", "linux", "-amd64", "", "-arm64", "", "-i386", "").Replace(srcPkg.Name)
|
||||
|
||||
var contents []cveContent
|
||||
for _, cve := range cves {
|
||||
c := cveContent{
|
||||
cveContent: *(Debian{}).ConvertToModel(&cve),
|
||||
}
|
||||
|
||||
for _, p := range cve.Package {
|
||||
for _, r := range p.Release {
|
||||
switch r.Status {
|
||||
case "open", "undetermined":
|
||||
for _, bn := range srcPkg.BinaryNames {
|
||||
if deb.isKernelSourcePackage(n) && bn != fmt.Sprintf("linux-image-%s", runningKernel.Release) {
|
||||
continue
|
||||
}
|
||||
c.fixStatuses = append(c.fixStatuses, models.PackageFixStatus{
|
||||
Name: bn,
|
||||
FixState: r.Status,
|
||||
NotFixedYet: true,
|
||||
})
|
||||
}
|
||||
case "resolved":
|
||||
installedVersion := srcPkg.Version
|
||||
patchedVersion := r.FixedVersion
|
||||
|
||||
if deb.isKernelSourcePackage(n) {
|
||||
installedVersion = runningKernel.Version
|
||||
}
|
||||
|
||||
affected, err := deb.isGostDefAffected(installedVersion, patchedVersion)
|
||||
if err != nil {
|
||||
logging.Log.Debugf("Failed to parse versions: %s, Ver: %s, Gost: %s", err, installedVersion, patchedVersion)
|
||||
continue
|
||||
}
|
||||
|
||||
if affected {
|
||||
for _, bn := range srcPkg.BinaryNames {
|
||||
if deb.isKernelSourcePackage(n) && bn != fmt.Sprintf("linux-image-%s", runningKernel.Release) {
|
||||
continue
|
||||
}
|
||||
c.fixStatuses = append(c.fixStatuses, models.PackageFixStatus{
|
||||
Name: bn,
|
||||
FixedIn: patchedVersion,
|
||||
})
|
||||
}
|
||||
}
|
||||
default:
|
||||
logging.Log.Debugf("Failed to check vulnerable CVE. err: unknown status: %s", r.Status)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(c.fixStatuses) > 0 {
|
||||
contents = append(contents, c)
|
||||
}
|
||||
}
|
||||
return contents
|
||||
}
|
||||
|
||||
func (deb Debian) isGostDefAffected(versionRelease, gostVersion string) (affected bool, err error) {
|
||||
vera, err := debver.NewVersion(versionRelease)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", versionRelease, err)
|
||||
@@ -249,27 +269,6 @@ func isGostDefAffected(versionRelease, gostVersion string) (affected bool, err e
|
||||
return vera.LessThan(verb), nil
|
||||
}
|
||||
|
||||
func (deb Debian) getCvesDebianWithfixStatus(fixStatus, release, pkgName string) ([]models.CveContent, []models.PackageFixStatus, error) {
|
||||
var f func(string, string) (map[string]gostmodels.DebianCVE, error)
|
||||
if fixStatus == "resolved" {
|
||||
f = deb.driver.GetFixedCvesDebian
|
||||
} else {
|
||||
f = deb.driver.GetUnfixedCvesDebian
|
||||
}
|
||||
debCves, err := f(release, pkgName)
|
||||
if err != nil {
|
||||
return nil, nil, xerrors.Errorf("Failed to get CVEs. fixStatus: %s, release: %s, src package: %s, err: %w", fixStatus, release, pkgName, err)
|
||||
}
|
||||
|
||||
cves := []models.CveContent{}
|
||||
fixes := []models.PackageFixStatus{}
|
||||
for _, devbCve := range debCves {
|
||||
cves = append(cves, *deb.ConvertToModel(&devbCve))
|
||||
fixes = append(fixes, checkPackageFixStatus(&devbCve)...)
|
||||
}
|
||||
return cves, fixes, nil
|
||||
}
|
||||
|
||||
// ConvertToModel converts gost model to vuls model
|
||||
func (deb Debian) ConvertToModel(cve *gostmodels.DebianCVE) *models.CveContent {
|
||||
severity := ""
|
||||
@@ -279,34 +278,17 @@ func (deb Debian) ConvertToModel(cve *gostmodels.DebianCVE) *models.CveContent {
|
||||
break
|
||||
}
|
||||
}
|
||||
var optinal map[string]string
|
||||
if cve.Scope != "" {
|
||||
optinal = map[string]string{"attack range": cve.Scope}
|
||||
}
|
||||
return &models.CveContent{
|
||||
Type: models.DebianSecurityTracker,
|
||||
CveID: cve.CveID,
|
||||
Summary: cve.Description,
|
||||
Cvss2Severity: severity,
|
||||
Cvss3Severity: severity,
|
||||
SourceLink: "https://security-tracker.debian.org/tracker/" + cve.CveID,
|
||||
Optional: map[string]string{
|
||||
"attack range": cve.Scope,
|
||||
},
|
||||
SourceLink: fmt.Sprintf("https://security-tracker.debian.org/tracker/%s", cve.CveID),
|
||||
Optional: optinal,
|
||||
}
|
||||
}
|
||||
|
||||
func checkPackageFixStatus(cve *gostmodels.DebianCVE) []models.PackageFixStatus {
|
||||
fixes := []models.PackageFixStatus{}
|
||||
for _, p := range cve.Package {
|
||||
for _, r := range p.Release {
|
||||
f := models.PackageFixStatus{Name: p.PackageName}
|
||||
|
||||
if r.Status == "open" {
|
||||
f.NotFixedYet = true
|
||||
} else {
|
||||
f.FixedIn = r.FixedVersion
|
||||
}
|
||||
|
||||
fixes = append(fixes, f)
|
||||
}
|
||||
}
|
||||
|
||||
return fixes
|
||||
}
|
||||
|
||||
@@ -3,69 +3,348 @@
|
||||
|
||||
package gost
|
||||
|
||||
import "testing"
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/future-architect/vuls/models"
|
||||
gostmodels "github.com/vulsio/gost/models"
|
||||
)
|
||||
|
||||
func TestDebian_Supported(t *testing.T) {
|
||||
type fields struct {
|
||||
Base Base
|
||||
}
|
||||
type args struct {
|
||||
major string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
args string
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "7 is supported",
|
||||
args: "7",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "8 is supported",
|
||||
args: args{
|
||||
major: "8",
|
||||
},
|
||||
args: "8",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "9 is supported",
|
||||
args: args{
|
||||
major: "9",
|
||||
},
|
||||
args: "9",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "10 is supported",
|
||||
args: args{
|
||||
major: "10",
|
||||
},
|
||||
args: "10",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "11 is supported",
|
||||
args: args{
|
||||
major: "11",
|
||||
},
|
||||
args: "11",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "12 is not supported yet",
|
||||
args: args{
|
||||
major: "12",
|
||||
},
|
||||
name: "12 is supported",
|
||||
args: "12",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "13 is not supported yet",
|
||||
args: "13",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "14 is not supported yet",
|
||||
args: "14",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "empty string is not supported yet",
|
||||
args: args{
|
||||
major: "",
|
||||
},
|
||||
args: "",
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
deb := Debian{}
|
||||
if got := deb.supported(tt.args.major); got != tt.want {
|
||||
if got := (Debian{}).supported(tt.args); got != tt.want {
|
||||
t.Errorf("Debian.Supported() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDebian_ConvertToModel(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args gostmodels.DebianCVE
|
||||
want models.CveContent
|
||||
}{
|
||||
{
|
||||
name: "gost Debian.ConvertToModel",
|
||||
args: gostmodels.DebianCVE{
|
||||
CveID: "CVE-2022-39260",
|
||||
Scope: "local",
|
||||
Description: "Git is an open source, scalable, distributed revision control system. `git shell` is a restricted login shell that can be used to implement Git's push/pull functionality via SSH. In versions prior to 2.30.6, 2.31.5, 2.32.4, 2.33.5, 2.34.5, 2.35.5, 2.36.3, and 2.37.4, the function that splits the command arguments into an array improperly uses an `int` to represent the number of entries in the array, allowing a malicious actor to intentionally overflow the return value, leading to arbitrary heap writes. Because the resulting array is then passed to `execv()`, it is possible to leverage this attack to gain remote code execution on a victim machine. Note that a victim must first allow access to `git shell` as a login shell in order to be vulnerable to this attack. This problem is patched in versions 2.30.6, 2.31.5, 2.32.4, 2.33.5, 2.34.5, 2.35.5, 2.36.3, and 2.37.4 and users are advised to upgrade to the latest version. Disabling `git shell` access via remote logins is a viable short-term workaround.",
|
||||
Package: []gostmodels.DebianPackage{
|
||||
{
|
||||
PackageName: "git",
|
||||
Release: []gostmodels.DebianRelease{
|
||||
{
|
||||
ProductName: "bookworm",
|
||||
Status: "resolved",
|
||||
FixedVersion: "1:2.38.1-1",
|
||||
Urgency: "not yet assigned",
|
||||
Version: "1:2.39.2-1.1",
|
||||
},
|
||||
{
|
||||
ProductName: "bullseye",
|
||||
Status: "resolved",
|
||||
FixedVersion: "1:2.30.2-1+deb11u1",
|
||||
Urgency: "not yet assigned",
|
||||
Version: "1:2.30.2-1",
|
||||
},
|
||||
{
|
||||
ProductName: "buster",
|
||||
Status: "resolved",
|
||||
FixedVersion: "1:2.20.1-2+deb10u5",
|
||||
Urgency: "not yet assigned",
|
||||
Version: "1:2.20.1-2+deb10u3",
|
||||
},
|
||||
{
|
||||
ProductName: "sid",
|
||||
Status: "resolved",
|
||||
FixedVersion: "1:2.38.1-1",
|
||||
Urgency: "not yet assigned",
|
||||
Version: "1:2.40.0-1",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
want: models.CveContent{
|
||||
Type: models.DebianSecurityTracker,
|
||||
CveID: "CVE-2022-39260",
|
||||
Summary: "Git is an open source, scalable, distributed revision control system. `git shell` is a restricted login shell that can be used to implement Git's push/pull functionality via SSH. In versions prior to 2.30.6, 2.31.5, 2.32.4, 2.33.5, 2.34.5, 2.35.5, 2.36.3, and 2.37.4, the function that splits the command arguments into an array improperly uses an `int` to represent the number of entries in the array, allowing a malicious actor to intentionally overflow the return value, leading to arbitrary heap writes. Because the resulting array is then passed to `execv()`, it is possible to leverage this attack to gain remote code execution on a victim machine. Note that a victim must first allow access to `git shell` as a login shell in order to be vulnerable to this attack. This problem is patched in versions 2.30.6, 2.31.5, 2.32.4, 2.33.5, 2.34.5, 2.35.5, 2.36.3, and 2.37.4 and users are advised to upgrade to the latest version. Disabling `git shell` access via remote logins is a viable short-term workaround.",
|
||||
Cvss2Severity: "not yet assigned",
|
||||
Cvss3Severity: "not yet assigned",
|
||||
SourceLink: "https://security-tracker.debian.org/tracker/CVE-2022-39260",
|
||||
Optional: map[string]string{"attack range": "local"},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := (Debian{}).ConvertToModel(&tt.args); !reflect.DeepEqual(got, &tt.want) {
|
||||
t.Errorf("Debian.ConvertToModel() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDebian_detect(t *testing.T) {
|
||||
type args struct {
|
||||
cves map[string]gostmodels.DebianCVE
|
||||
srcPkg models.SrcPackage
|
||||
runningKernel models.Kernel
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []cveContent
|
||||
}{
|
||||
{
|
||||
name: "fixed",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.DebianCVE{
|
||||
"CVE-0000-0000": {
|
||||
CveID: "CVE-0000-0000",
|
||||
Package: []gostmodels.DebianPackage{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
Release: []gostmodels.DebianRelease{
|
||||
{
|
||||
ProductName: "bullseye",
|
||||
Status: "resolved",
|
||||
FixedVersion: "0.0.0-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
CveID: "CVE-0000-0001",
|
||||
Package: []gostmodels.DebianPackage{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
Release: []gostmodels.DebianRelease{
|
||||
{
|
||||
ProductName: "bullseye",
|
||||
Status: "resolved",
|
||||
FixedVersion: "0.0.0-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
srcPkg: models.SrcPackage{Name: "pkg", Version: "0.0.0-1", BinaryNames: []string{"pkg"}},
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.DebianSecurityTracker, CveID: "CVE-0000-0001", SourceLink: "https://security-tracker.debian.org/tracker/CVE-0000-0001"},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "pkg",
|
||||
FixedIn: "0.0.0-2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unfixed",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.DebianCVE{
|
||||
"CVE-0000-0000": {
|
||||
CveID: "CVE-0000-0000",
|
||||
Package: []gostmodels.DebianPackage{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
Release: []gostmodels.DebianRelease{
|
||||
{
|
||||
ProductName: "bullseye",
|
||||
Status: "open",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
CveID: "CVE-0000-0001",
|
||||
Package: []gostmodels.DebianPackage{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
Release: []gostmodels.DebianRelease{
|
||||
{
|
||||
ProductName: "bullseye",
|
||||
Status: "undetermined",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
srcPkg: models.SrcPackage{Name: "pkg", Version: "0.0.0-1", BinaryNames: []string{"pkg"}},
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.DebianSecurityTracker, CveID: "CVE-0000-0000", SourceLink: "https://security-tracker.debian.org/tracker/CVE-0000-0000"},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "pkg",
|
||||
FixState: "open",
|
||||
NotFixedYet: true,
|
||||
}},
|
||||
},
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.DebianSecurityTracker, CveID: "CVE-0000-0001", SourceLink: "https://security-tracker.debian.org/tracker/CVE-0000-0001"},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "pkg",
|
||||
FixState: "undetermined",
|
||||
NotFixedYet: true,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "linux-signed-amd64",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.DebianCVE{
|
||||
"CVE-0000-0000": {
|
||||
CveID: "CVE-0000-0000",
|
||||
Package: []gostmodels.DebianPackage{
|
||||
{
|
||||
PackageName: "linux",
|
||||
Release: []gostmodels.DebianRelease{
|
||||
{
|
||||
ProductName: "bullseye",
|
||||
Status: "resolved",
|
||||
FixedVersion: "0.0.0-0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
CveID: "CVE-0000-0001",
|
||||
Package: []gostmodels.DebianPackage{
|
||||
{
|
||||
PackageName: "linux",
|
||||
Release: []gostmodels.DebianRelease{
|
||||
{
|
||||
ProductName: "bullseye",
|
||||
Status: "resolved",
|
||||
FixedVersion: "0.0.0-2",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
srcPkg: models.SrcPackage{Name: "linux-signed-amd64", Version: "0.0.0+1", BinaryNames: []string{"linux-image-5.10.0-20-amd64"}},
|
||||
runningKernel: models.Kernel{Release: "5.10.0-20-amd64", Version: "0.0.0-1"},
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.DebianSecurityTracker, CveID: "CVE-0000-0001", SourceLink: "https://security-tracker.debian.org/tracker/CVE-0000-0001"},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "linux-image-5.10.0-20-amd64",
|
||||
FixedIn: "0.0.0-2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got := (Debian{}).detect(tt.args.cves, tt.args.srcPkg, tt.args.runningKernel)
|
||||
slices.SortFunc(got, func(i, j cveContent) bool { return i.cveContent.CveID < j.cveContent.CveID })
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("Debian.detect() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestDebian_isKernelSourcePackage(t *testing.T) {
|
||||
tests := []struct {
|
||||
pkgname string
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
pkgname: "linux",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
pkgname: "apt",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-5.10",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-grsec",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-base",
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.pkgname, func(t *testing.T) {
|
||||
if got := (Debian{}).isKernelSourcePackage(tt.pkgname); got != tt.want {
|
||||
t.Errorf("Debian.isKernelSourcePackage() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -89,9 +89,9 @@ func newGostDB(cnf config.VulnDictInterface) (gostdb.DB, error) {
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err := gostdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), gostdb.Option{})
|
||||
driver, err := gostdb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), gostdb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
if xerrors.Is(err, gostdb.ErrDBLocked) {
|
||||
return nil, xerrors.Errorf("Failed to init gost DB. SQLite3: %s is locked. err: %w", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init gost DB. DB Path: %s, err: %w", path, err)
|
||||
|
||||
@@ -4,17 +4,23 @@
|
||||
package gost
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/hashicorp/go-version"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
gostmodels "github.com/vulsio/gost/models"
|
||||
)
|
||||
|
||||
@@ -23,123 +29,256 @@ type Microsoft struct {
|
||||
Base
|
||||
}
|
||||
|
||||
var kbIDPattern = regexp.MustCompile(`KB(\d{6,7})`)
|
||||
|
||||
// DetectCVEs fills cve information that has in Gost
|
||||
func (ms Microsoft) DetectCVEs(r *models.ScanResult, _ bool) (nCVEs int, err error) {
|
||||
if ms.driver == nil {
|
||||
return 0, nil
|
||||
var applied, unapplied []string
|
||||
if r.WindowsKB != nil {
|
||||
applied = r.WindowsKB.Applied
|
||||
unapplied = r.WindowsKB.Unapplied
|
||||
}
|
||||
if ms.driver == nil {
|
||||
u, err := util.URLPathJoin(ms.baseURL, "microsoft", "kbs")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
|
||||
var osName string
|
||||
osName, ok := r.Optional["OSName"].(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("This Windows has wrong type option(OSName). UUID: %s", r.ServerUUID)
|
||||
content := map[string]interface{}{"applied": applied, "unapplied": unapplied}
|
||||
var body []byte
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
f := func() error {
|
||||
resp, body, errs = gorequest.New().Timeout(10 * time.Second).Post(u).SendStruct(content).Type("json").EndBytes()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return xerrors.Errorf("HTTP POST error. url: %s, resp: %v, err: %+v", u, resp, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, t time.Duration) {
|
||||
logging.Log.Warnf("Failed to HTTP POST. retrying in %s seconds. err: %+v", t, err)
|
||||
}
|
||||
if err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify); err != nil {
|
||||
return 0, xerrors.Errorf("HTTP Error: %w", err)
|
||||
}
|
||||
|
||||
var r struct {
|
||||
Applied []string `json:"applied"`
|
||||
Unapplied []string `json:"unapplied"`
|
||||
}
|
||||
if err := json.Unmarshal(body, &r); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
|
||||
}
|
||||
applied = r.Applied
|
||||
unapplied = r.Unapplied
|
||||
} else {
|
||||
applied, unapplied, err = ms.driver.GetExpandKB(applied, unapplied)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to detect CVEs. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var products []string
|
||||
if _, ok := r.Optional["InstalledProducts"]; ok {
|
||||
switch ps := r.Optional["InstalledProducts"].(type) {
|
||||
case []interface{}:
|
||||
for _, p := range ps {
|
||||
pname, ok := p.(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("skip products: %v", p)
|
||||
continue
|
||||
}
|
||||
products = append(products, pname)
|
||||
}
|
||||
case []string:
|
||||
for _, p := range ps {
|
||||
products = append(products, p)
|
||||
}
|
||||
case nil:
|
||||
logging.Log.Warnf("This Windows has no option(InstalledProducts). UUID: %s", r.ServerUUID)
|
||||
}
|
||||
}
|
||||
|
||||
applied, unapplied := map[string]struct{}{}, map[string]struct{}{}
|
||||
if _, ok := r.Optional["KBID"]; ok {
|
||||
switch kbIDs := r.Optional["KBID"].(type) {
|
||||
case []interface{}:
|
||||
for _, kbID := range kbIDs {
|
||||
s, ok := kbID.(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("skip KBID: %v", kbID)
|
||||
continue
|
||||
}
|
||||
unapplied[strings.TrimPrefix(s, "KB")] = struct{}{}
|
||||
}
|
||||
case []string:
|
||||
for _, kbID := range kbIDs {
|
||||
unapplied[strings.TrimPrefix(kbID, "KB")] = struct{}{}
|
||||
}
|
||||
case nil:
|
||||
logging.Log.Warnf("This Windows has no option(KBID). UUID: %s", r.ServerUUID)
|
||||
if ms.driver == nil {
|
||||
u, err := util.URLPathJoin(ms.baseURL, "microsoft", "products")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
|
||||
for _, pkg := range r.Packages {
|
||||
matches := kbIDPattern.FindAllStringSubmatch(pkg.Name, -1)
|
||||
for _, match := range matches {
|
||||
applied[match[1]] = struct{}{}
|
||||
content := map[string]interface{}{"release": r.Release, "kbs": append(applied, unapplied...)}
|
||||
var body []byte
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
f := func() error {
|
||||
resp, body, errs = gorequest.New().Timeout(10 * time.Second).Post(u).SendStruct(content).Type("json").EndBytes()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return xerrors.Errorf("HTTP POST error. url: %s, resp: %v, err: %+v", u, resp, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, t time.Duration) {
|
||||
logging.Log.Warnf("Failed to HTTP POST. retrying in %s seconds. err: %+v", t, err)
|
||||
}
|
||||
if err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify); err != nil {
|
||||
return 0, xerrors.Errorf("HTTP Error: %w", err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &products); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
|
||||
}
|
||||
} else {
|
||||
switch kbIDs := r.Optional["AppliedKBID"].(type) {
|
||||
case []interface{}:
|
||||
for _, kbID := range kbIDs {
|
||||
s, ok := kbID.(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("skip KBID: %v", kbID)
|
||||
continue
|
||||
}
|
||||
applied[strings.TrimPrefix(s, "KB")] = struct{}{}
|
||||
}
|
||||
case []string:
|
||||
for _, kbID := range kbIDs {
|
||||
applied[strings.TrimPrefix(kbID, "KB")] = struct{}{}
|
||||
}
|
||||
case nil:
|
||||
logging.Log.Warnf("This Windows has no option(AppliedKBID). UUID: %s", r.ServerUUID)
|
||||
}
|
||||
|
||||
switch kbIDs := r.Optional["UnappliedKBID"].(type) {
|
||||
case []interface{}:
|
||||
for _, kbID := range kbIDs {
|
||||
s, ok := kbID.(string)
|
||||
if !ok {
|
||||
logging.Log.Warnf("skip KBID: %v", kbID)
|
||||
continue
|
||||
}
|
||||
unapplied[strings.TrimPrefix(s, "KB")] = struct{}{}
|
||||
}
|
||||
case []string:
|
||||
for _, kbID := range kbIDs {
|
||||
unapplied[strings.TrimPrefix(kbID, "KB")] = struct{}{}
|
||||
}
|
||||
case nil:
|
||||
logging.Log.Warnf("This Windows has no option(UnappliedKBID). UUID: %s", r.ServerUUID)
|
||||
ps, err := ms.driver.GetRelatedProducts(r.Release, append(applied, unapplied...))
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to detect CVEs. err: %w", err)
|
||||
}
|
||||
products = ps
|
||||
}
|
||||
|
||||
logging.Log.Debugf(`GetCvesByMicrosoftKBID query body {"osName": %s, "installedProducts": %q, "applied": %q, "unapplied: %q"}`, osName, products, maps.Keys(applied), maps.Keys(unapplied))
|
||||
cves, err := ms.driver.GetCvesByMicrosoftKBID(osName, products, maps.Keys(applied), maps.Keys(unapplied))
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to detect CVEs. err: %w", err)
|
||||
m := map[string]struct{}{}
|
||||
for _, p := range products {
|
||||
m[p] = struct{}{}
|
||||
}
|
||||
for _, n := range []string{"Microsoft Edge (Chromium-based)", fmt.Sprintf("Microsoft Edge on %s", r.Release), fmt.Sprintf("Microsoft Edge (Chromium-based) in IE Mode on %s", r.Release), fmt.Sprintf("Microsoft Edge (EdgeHTML-based) on %s", r.Release)} {
|
||||
delete(m, n)
|
||||
}
|
||||
filtered := []string{r.Release}
|
||||
for _, p := range r.Packages {
|
||||
switch p.Name {
|
||||
case "Microsoft Edge":
|
||||
if ss := strings.Split(p.Version, "."); len(ss) > 0 {
|
||||
v, err := strconv.ParseInt(ss[0], 10, 8)
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
if v > 44 {
|
||||
filtered = append(filtered, "Microsoft Edge (Chromium-based)", fmt.Sprintf("Microsoft Edge on %s", r.Release), fmt.Sprintf("Microsoft Edge (Chromium-based) in IE Mode on %s", r.Release))
|
||||
} else {
|
||||
filtered = append(filtered, fmt.Sprintf("Microsoft Edge on %s", r.Release), fmt.Sprintf("Microsoft Edge (EdgeHTML-based) on %s", r.Release))
|
||||
}
|
||||
}
|
||||
default:
|
||||
}
|
||||
}
|
||||
filtered = unique(append(filtered, maps.Keys(m)...))
|
||||
|
||||
var cves map[string]gostmodels.MicrosoftCVE
|
||||
if ms.driver == nil {
|
||||
u, err := util.URLPathJoin(ms.baseURL, "microsoft", "filtered-cves")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
|
||||
content := map[string]interface{}{"products": filtered, "kbs": append(applied, unapplied...)}
|
||||
var body []byte
|
||||
var errs []error
|
||||
var resp *http.Response
|
||||
f := func() error {
|
||||
resp, body, errs = gorequest.New().Timeout(10 * time.Second).Post(u).SendStruct(content).Type("json").EndBytes()
|
||||
if 0 < len(errs) || resp == nil || resp.StatusCode != 200 {
|
||||
return xerrors.Errorf("HTTP POST error. url: %s, resp: %v, err: %+v", u, resp, errs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
notify := func(err error, t time.Duration) {
|
||||
logging.Log.Warnf("Failed to HTTP POST. retrying in %s seconds. err: %+v", t, err)
|
||||
}
|
||||
if err := backoff.RetryNotify(f, backoff.NewExponentialBackOff(), notify); err != nil {
|
||||
return 0, xerrors.Errorf("HTTP Error: %w", err)
|
||||
}
|
||||
|
||||
if err := json.Unmarshal(body, &cves); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to Unmarshal. body: %s, err: %w", body, err)
|
||||
}
|
||||
} else {
|
||||
cves, err = ms.driver.GetFilteredCvesMicrosoft(filtered, append(applied, unapplied...))
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to detect CVEs. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for cveID, cve := range cves {
|
||||
var ps []gostmodels.MicrosoftProduct
|
||||
for _, p := range cve.Products {
|
||||
if len(p.KBs) == 0 {
|
||||
ps = append(ps, p)
|
||||
continue
|
||||
}
|
||||
|
||||
var kbs []gostmodels.MicrosoftKB
|
||||
for _, kb := range p.KBs {
|
||||
if _, err := strconv.Atoi(kb.Article); err != nil {
|
||||
switch {
|
||||
case strings.HasPrefix(p.Name, "Microsoft Edge"):
|
||||
p, ok := r.Packages["Microsoft Edge"]
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
|
||||
if kb.FixedBuild == "" {
|
||||
kbs = append(kbs, kb)
|
||||
break
|
||||
}
|
||||
|
||||
vera, err := version.NewVersion(p.Version)
|
||||
if err != nil {
|
||||
kbs = append(kbs, kb)
|
||||
break
|
||||
}
|
||||
verb, err := version.NewVersion(kb.FixedBuild)
|
||||
if err != nil {
|
||||
kbs = append(kbs, kb)
|
||||
break
|
||||
}
|
||||
if vera.LessThan(verb) {
|
||||
kbs = append(kbs, kb)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if slices.Contains(applied, kb.Article) {
|
||||
kbs = []gostmodels.MicrosoftKB{}
|
||||
break
|
||||
}
|
||||
if slices.Contains(unapplied, kb.Article) {
|
||||
kbs = append(kbs, kb)
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(kbs) > 0 {
|
||||
p.KBs = kbs
|
||||
ps = append(ps, p)
|
||||
}
|
||||
}
|
||||
cve.Products = ps
|
||||
if len(cve.Products) == 0 {
|
||||
continue
|
||||
}
|
||||
nCVEs++
|
||||
|
||||
cveCont, mitigations := ms.ConvertToModel(&cve)
|
||||
uniqKB := map[string]struct{}{}
|
||||
var stats models.PackageFixStatuses
|
||||
for _, p := range cve.Products {
|
||||
for _, kb := range p.KBs {
|
||||
if _, err := strconv.Atoi(kb.Article); err == nil {
|
||||
uniqKB[fmt.Sprintf("KB%s", kb.Article)] = struct{}{}
|
||||
if _, err := strconv.Atoi(kb.Article); err != nil {
|
||||
switch {
|
||||
case strings.HasPrefix(p.Name, "Microsoft Edge"):
|
||||
s := models.PackageFixStatus{
|
||||
Name: "Microsoft Edge",
|
||||
FixState: "fixed",
|
||||
FixedIn: kb.FixedBuild,
|
||||
}
|
||||
if kb.FixedBuild == "" {
|
||||
s.FixState = "unknown"
|
||||
}
|
||||
stats = append(stats, s)
|
||||
default:
|
||||
stats = append(stats, models.PackageFixStatus{
|
||||
Name: p.Name,
|
||||
FixState: "unknown",
|
||||
FixedIn: kb.FixedBuild,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
uniqKB[kb.Article] = struct{}{}
|
||||
uniqKB[fmt.Sprintf("KB%s", kb.Article)] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(uniqKB) == 0 && len(stats) == 0 {
|
||||
for _, p := range cve.Products {
|
||||
switch {
|
||||
case strings.HasPrefix(p.Name, "Microsoft Edge"):
|
||||
stats = append(stats, models.PackageFixStatus{
|
||||
Name: "Microsoft Edge",
|
||||
FixState: "unknown",
|
||||
})
|
||||
default:
|
||||
stats = append(stats, models.PackageFixStatus{
|
||||
Name: p.Name,
|
||||
FixState: "unknown",
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
advisories := []models.DistroAdvisory{}
|
||||
for kb := range uniqKB {
|
||||
advisories = append(advisories, models.DistroAdvisory{
|
||||
@@ -149,14 +288,16 @@ func (ms Microsoft) DetectCVEs(r *models.ScanResult, _ bool) (nCVEs int, err err
|
||||
}
|
||||
|
||||
r.ScannedCves[cveID] = models.VulnInfo{
|
||||
CveID: cveID,
|
||||
Confidences: models.Confidences{models.WindowsUpdateSearch},
|
||||
DistroAdvisories: advisories,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Mitigations: mitigations,
|
||||
CveID: cveID,
|
||||
Confidences: models.Confidences{models.WindowsUpdateSearch},
|
||||
DistroAdvisories: advisories,
|
||||
CveContents: models.NewCveContents(*cveCont),
|
||||
Mitigations: mitigations,
|
||||
AffectedPackages: stats,
|
||||
WindowsKBFixedIns: maps.Keys(uniqKB),
|
||||
}
|
||||
}
|
||||
return len(cves), nil
|
||||
return nCVEs, nil
|
||||
}
|
||||
|
||||
// ConvertToModel converts gost model to vuls model
|
||||
|
||||
@@ -32,7 +32,7 @@ func (red RedHat) DetectCVEs(r *models.ScanResult, ignoreWillNotFix bool) (nCVEs
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
responses, err := getAllUnfixedCvesViaHTTP(r, prefix)
|
||||
responses, err := getCvesWithFixStateViaHTTP(r, prefix, "unfixed-cves")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Unfixed CVEs via HTTP. err: %w", err)
|
||||
}
|
||||
|
||||
413
gost/ubuntu.go
413
gost/ubuntu.go
@@ -5,8 +5,12 @@ package gost
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
debver "github.com/knqyf263/go-deb-version"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
@@ -22,150 +26,267 @@ type Ubuntu struct {
|
||||
|
||||
func (ubu Ubuntu) supported(version string) bool {
|
||||
_, ok := map[string]string{
|
||||
"606": "dapper",
|
||||
"610": "edgy",
|
||||
"704": "feisty",
|
||||
"710": "gutsy",
|
||||
"804": "hardy",
|
||||
"810": "intrepid",
|
||||
"904": "jaunty",
|
||||
"910": "karmic",
|
||||
"1004": "lucid",
|
||||
"1010": "maverick",
|
||||
"1104": "natty",
|
||||
"1110": "oneiric",
|
||||
"1204": "precise",
|
||||
"1210": "quantal",
|
||||
"1304": "raring",
|
||||
"1310": "saucy",
|
||||
"1404": "trusty",
|
||||
"1410": "utopic",
|
||||
"1504": "vivid",
|
||||
"1510": "wily",
|
||||
"1604": "xenial",
|
||||
"1610": "yakkety",
|
||||
"1704": "zesty",
|
||||
"1710": "artful",
|
||||
"1804": "bionic",
|
||||
"1810": "cosmic",
|
||||
"1904": "disco",
|
||||
"1910": "eoan",
|
||||
"2004": "focal",
|
||||
"2010": "groovy",
|
||||
"2104": "hirsute",
|
||||
"2110": "impish",
|
||||
"2204": "jammy",
|
||||
"2210": "kinetic",
|
||||
"2304": "lunar",
|
||||
}[version]
|
||||
return ok
|
||||
}
|
||||
|
||||
type cveContent struct {
|
||||
cveContent models.CveContent
|
||||
fixStatuses models.PackageFixStatuses
|
||||
}
|
||||
|
||||
// DetectCVEs fills cve information that has in Gost
|
||||
func (ubu Ubuntu) DetectCVEs(r *models.ScanResult, _ bool) (nCVEs int, err error) {
|
||||
ubuReleaseVer := strings.Replace(r.Release, ".", "", 1)
|
||||
if !ubu.supported(ubuReleaseVer) {
|
||||
if !ubu.supported(strings.Replace(r.Release, ".", "", 1)) {
|
||||
logging.Log.Warnf("Ubuntu %s is not supported yet", r.Release)
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
linuxImage := "linux-image-" + r.RunningKernel.Release
|
||||
// Add linux and set the version of running kernel to search Gost.
|
||||
if r.Container.ContainerID == "" {
|
||||
newVer := ""
|
||||
if p, ok := r.Packages[linuxImage]; ok {
|
||||
newVer = p.NewVersion
|
||||
}
|
||||
r.Packages["linux"] = models.Package{
|
||||
Name: "linux",
|
||||
Version: r.RunningKernel.Version,
|
||||
NewVersion: newVer,
|
||||
if r.RunningKernel.Release == "" {
|
||||
logging.Log.Warnf("Since the exact kernel release is not available, the vulnerability in the kernel package is not detected.")
|
||||
}
|
||||
}
|
||||
|
||||
packCvesList := []packCves{}
|
||||
fixedCVEs, err := ubu.detectCVEsWithFixState(r, true)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to detect fixed CVEs. err: %w", err)
|
||||
}
|
||||
|
||||
unfixedCVEs, err := ubu.detectCVEsWithFixState(r, false)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to detect unfixed CVEs. err: %w", err)
|
||||
}
|
||||
|
||||
return len(unique(append(fixedCVEs, unfixedCVEs...))), nil
|
||||
}
|
||||
|
||||
func (ubu Ubuntu) detectCVEsWithFixState(r *models.ScanResult, fixed bool) ([]string, error) {
|
||||
detects := map[string]cveContent{}
|
||||
if ubu.driver == nil {
|
||||
url, err := util.URLPathJoin(ubu.baseURL, "ubuntu", ubuReleaseVer, "pkgs")
|
||||
urlPrefix, err := util.URLPathJoin(ubu.baseURL, "ubuntu", strings.Replace(r.Release, ".", "", 1), "pkgs")
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to join URLPath. err: %w", err)
|
||||
}
|
||||
responses, err := getAllUnfixedCvesViaHTTP(r, url)
|
||||
s := "fixed-cves"
|
||||
if !fixed {
|
||||
s = "unfixed-cves"
|
||||
}
|
||||
responses, err := getCvesWithFixStateViaHTTP(r, urlPrefix, s)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Unfixed CVEs via HTTP. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to get fixed CVEs via HTTP. err: %w", err)
|
||||
}
|
||||
|
||||
for _, res := range responses {
|
||||
ubuCves := map[string]gostmodels.UbuntuCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &ubuCves); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
if !res.request.isSrcPack {
|
||||
continue
|
||||
}
|
||||
cves := []models.CveContent{}
|
||||
for _, ubucve := range ubuCves {
|
||||
cves = append(cves, *ubu.ConvertToModel(&ubucve))
|
||||
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-meta", "linux").Replace(res.request.packName)
|
||||
|
||||
if ubu.isKernelSourcePackage(n) {
|
||||
isRunning := false
|
||||
for _, bn := range r.SrcPackages[res.request.packName].BinaryNames {
|
||||
if bn == fmt.Sprintf("linux-image-%s", r.RunningKernel.Release) {
|
||||
isRunning = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// To detect vulnerabilities in running kernels only, skip if the kernel is not running.
|
||||
if !isRunning {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
cs := map[string]gostmodels.UbuntuCVE{}
|
||||
if err := json.Unmarshal([]byte(res.json), &cs); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to unmarshal json. err: %w", err)
|
||||
}
|
||||
for _, content := range ubu.detect(cs, fixed, models.SrcPackage{Name: res.request.packName, Version: r.SrcPackages[res.request.packName].Version, BinaryNames: r.SrcPackages[res.request.packName].BinaryNames}, fmt.Sprintf("linux-image-%s", r.RunningKernel.Release)) {
|
||||
c, ok := detects[content.cveContent.CveID]
|
||||
if ok {
|
||||
content.fixStatuses = append(content.fixStatuses, c.fixStatuses...)
|
||||
}
|
||||
detects[content.cveContent.CveID] = content
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: res.request.packName,
|
||||
isSrcPack: res.request.isSrcPack,
|
||||
cves: cves,
|
||||
})
|
||||
}
|
||||
} else {
|
||||
for _, pack := range r.Packages {
|
||||
ubuCves, err := ubu.driver.GetUnfixedCvesUbuntu(ubuReleaseVer, pack.Name)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Unfixed CVEs For Package. err: %w", err)
|
||||
}
|
||||
cves := []models.CveContent{}
|
||||
for _, ubucve := range ubuCves {
|
||||
cves = append(cves, *ubu.ConvertToModel(&ubucve))
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: pack.Name,
|
||||
isSrcPack: false,
|
||||
cves: cves,
|
||||
})
|
||||
}
|
||||
for _, p := range r.SrcPackages {
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-meta", "linux").Replace(p.Name)
|
||||
|
||||
// SrcPack
|
||||
for _, pack := range r.SrcPackages {
|
||||
ubuCves, err := ubu.driver.GetUnfixedCvesUbuntu(ubuReleaseVer, pack.Name)
|
||||
if ubu.isKernelSourcePackage(n) {
|
||||
isRunning := false
|
||||
for _, bn := range p.BinaryNames {
|
||||
if bn == fmt.Sprintf("linux-image-%s", r.RunningKernel.Release) {
|
||||
isRunning = true
|
||||
break
|
||||
}
|
||||
}
|
||||
// To detect vulnerabilities in running kernels only, skip if the kernel is not running.
|
||||
if !isRunning {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
var f func(string, string) (map[string]gostmodels.UbuntuCVE, error) = ubu.driver.GetFixedCvesUbuntu
|
||||
if !fixed {
|
||||
f = ubu.driver.GetUnfixedCvesUbuntu
|
||||
}
|
||||
cs, err := f(strings.Replace(r.Release, ".", "", 1), n)
|
||||
if err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Unfixed CVEs For SrcPackage. err: %w", err)
|
||||
return nil, xerrors.Errorf("Failed to get CVEs. release: %s, src package: %s, err: %w", major(r.Release), p.Name, err)
|
||||
}
|
||||
cves := []models.CveContent{}
|
||||
for _, ubucve := range ubuCves {
|
||||
cves = append(cves, *ubu.ConvertToModel(&ubucve))
|
||||
for _, content := range ubu.detect(cs, fixed, p, fmt.Sprintf("linux-image-%s", r.RunningKernel.Release)) {
|
||||
c, ok := detects[content.cveContent.CveID]
|
||||
if ok {
|
||||
content.fixStatuses = append(content.fixStatuses, c.fixStatuses...)
|
||||
}
|
||||
detects[content.cveContent.CveID] = content
|
||||
}
|
||||
packCvesList = append(packCvesList, packCves{
|
||||
packName: pack.Name,
|
||||
isSrcPack: true,
|
||||
cves: cves,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
delete(r.Packages, "linux")
|
||||
|
||||
for _, p := range packCvesList {
|
||||
for _, cve := range p.cves {
|
||||
v, ok := r.ScannedCves[cve.CveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(cve)
|
||||
} else {
|
||||
v.CveContents[models.UbuntuAPI] = []models.CveContent{cve}
|
||||
}
|
||||
for _, content := range detects {
|
||||
v, ok := r.ScannedCves[content.cveContent.CveID]
|
||||
if ok {
|
||||
if v.CveContents == nil {
|
||||
v.CveContents = models.NewCveContents(content.cveContent)
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: cve.CveID,
|
||||
CveContents: models.NewCveContents(cve),
|
||||
Confidences: models.Confidences{models.UbuntuAPIMatch},
|
||||
}
|
||||
nCVEs++
|
||||
v.CveContents[models.UbuntuAPI] = []models.CveContent{content.cveContent}
|
||||
}
|
||||
v.Confidences.AppendIfMissing(models.UbuntuAPIMatch)
|
||||
} else {
|
||||
v = models.VulnInfo{
|
||||
CveID: content.cveContent.CveID,
|
||||
CveContents: models.NewCveContents(content.cveContent),
|
||||
Confidences: models.Confidences{models.UbuntuAPIMatch},
|
||||
}
|
||||
}
|
||||
|
||||
names := []string{}
|
||||
if p.isSrcPack {
|
||||
if srcPack, ok := r.SrcPackages[p.packName]; ok {
|
||||
for _, binName := range srcPack.BinaryNames {
|
||||
if _, ok := r.Packages[binName]; ok {
|
||||
names = append(names, binName)
|
||||
for _, s := range content.fixStatuses {
|
||||
v.AffectedPackages = v.AffectedPackages.Store(s)
|
||||
}
|
||||
r.ScannedCves[content.cveContent.CveID] = v
|
||||
}
|
||||
|
||||
return maps.Keys(detects), nil
|
||||
}
|
||||
|
||||
func (ubu Ubuntu) detect(cves map[string]gostmodels.UbuntuCVE, fixed bool, srcPkg models.SrcPackage, runningKernelBinaryPkgName string) []cveContent {
|
||||
n := strings.NewReplacer("linux-signed", "linux", "linux-meta", "linux").Replace(srcPkg.Name)
|
||||
|
||||
var contents []cveContent
|
||||
for _, cve := range cves {
|
||||
c := cveContent{
|
||||
cveContent: *(Ubuntu{}).ConvertToModel(&cve),
|
||||
}
|
||||
|
||||
if fixed {
|
||||
for _, p := range cve.Patches {
|
||||
for _, rp := range p.ReleasePatches {
|
||||
installedVersion := srcPkg.Version
|
||||
patchedVersion := rp.Note
|
||||
|
||||
// https://git.launchpad.net/ubuntu-cve-tracker/tree/scripts/generate-oval#n384
|
||||
if ubu.isKernelSourcePackage(n) && strings.HasPrefix(srcPkg.Name, "linux-meta") {
|
||||
// 5.15.0.1026.30~20.04.16 -> 5.15.0.1026
|
||||
ss := strings.Split(installedVersion, ".")
|
||||
if len(ss) >= 4 {
|
||||
installedVersion = strings.Join(ss[:4], ".")
|
||||
}
|
||||
|
||||
// 5.15.0-1026.30~20.04.16 -> 5.15.0.1026
|
||||
lhs, rhs, ok := strings.Cut(patchedVersion, "-")
|
||||
if ok {
|
||||
patchedVersion = fmt.Sprintf("%s.%s", lhs, strings.Split(rhs, ".")[0])
|
||||
}
|
||||
}
|
||||
|
||||
affected, err := ubu.isGostDefAffected(installedVersion, patchedVersion)
|
||||
if err != nil {
|
||||
logging.Log.Debugf("Failed to parse versions: %s, Ver: %s, Gost: %s", err, installedVersion, patchedVersion)
|
||||
continue
|
||||
}
|
||||
|
||||
if affected {
|
||||
for _, bn := range srcPkg.BinaryNames {
|
||||
if ubu.isKernelSourcePackage(n) && bn != runningKernelBinaryPkgName {
|
||||
continue
|
||||
}
|
||||
c.fixStatuses = append(c.fixStatuses, models.PackageFixStatus{
|
||||
Name: bn,
|
||||
FixedIn: patchedVersion,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if p.packName == "linux" {
|
||||
names = append(names, linuxImage)
|
||||
} else {
|
||||
names = append(names, p.packName)
|
||||
}
|
||||
}
|
||||
|
||||
for _, name := range names {
|
||||
v.AffectedPackages = v.AffectedPackages.Store(models.PackageFixStatus{
|
||||
Name: name,
|
||||
} else {
|
||||
for _, bn := range srcPkg.BinaryNames {
|
||||
if ubu.isKernelSourcePackage(n) && bn != runningKernelBinaryPkgName {
|
||||
continue
|
||||
}
|
||||
c.fixStatuses = append(c.fixStatuses, models.PackageFixStatus{
|
||||
Name: bn,
|
||||
FixState: "open",
|
||||
NotFixedYet: true,
|
||||
})
|
||||
}
|
||||
r.ScannedCves[cve.CveID] = v
|
||||
}
|
||||
|
||||
if len(c.fixStatuses) > 0 {
|
||||
c.fixStatuses.Sort()
|
||||
contents = append(contents, c)
|
||||
}
|
||||
}
|
||||
return nCVEs, nil
|
||||
return contents
|
||||
}
|
||||
|
||||
func (ubu Ubuntu) isGostDefAffected(versionRelease, gostVersion string) (affected bool, err error) {
|
||||
vera, err := debver.NewVersion(versionRelease)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", versionRelease, err)
|
||||
}
|
||||
verb, err := debver.NewVersion(gostVersion)
|
||||
if err != nil {
|
||||
return false, xerrors.Errorf("Failed to parse version. version: %s, err: %w", gostVersion, err)
|
||||
}
|
||||
return vera.LessThan(verb), nil
|
||||
}
|
||||
|
||||
// ConvertToModel converts gost model to vuls model
|
||||
@@ -195,8 +316,118 @@ func (ubu Ubuntu) ConvertToModel(cve *gostmodels.UbuntuCVE) *models.CveContent {
|
||||
Summary: cve.Description,
|
||||
Cvss2Severity: cve.Priority,
|
||||
Cvss3Severity: cve.Priority,
|
||||
SourceLink: "https://ubuntu.com/security/" + cve.Candidate,
|
||||
SourceLink: fmt.Sprintf("https://ubuntu.com/security/%s", cve.Candidate),
|
||||
References: references,
|
||||
Published: cve.PublicDate,
|
||||
}
|
||||
}
|
||||
|
||||
// https://git.launchpad.net/ubuntu-cve-tracker/tree/scripts/cve_lib.py#n931
|
||||
func (ubu Ubuntu) isKernelSourcePackage(pkgname string) bool {
|
||||
switch ss := strings.Split(pkgname, "-"); len(ss) {
|
||||
case 1:
|
||||
return pkgname == "linux"
|
||||
case 2:
|
||||
if ss[0] != "linux" {
|
||||
return false
|
||||
}
|
||||
switch ss[1] {
|
||||
case "armadaxp", "mako", "manta", "flo", "goldfish", "joule", "raspi", "raspi2", "snapdragon", "aws", "azure", "bluefield", "dell300x", "gcp", "gke", "gkeop", "ibm", "lowlatency", "kvm", "oem", "oracle", "euclid", "hwe", "riscv":
|
||||
return true
|
||||
default:
|
||||
_, err := strconv.ParseFloat(ss[1], 64)
|
||||
return err == nil
|
||||
}
|
||||
case 3:
|
||||
if ss[0] != "linux" {
|
||||
return false
|
||||
}
|
||||
switch ss[1] {
|
||||
case "ti":
|
||||
return ss[2] == "omap4"
|
||||
case "raspi", "raspi2", "gke", "gkeop", "ibm", "oracle", "riscv":
|
||||
_, err := strconv.ParseFloat(ss[2], 64)
|
||||
return err == nil
|
||||
case "aws":
|
||||
switch ss[2] {
|
||||
case "hwe", "edge":
|
||||
return true
|
||||
default:
|
||||
_, err := strconv.ParseFloat(ss[2], 64)
|
||||
return err == nil
|
||||
}
|
||||
case "azure":
|
||||
switch ss[2] {
|
||||
case "fde", "edge":
|
||||
return true
|
||||
default:
|
||||
_, err := strconv.ParseFloat(ss[2], 64)
|
||||
return err == nil
|
||||
}
|
||||
case "gcp":
|
||||
switch ss[2] {
|
||||
case "edge":
|
||||
return true
|
||||
default:
|
||||
_, err := strconv.ParseFloat(ss[2], 64)
|
||||
return err == nil
|
||||
}
|
||||
case "intel":
|
||||
switch ss[2] {
|
||||
case "iotg":
|
||||
return true
|
||||
default:
|
||||
_, err := strconv.ParseFloat(ss[2], 64)
|
||||
return err == nil
|
||||
}
|
||||
case "oem":
|
||||
switch ss[2] {
|
||||
case "osp1":
|
||||
return true
|
||||
default:
|
||||
_, err := strconv.ParseFloat(ss[2], 64)
|
||||
return err == nil
|
||||
}
|
||||
case "lts":
|
||||
return ss[2] == "xenial"
|
||||
case "hwe":
|
||||
switch ss[2] {
|
||||
case "edge":
|
||||
return true
|
||||
default:
|
||||
_, err := strconv.ParseFloat(ss[2], 64)
|
||||
return err == nil
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
case 4:
|
||||
if ss[0] != "linux" {
|
||||
return false
|
||||
}
|
||||
switch ss[1] {
|
||||
case "azure":
|
||||
if ss[2] != "fde" {
|
||||
return false
|
||||
}
|
||||
_, err := strconv.ParseFloat(ss[3], 64)
|
||||
return err == nil
|
||||
case "intel":
|
||||
if ss[2] != "iotg" {
|
||||
return false
|
||||
}
|
||||
_, err := strconv.ParseFloat(ss[3], 64)
|
||||
return err == nil
|
||||
case "lowlatency":
|
||||
if ss[2] != "hwe" {
|
||||
return false
|
||||
}
|
||||
_, err := strconv.ParseFloat(ss[3], 64)
|
||||
return err == nil
|
||||
default:
|
||||
return false
|
||||
}
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
@@ -10,68 +10,51 @@ import (
|
||||
)
|
||||
|
||||
func TestUbuntu_Supported(t *testing.T) {
|
||||
type args struct {
|
||||
ubuReleaseVer string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
args string
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "14.04 is supported",
|
||||
args: args{
|
||||
ubuReleaseVer: "1404",
|
||||
},
|
||||
args: "1404",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "16.04 is supported",
|
||||
args: args{
|
||||
ubuReleaseVer: "1604",
|
||||
},
|
||||
args: "1604",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "18.04 is supported",
|
||||
args: args{
|
||||
ubuReleaseVer: "1804",
|
||||
},
|
||||
args: "1804",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "20.04 is supported",
|
||||
args: args{
|
||||
ubuReleaseVer: "2004",
|
||||
},
|
||||
args: "2004",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "20.10 is supported",
|
||||
args: args{
|
||||
ubuReleaseVer: "2010",
|
||||
},
|
||||
args: "2010",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "21.04 is supported",
|
||||
args: args{
|
||||
ubuReleaseVer: "2104",
|
||||
},
|
||||
args: "2104",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty string is not supported yet",
|
||||
args: args{
|
||||
ubuReleaseVer: "",
|
||||
},
|
||||
args: "",
|
||||
want: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ubu := Ubuntu{}
|
||||
if got := ubu.supported(tt.args.ubuReleaseVer); got != tt.want {
|
||||
if got := ubu.supported(tt.args); got != tt.want {
|
||||
t.Errorf("Ubuntu.Supported() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
@@ -127,11 +110,222 @@ func TestUbuntuConvertToModel(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
ubu := Ubuntu{}
|
||||
got := ubu.ConvertToModel(&tt.input)
|
||||
if !reflect.DeepEqual(got, &tt.expected) {
|
||||
if got := (Ubuntu{}).ConvertToModel(&tt.input); !reflect.DeepEqual(got, &tt.expected) {
|
||||
t.Errorf("Ubuntu.ConvertToModel() = %#v, want %#v", got, &tt.expected)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_detect(t *testing.T) {
|
||||
type args struct {
|
||||
cves map[string]gostmodels.UbuntuCVE
|
||||
fixed bool
|
||||
srcPkg models.SrcPackage
|
||||
runningKernelBinaryPkgName string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []cveContent
|
||||
}{
|
||||
{
|
||||
name: "fixed",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.UbuntuCVE{
|
||||
"CVE-0000-0000": {
|
||||
Candidate: "CVE-0000-0000",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-0"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
Candidate: "CVE-0000-0001",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fixed: true,
|
||||
srcPkg: models.SrcPackage{Name: "pkg", Version: "0.0.0-1", BinaryNames: []string{"pkg"}},
|
||||
runningKernelBinaryPkgName: "",
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.UbuntuAPI, CveID: "CVE-0000-0001", SourceLink: "https://ubuntu.com/security/CVE-0000-0001", References: []models.Reference{}},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "pkg",
|
||||
FixedIn: "0.0.0-2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "unfixed",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.UbuntuCVE{
|
||||
"CVE-0000-0000": {
|
||||
Candidate: "CVE-0000-0000",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "pkg",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "open"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fixed: false,
|
||||
srcPkg: models.SrcPackage{Name: "pkg", Version: "0.0.0-1", BinaryNames: []string{"pkg"}},
|
||||
runningKernelBinaryPkgName: "",
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.UbuntuAPI, CveID: "CVE-0000-0000", SourceLink: "https://ubuntu.com/security/CVE-0000-0000", References: []models.Reference{}},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "pkg",
|
||||
FixState: "open",
|
||||
NotFixedYet: true,
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "linux-signed",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.UbuntuCVE{
|
||||
"CVE-0000-0000": {
|
||||
Candidate: "CVE-0000-0000",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "linux",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-0"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
Candidate: "CVE-0000-0001",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "linux",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fixed: true,
|
||||
srcPkg: models.SrcPackage{Name: "linux-signed", Version: "0.0.0-1", BinaryNames: []string{"linux-image-generic", "linux-headers-generic"}},
|
||||
runningKernelBinaryPkgName: "linux-image-generic",
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.UbuntuAPI, CveID: "CVE-0000-0001", SourceLink: "https://ubuntu.com/security/CVE-0000-0001", References: []models.Reference{}},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "linux-image-generic",
|
||||
FixedIn: "0.0.0-2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "linux-meta",
|
||||
args: args{
|
||||
cves: map[string]gostmodels.UbuntuCVE{
|
||||
"CVE-0000-0000": {
|
||||
Candidate: "CVE-0000-0000",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "linux",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-0"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
"CVE-0000-0001": {
|
||||
Candidate: "CVE-0000-0001",
|
||||
Patches: []gostmodels.UbuntuPatch{
|
||||
{
|
||||
PackageName: "linux",
|
||||
ReleasePatches: []gostmodels.UbuntuReleasePatch{{ReleaseName: "jammy", Status: "released", Note: "0.0.0-2"}},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
fixed: true,
|
||||
srcPkg: models.SrcPackage{Name: "linux-meta", Version: "0.0.0.1", BinaryNames: []string{"linux-image-generic", "linux-headers-generic"}},
|
||||
runningKernelBinaryPkgName: "linux-image-generic",
|
||||
},
|
||||
want: []cveContent{
|
||||
{
|
||||
cveContent: models.CveContent{Type: models.UbuntuAPI, CveID: "CVE-0000-0001", SourceLink: "https://ubuntu.com/security/CVE-0000-0001", References: []models.Reference{}},
|
||||
fixStatuses: models.PackageFixStatuses{{
|
||||
Name: "linux-image-generic",
|
||||
FixedIn: "0.0.0.2",
|
||||
}},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := (Ubuntu{}).detect(tt.args.cves, tt.args.fixed, tt.args.srcPkg, tt.args.runningKernelBinaryPkgName); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("detect() = %#v, want %#v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestUbuntu_isKernelSourcePackage(t *testing.T) {
|
||||
tests := []struct {
|
||||
pkgname string
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
pkgname: "linux",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
pkgname: "apt",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-aws",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-5.9",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-base",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
pkgname: "apt-utils",
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-aws-edge",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-aws-5.15",
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
pkgname: "linux-lowlatency-hwe-5.15",
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.pkgname, func(t *testing.T) {
|
||||
if got := (Ubuntu{}).isKernelSourcePackage(tt.pkgname); got != tt.want {
|
||||
t.Errorf("Ubuntu.isKernelSourcePackage() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
40
gost/util.go
40
gost/util.go
@@ -9,11 +9,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/cenkalti/backoff"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
"github.com/parnurzeal/gorequest"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
type response struct {
|
||||
@@ -78,15 +80,9 @@ func getCvesViaHTTP(cveIDs []string, urlPrefix string) (
|
||||
}
|
||||
|
||||
type request struct {
|
||||
osMajorVersion string
|
||||
packName string
|
||||
isSrcPack bool
|
||||
cveID string
|
||||
}
|
||||
|
||||
func getAllUnfixedCvesViaHTTP(r *models.ScanResult, urlPrefix string) (
|
||||
responses []response, err error) {
|
||||
return getCvesWithFixStateViaHTTP(r, urlPrefix, "unfixed-cves")
|
||||
packName string
|
||||
isSrcPack bool
|
||||
cveID string
|
||||
}
|
||||
|
||||
func getCvesWithFixStateViaHTTP(r *models.ScanResult, urlPrefix, fixState string) (responses []response, err error) {
|
||||
@@ -101,16 +97,14 @@ func getCvesWithFixStateViaHTTP(r *models.ScanResult, urlPrefix, fixState string
|
||||
go func() {
|
||||
for _, pack := range r.Packages {
|
||||
reqChan <- request{
|
||||
osMajorVersion: major(r.Release),
|
||||
packName: pack.Name,
|
||||
isSrcPack: false,
|
||||
packName: pack.Name,
|
||||
isSrcPack: false,
|
||||
}
|
||||
}
|
||||
for _, pack := range r.SrcPackages {
|
||||
reqChan <- request{
|
||||
osMajorVersion: major(r.Release),
|
||||
packName: pack.Name,
|
||||
isSrcPack: true,
|
||||
packName: pack.Name,
|
||||
isSrcPack: true,
|
||||
}
|
||||
}
|
||||
}()
|
||||
@@ -145,11 +139,11 @@ func getCvesWithFixStateViaHTTP(r *models.ScanResult, urlPrefix, fixState string
|
||||
case err := <-errChan:
|
||||
errs = append(errs, err)
|
||||
case <-timeout:
|
||||
return nil, xerrors.New("Timeout Fetching OVAL")
|
||||
return nil, xerrors.New("Timeout Fetching Gost")
|
||||
}
|
||||
}
|
||||
if len(errs) != 0 {
|
||||
return nil, xerrors.Errorf("Failed to fetch OVAL. err: %w", errs)
|
||||
return nil, xerrors.Errorf("Failed to fetch Gost. err: %w", errs)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -193,3 +187,11 @@ func httpGet(url string, req request, resChan chan<- response, errChan chan<- er
|
||||
func major(osVer string) (majorVersion string) {
|
||||
return strings.Split(osVer, ".")[0]
|
||||
}
|
||||
|
||||
func unique[T comparable](s []T) []T {
|
||||
m := map[T]struct{}{}
|
||||
for _, v := range s {
|
||||
m[v] = struct{}{}
|
||||
}
|
||||
return maps.Keys(m)
|
||||
}
|
||||
|
||||
Submodule integration updated: b40375c4df...1ae07c012e
@@ -15,7 +15,7 @@ import (
|
||||
formatter "github.com/kotakanbe/logrus-prefixed-formatter"
|
||||
)
|
||||
|
||||
//LogOpts has options for logging
|
||||
// LogOpts has options for logging
|
||||
type LogOpts struct {
|
||||
Debug bool `json:"debug,omitempty"`
|
||||
DebugSQL bool `json:"debugSQL,omitempty"`
|
||||
@@ -45,6 +45,13 @@ func NewNormalLogger() Logger {
|
||||
return Logger{Entry: logrus.Entry{Logger: logrus.New()}}
|
||||
}
|
||||
|
||||
// NewIODiscardLogger creates discard logger
|
||||
func NewIODiscardLogger() Logger {
|
||||
l := logrus.New()
|
||||
l.Out = io.Discard
|
||||
return Logger{Entry: logrus.Entry{Logger: l}}
|
||||
}
|
||||
|
||||
// NewCustomLogger creates logrus
|
||||
func NewCustomLogger(debug, quiet, logToFile bool, logDir, logMsgAnsiColor, serverName string) Logger {
|
||||
log := logrus.New()
|
||||
|
||||
@@ -75,7 +75,7 @@ func (v CveContents) PrimarySrcURLs(lang, myFamily, cveID string, confidences Co
|
||||
}
|
||||
}
|
||||
|
||||
order := CveContentTypes{Nvd, NewCveContentType(myFamily), GitHub}
|
||||
order := append(append(CveContentTypes{Nvd}, GetCveContentTypes(myFamily)...), GitHub)
|
||||
for _, ctype := range order {
|
||||
if conts, found := v[ctype]; found {
|
||||
for _, cont := range conts {
|
||||
@@ -133,24 +133,6 @@ func (v CveContents) PatchURLs() (urls []string) {
|
||||
return
|
||||
}
|
||||
|
||||
/*
|
||||
// Severities returns Severities
|
||||
func (v CveContents) Severities(myFamily string) (values []CveContentStr) {
|
||||
order := CveContentTypes{NVD, NewCveContentType(myFamily)}
|
||||
order = append(order, AllCveContetTypes.Except(append(order)...)...)
|
||||
|
||||
for _, ctype := range order {
|
||||
if cont, found := v[ctype]; found && 0 < len(cont.Severity) {
|
||||
values = append(values, CveContentStr{
|
||||
Type: ctype,
|
||||
Value: cont.Severity,
|
||||
})
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
*/
|
||||
|
||||
// CveContentCpes has CveContentType and Value
|
||||
type CveContentCpes struct {
|
||||
Type CveContentType
|
||||
@@ -159,7 +141,7 @@ type CveContentCpes struct {
|
||||
|
||||
// Cpes returns affected CPEs of this Vulnerability
|
||||
func (v CveContents) Cpes(myFamily string) (values []CveContentCpes) {
|
||||
order := CveContentTypes{NewCveContentType(myFamily)}
|
||||
order := GetCveContentTypes(myFamily)
|
||||
order = append(order, AllCveContetTypes.Except(order...)...)
|
||||
|
||||
for _, ctype := range order {
|
||||
@@ -185,7 +167,7 @@ type CveContentRefs struct {
|
||||
|
||||
// References returns References
|
||||
func (v CveContents) References(myFamily string) (values []CveContentRefs) {
|
||||
order := CveContentTypes{NewCveContentType(myFamily)}
|
||||
order := GetCveContentTypes(myFamily)
|
||||
order = append(order, AllCveContetTypes.Except(order...)...)
|
||||
|
||||
for _, ctype := range order {
|
||||
@@ -206,7 +188,7 @@ func (v CveContents) References(myFamily string) (values []CveContentRefs) {
|
||||
|
||||
// CweIDs returns related CweIDs of the vulnerability
|
||||
func (v CveContents) CweIDs(myFamily string) (values []CveContentStr) {
|
||||
order := CveContentTypes{NewCveContentType(myFamily)}
|
||||
order := GetCveContentTypes(myFamily)
|
||||
order = append(order, AllCveContetTypes.Except(order...)...)
|
||||
for _, ctype := range order {
|
||||
if conts, found := v[ctype]; found {
|
||||
@@ -352,6 +334,30 @@ func NewCveContentType(name string) CveContentType {
|
||||
}
|
||||
}
|
||||
|
||||
// GetCveContentTypes return CveContentTypes
|
||||
func GetCveContentTypes(family string) []CveContentType {
|
||||
switch family {
|
||||
case constant.RedHat, constant.CentOS, constant.Alma, constant.Rocky:
|
||||
return []CveContentType{RedHat, RedHatAPI}
|
||||
case constant.Fedora:
|
||||
return []CveContentType{Fedora}
|
||||
case constant.Oracle:
|
||||
return []CveContentType{Oracle}
|
||||
case constant.Amazon:
|
||||
return []CveContentType{Amazon}
|
||||
case constant.Debian, constant.Raspbian:
|
||||
return []CveContentType{Debian, DebianSecurityTracker}
|
||||
case constant.Ubuntu:
|
||||
return []CveContentType{Ubuntu, UbuntuAPI}
|
||||
case constant.OpenSUSE, constant.OpenSUSELeap, constant.SUSEEnterpriseServer, constant.SUSEEnterpriseDesktop:
|
||||
return []CveContentType{SUSE}
|
||||
case constant.Windows:
|
||||
return []CveContentType{Microsoft}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
// Nvd is Nvd JSON
|
||||
Nvd CveContentType = "nvd"
|
||||
|
||||
@@ -3,6 +3,8 @@ package models
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
)
|
||||
|
||||
func TestExcept(t *testing.T) {
|
||||
@@ -249,3 +251,61 @@ func TestCveContents_Sort(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNewCveContentType(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
want CveContentType
|
||||
}{
|
||||
{
|
||||
name: "redhat",
|
||||
want: RedHat,
|
||||
},
|
||||
{
|
||||
name: "centos",
|
||||
want: RedHat,
|
||||
},
|
||||
{
|
||||
name: "unknown",
|
||||
want: Unknown,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := NewCveContentType(tt.name); got != tt.want {
|
||||
t.Errorf("NewCveContentType() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetCveContentTypes(t *testing.T) {
|
||||
tests := []struct {
|
||||
family string
|
||||
want []CveContentType
|
||||
}{
|
||||
{
|
||||
family: constant.RedHat,
|
||||
want: []CveContentType{RedHat, RedHatAPI},
|
||||
},
|
||||
{
|
||||
family: constant.Debian,
|
||||
want: []CveContentType{Debian, DebianSecurityTracker},
|
||||
},
|
||||
{
|
||||
family: constant.Ubuntu,
|
||||
want: []CveContentType{Ubuntu, UbuntuAPI},
|
||||
},
|
||||
{
|
||||
family: constant.FreeBSD,
|
||||
want: nil,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.family, func(t *testing.T) {
|
||||
if got := GetCveContentTypes(tt.family); !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("GetCveContentTypes() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
96
models/github.go
Normal file
96
models/github.go
Normal file
@@ -0,0 +1,96 @@
|
||||
package models
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
ftypes "github.com/aquasecurity/trivy/pkg/fanal/types"
|
||||
)
|
||||
|
||||
// DependencyGraphManifests has a map of DependencyGraphManifest
|
||||
// key: BlobPath
|
||||
type DependencyGraphManifests map[string]DependencyGraphManifest
|
||||
|
||||
// DependencyGraphManifest has filename, repository, dependencies
|
||||
type DependencyGraphManifest struct {
|
||||
BlobPath string `json:"blobPath"`
|
||||
Filename string `json:"filename"`
|
||||
Repository string `json:"repository"`
|
||||
Dependencies []Dependency `json:"dependencies"`
|
||||
}
|
||||
|
||||
// RepoURLFilename should be same format with GitHubSecurityAlert.RepoURLManifestPath()
|
||||
func (m DependencyGraphManifest) RepoURLFilename() string {
|
||||
return fmt.Sprintf("%s/%s", m.Repository, m.Filename)
|
||||
}
|
||||
|
||||
// Ecosystem returns a name of ecosystem(or package manager) of manifest(lock) file in trivy way
|
||||
// https://docs.github.com/en/code-security/supply-chain-security/understanding-your-software-supply-chain/about-the-dependency-graph#supported-package-ecosystems
|
||||
func (m DependencyGraphManifest) Ecosystem() string {
|
||||
switch {
|
||||
case strings.HasSuffix(m.Filename, "Cargo.lock"),
|
||||
strings.HasSuffix(m.Filename, "Cargo.toml"):
|
||||
return ftypes.Cargo // Rust
|
||||
case strings.HasSuffix(m.Filename, "composer.lock"),
|
||||
strings.HasSuffix(m.Filename, "composer.json"):
|
||||
return ftypes.Composer // PHP
|
||||
case strings.HasSuffix(m.Filename, ".csproj"),
|
||||
strings.HasSuffix(m.Filename, ".vbproj"),
|
||||
strings.HasSuffix(m.Filename, ".nuspec"),
|
||||
strings.HasSuffix(m.Filename, ".vcxproj"),
|
||||
strings.HasSuffix(m.Filename, ".fsproj"),
|
||||
strings.HasSuffix(m.Filename, "packages.config"):
|
||||
return ftypes.NuGet // .NET languages (C#, F#, VB), C++
|
||||
case strings.HasSuffix(m.Filename, "go.sum"),
|
||||
strings.HasSuffix(m.Filename, "go.mod"):
|
||||
return ftypes.GoModule // Go
|
||||
case strings.HasSuffix(m.Filename, "pom.xml"):
|
||||
return ftypes.Pom // Java, Scala
|
||||
case strings.HasSuffix(m.Filename, "package-lock.json"),
|
||||
strings.HasSuffix(m.Filename, "package.json"):
|
||||
return ftypes.Npm // JavaScript
|
||||
case strings.HasSuffix(m.Filename, "yarn.lock"):
|
||||
return ftypes.Yarn // JavaScript
|
||||
case strings.HasSuffix(m.Filename, "requirements.txt"),
|
||||
strings.HasSuffix(m.Filename, "requirements-dev.txt"),
|
||||
strings.HasSuffix(m.Filename, "setup.py"):
|
||||
return ftypes.Pip // Python
|
||||
case strings.HasSuffix(m.Filename, "Pipfile.lock"),
|
||||
strings.HasSuffix(m.Filename, "Pipfile"):
|
||||
return ftypes.Pipenv // Python
|
||||
case strings.HasSuffix(m.Filename, "poetry.lock"),
|
||||
strings.HasSuffix(m.Filename, "pyproject.toml"):
|
||||
return ftypes.Poetry // Python
|
||||
case strings.HasSuffix(m.Filename, "Gemfile.lock"),
|
||||
strings.HasSuffix(m.Filename, "Gemfile"):
|
||||
return ftypes.Bundler // Ruby
|
||||
case strings.HasSuffix(m.Filename, ".gemspec"):
|
||||
return ftypes.GemSpec // Ruby
|
||||
case strings.HasSuffix(m.Filename, "pubspec.lock"),
|
||||
strings.HasSuffix(m.Filename, "pubspec.yaml"):
|
||||
return "pub" // Dart
|
||||
case strings.HasSuffix(m.Filename, ".yml"),
|
||||
strings.HasSuffix(m.Filename, ".yaml"):
|
||||
return "actions" // GitHub Actions workflows
|
||||
default:
|
||||
return "unknown"
|
||||
}
|
||||
}
|
||||
|
||||
// Dependency has dependency package information
|
||||
type Dependency struct {
|
||||
PackageName string `json:"packageName"`
|
||||
PackageManager string `json:"packageManager"`
|
||||
Repository string `json:"repository"`
|
||||
Requirements string `json:"requirements"`
|
||||
}
|
||||
|
||||
// Version returns version
|
||||
func (d Dependency) Version() string {
|
||||
s := strings.Split(d.Requirements, " ")
|
||||
if len(s) == 2 && s[0] == "=" {
|
||||
return s[1]
|
||||
}
|
||||
// in case of ranged version
|
||||
return ""
|
||||
}
|
||||
@@ -146,7 +146,9 @@ var FindLockFiles = []string{
|
||||
// gomod
|
||||
ftypes.GoMod, ftypes.GoSum,
|
||||
// java
|
||||
ftypes.MavenPom, "*.jar", "*.war", "*.ear", "*.par",
|
||||
ftypes.MavenPom, "*.jar", "*.war", "*.ear", "*.par", "*gradle.lockfile",
|
||||
// C / C++
|
||||
ftypes.ConanLock,
|
||||
}
|
||||
|
||||
// GetLibraryKey returns target library key
|
||||
@@ -160,7 +162,7 @@ func (s LibraryScanner) GetLibraryKey() string {
|
||||
return "php"
|
||||
case ftypes.GoBinary, ftypes.GoModule:
|
||||
return "gomod"
|
||||
case ftypes.Jar, ftypes.Pom:
|
||||
case ftypes.Jar, ftypes.Pom, ftypes.Gradle:
|
||||
return "java"
|
||||
case ftypes.Npm, ftypes.Yarn, ftypes.Pnpm, ftypes.NodePkg, ftypes.JavaScript:
|
||||
return "node"
|
||||
@@ -168,6 +170,8 @@ func (s LibraryScanner) GetLibraryKey() string {
|
||||
return ".net"
|
||||
case ftypes.Pipenv, ftypes.Poetry, ftypes.Pip, ftypes.PythonPkg:
|
||||
return "python"
|
||||
case ftypes.ConanLock:
|
||||
return "c"
|
||||
default:
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -45,15 +45,17 @@ type ScanResult struct {
|
||||
Errors []string `json:"errors"`
|
||||
Warnings []string `json:"warnings"`
|
||||
|
||||
ScannedCves VulnInfos `json:"scannedCves"`
|
||||
RunningKernel Kernel `json:"runningKernel"`
|
||||
Packages Packages `json:"packages"`
|
||||
SrcPackages SrcPackages `json:",omitempty"`
|
||||
EnabledDnfModules []string `json:"enabledDnfModules,omitempty"` // for dnf modules
|
||||
WordPressPackages WordPressPackages `json:",omitempty"`
|
||||
LibraryScanners LibraryScanners `json:"libraries,omitempty"`
|
||||
CweDict CweDict `json:"cweDict,omitempty"`
|
||||
Optional map[string]interface{} `json:",omitempty"`
|
||||
ScannedCves VulnInfos `json:"scannedCves"`
|
||||
RunningKernel Kernel `json:"runningKernel"`
|
||||
Packages Packages `json:"packages"`
|
||||
SrcPackages SrcPackages `json:",omitempty"`
|
||||
EnabledDnfModules []string `json:"enabledDnfModules,omitempty"` // for dnf modules
|
||||
WordPressPackages WordPressPackages `json:",omitempty"`
|
||||
GitHubManifests DependencyGraphManifests `json:"gitHubManifests,omitempty"`
|
||||
LibraryScanners LibraryScanners `json:"libraries,omitempty"`
|
||||
WindowsKB *WindowsKB `json:"windowsKB,omitempty"`
|
||||
CweDict CweDict `json:"cweDict,omitempty"`
|
||||
Optional map[string]interface{} `json:",omitempty"`
|
||||
Config struct {
|
||||
Scan config.Config `json:"scan"`
|
||||
Report config.Config `json:"report"`
|
||||
@@ -82,6 +84,12 @@ type Kernel struct {
|
||||
RebootRequired bool `json:"rebootRequired"`
|
||||
}
|
||||
|
||||
// WindowsKB has applied and unapplied KBs
|
||||
type WindowsKB struct {
|
||||
Applied []string `json:"applied,omitempty"`
|
||||
Unapplied []string `json:"unapplied,omitempty"`
|
||||
}
|
||||
|
||||
// FilterInactiveWordPressLibs is filter function.
|
||||
func (r *ScanResult) FilterInactiveWordPressLibs(detectInactive bool) {
|
||||
if detectInactive {
|
||||
|
||||
@@ -236,10 +236,13 @@ func (ps PackageFixStatuses) Store(pkg PackageFixStatus) PackageFixStatuses {
|
||||
return ps
|
||||
}
|
||||
|
||||
// Sort by Name
|
||||
// Sort by Name asc, FixedIn desc
|
||||
func (ps PackageFixStatuses) Sort() {
|
||||
sort.Slice(ps, func(i, j int) bool {
|
||||
return ps[i].Name < ps[j].Name
|
||||
if ps[i].Name != ps[j].Name {
|
||||
return ps[i].Name < ps[j].Name
|
||||
}
|
||||
return ps[j].FixedIn < ps[i].FixedIn
|
||||
})
|
||||
}
|
||||
|
||||
@@ -267,6 +270,7 @@ type VulnInfo struct {
|
||||
GitHubSecurityAlerts GitHubSecurityAlerts `json:"gitHubSecurityAlerts,omitempty"`
|
||||
WpPackageFixStats WpPackageFixStats `json:"wpPackageFixStats,omitempty"`
|
||||
LibraryFixedIns LibraryFixedIns `json:"libraryFixedIns,omitempty"`
|
||||
WindowsKBFixedIns []string `json:"windowsKBFixedIns,omitempty"`
|
||||
VulnType string `json:"vulnType,omitempty"`
|
||||
DiffStatus DiffStatus `json:"diffStatus,omitempty"`
|
||||
}
|
||||
@@ -284,7 +288,7 @@ type GitHubSecurityAlerts []GitHubSecurityAlert
|
||||
// Add adds given arg to the slice and return the slice (immutable)
|
||||
func (g GitHubSecurityAlerts) Add(alert GitHubSecurityAlert) GitHubSecurityAlerts {
|
||||
for _, a := range g {
|
||||
if a.PackageName == alert.PackageName {
|
||||
if a.RepoURLPackageName() == alert.RepoURLPackageName() {
|
||||
return g
|
||||
}
|
||||
}
|
||||
@@ -294,19 +298,39 @@ func (g GitHubSecurityAlerts) Add(alert GitHubSecurityAlert) GitHubSecurityAlert
|
||||
// Names return a slice of lib names
|
||||
func (g GitHubSecurityAlerts) Names() (names []string) {
|
||||
for _, a := range g {
|
||||
names = append(names, a.PackageName)
|
||||
names = append(names, a.RepoURLPackageName())
|
||||
}
|
||||
return names
|
||||
}
|
||||
|
||||
// GitHubSecurityAlert has detected CVE-ID, PackageName, Status fetched via GitHub API
|
||||
// GitHubSecurityAlert has detected CVE-ID, GSAVulnerablePackage, Status fetched via GitHub API
|
||||
type GitHubSecurityAlert struct {
|
||||
PackageName string `json:"packageName"`
|
||||
FixedIn string `json:"fixedIn"`
|
||||
AffectedRange string `json:"affectedRange"`
|
||||
Dismissed bool `json:"dismissed"`
|
||||
DismissedAt time.Time `json:"dismissedAt"`
|
||||
DismissReason string `json:"dismissReason"`
|
||||
Repository string `json:"repository"`
|
||||
Package GSAVulnerablePackage `json:"package,omitempty"`
|
||||
FixedIn string `json:"fixedIn"`
|
||||
AffectedRange string `json:"affectedRange"`
|
||||
Dismissed bool `json:"dismissed"`
|
||||
DismissedAt time.Time `json:"dismissedAt"`
|
||||
DismissReason string `json:"dismissReason"`
|
||||
}
|
||||
|
||||
// RepoURLPackageName returns a string connecting the repository and package name
|
||||
func (a GitHubSecurityAlert) RepoURLPackageName() string {
|
||||
return fmt.Sprintf("%s %s", a.Repository, a.Package.Name)
|
||||
}
|
||||
|
||||
// RepoURLManifestPath should be same format with DependencyGraphManifest.RepoURLFilename()
|
||||
func (a GitHubSecurityAlert) RepoURLManifestPath() string {
|
||||
return fmt.Sprintf("%s/%s", a.Repository, a.Package.ManifestPath)
|
||||
}
|
||||
|
||||
// GSAVulnerablePackage has vulnerable package information
|
||||
type GSAVulnerablePackage struct {
|
||||
Name string `json:"name"`
|
||||
Ecosystem string `json:"ecosystem"`
|
||||
ManifestFilename string `json:"manifestFilename"`
|
||||
ManifestPath string `json:"manifestPath"`
|
||||
Requirements string `json:"requirements"`
|
||||
}
|
||||
|
||||
// LibraryFixedIns is a list of Library's FixedIn
|
||||
@@ -393,7 +417,7 @@ func (v VulnInfo) Titles(lang, myFamily string) (values []CveContentStr) {
|
||||
}
|
||||
}
|
||||
|
||||
order := CveContentTypes{Trivy, Nvd, NewCveContentType(myFamily)}
|
||||
order := append(CveContentTypes{Trivy, Nvd}, GetCveContentTypes(myFamily)...)
|
||||
order = append(order, AllCveContetTypes.Except(append(order, Jvn)...)...)
|
||||
for _, ctype := range order {
|
||||
if conts, found := v.CveContents[ctype]; found {
|
||||
@@ -440,7 +464,7 @@ func (v VulnInfo) Summaries(lang, myFamily string) (values []CveContentStr) {
|
||||
}
|
||||
}
|
||||
|
||||
order := CveContentTypes{Trivy, NewCveContentType(myFamily), Nvd, GitHub}
|
||||
order := append(append(CveContentTypes{Trivy}, GetCveContentTypes(myFamily)...), Nvd, GitHub)
|
||||
order = append(order, AllCveContetTypes.Except(append(order, Jvn)...)...)
|
||||
for _, ctype := range order {
|
||||
if conts, found := v.CveContents[ctype]; found {
|
||||
@@ -511,7 +535,7 @@ func (v VulnInfo) Cvss2Scores() (values []CveContentCvss) {
|
||||
|
||||
// Cvss3Scores returns CVSS V3 Score
|
||||
func (v VulnInfo) Cvss3Scores() (values []CveContentCvss) {
|
||||
order := []CveContentType{RedHatAPI, RedHat, SUSE, Nvd, Jvn}
|
||||
order := []CveContentType{RedHatAPI, RedHat, SUSE, Microsoft, Nvd, Jvn}
|
||||
for _, ctype := range order {
|
||||
if conts, found := v.CveContents[ctype]; found {
|
||||
for _, cont := range conts {
|
||||
@@ -532,7 +556,7 @@ func (v VulnInfo) Cvss3Scores() (values []CveContentCvss) {
|
||||
}
|
||||
}
|
||||
|
||||
for _, ctype := range []CveContentType{Debian, DebianSecurityTracker, Ubuntu, Amazon, Trivy, GitHub, WpScan} {
|
||||
for _, ctype := range []CveContentType{Debian, DebianSecurityTracker, Ubuntu, UbuntuAPI, Amazon, Trivy, GitHub, WpScan} {
|
||||
if conts, found := v.CveContents[ctype]; found {
|
||||
for _, cont := range conts {
|
||||
if cont.Cvss3Severity != "" {
|
||||
@@ -641,6 +665,7 @@ func (v VulnInfo) PatchStatus(packs Packages) string {
|
||||
if len(v.CpeURIs) != 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
for _, p := range v.AffectedPackages {
|
||||
if p.NotFixedYet {
|
||||
return "unfixed"
|
||||
@@ -660,6 +685,13 @@ func (v VulnInfo) PatchStatus(packs Packages) string {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, c := range v.Confidences {
|
||||
if c == WindowsUpdateSearch && len(v.WindowsKBFixedIns) == 0 {
|
||||
return "unfixed"
|
||||
}
|
||||
}
|
||||
|
||||
return "fixed"
|
||||
}
|
||||
|
||||
@@ -710,7 +742,7 @@ func severityToCvssScoreRange(severity string) string {
|
||||
return "7.0-8.9"
|
||||
case "MODERATE", "MEDIUM":
|
||||
return "4.0-6.9"
|
||||
case "LOW":
|
||||
case "LOW", "NEGLIGIBLE":
|
||||
return "0.1-3.9"
|
||||
}
|
||||
return "None"
|
||||
@@ -728,6 +760,10 @@ func severityToCvssScoreRange(severity string) string {
|
||||
// Critical, High, Medium, Low
|
||||
// https://wiki.ubuntu.com/Bugs/Importance
|
||||
// https://people.canonical.com/~ubuntu-security/cve/priority.html
|
||||
//
|
||||
// Ubuntu CVE Tracker
|
||||
// Critical, High, Medium, Low, Negligible
|
||||
// https://people.canonical.com/~ubuntu-security/priority.html
|
||||
func severityToCvssScoreRoughly(severity string) float64 {
|
||||
switch strings.ToUpper(severity) {
|
||||
case "CRITICAL":
|
||||
@@ -736,7 +772,7 @@ func severityToCvssScoreRoughly(severity string) float64 {
|
||||
return 8.9
|
||||
case "MODERATE", "MEDIUM":
|
||||
return 6.9
|
||||
case "LOW":
|
||||
case "LOW", "NEGLIGIBLE":
|
||||
return 3.9
|
||||
}
|
||||
return 0
|
||||
@@ -797,6 +833,8 @@ type Exploit struct {
|
||||
DocumentURL *string `json:"documentURL,omitempty"`
|
||||
ShellCodeURL *string `json:"shellCodeURL,omitempty"`
|
||||
BinaryURL *string `json:"binaryURL,omitempty"`
|
||||
PaperURL *string `json:"paperURL,omitempty"`
|
||||
GHDBURL *string `json:"ghdbURL,omitempty"`
|
||||
}
|
||||
|
||||
// Metasploit :
|
||||
|
||||
@@ -991,6 +991,28 @@ func TestSortPackageStatues(t *testing.T) {
|
||||
{Name: "b"},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: PackageFixStatuses{
|
||||
{
|
||||
Name: "libzstd1",
|
||||
FixedIn: "1.3.1+dfsg-1~ubuntu0.16.04.1+esm1",
|
||||
},
|
||||
{
|
||||
Name: "libzstd1",
|
||||
FixedIn: "1.3.1+dfsg-1~ubuntu0.16.04.1+esm2",
|
||||
},
|
||||
},
|
||||
out: PackageFixStatuses{
|
||||
{
|
||||
Name: "libzstd1",
|
||||
FixedIn: "1.3.1+dfsg-1~ubuntu0.16.04.1+esm2",
|
||||
},
|
||||
{
|
||||
Name: "libzstd1",
|
||||
FixedIn: "1.3.1+dfsg-1~ubuntu0.16.04.1+esm1",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt.in.Sort()
|
||||
@@ -1717,3 +1739,103 @@ func TestVulnInfos_FilterByConfidenceOver(t *testing.T) {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestVulnInfo_PatchStatus(t *testing.T) {
|
||||
type fields struct {
|
||||
Confidences Confidences
|
||||
AffectedPackages PackageFixStatuses
|
||||
CpeURIs []string
|
||||
WindowsKBFixedIns []string
|
||||
}
|
||||
type args struct {
|
||||
packs Packages
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
fields fields
|
||||
args args
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "cpe",
|
||||
fields: fields{
|
||||
CpeURIs: []string{"cpe:/a:microsoft:internet_explorer:10"},
|
||||
},
|
||||
want: "",
|
||||
},
|
||||
{
|
||||
name: "package unfixed",
|
||||
fields: fields{
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "bash",
|
||||
NotFixedYet: true,
|
||||
},
|
||||
},
|
||||
},
|
||||
want: "unfixed",
|
||||
},
|
||||
{
|
||||
name: "package unknown",
|
||||
fields: fields{
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "bash",
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
packs: Packages{"bash": {
|
||||
Name: "bash",
|
||||
}},
|
||||
},
|
||||
want: "unknown",
|
||||
},
|
||||
{
|
||||
name: "package fixed",
|
||||
fields: fields{
|
||||
AffectedPackages: PackageFixStatuses{
|
||||
{
|
||||
Name: "bash",
|
||||
},
|
||||
},
|
||||
},
|
||||
args: args{
|
||||
packs: Packages{"bash": {
|
||||
Name: "bash",
|
||||
Version: "4.3-9.1",
|
||||
NewVersion: "5.0-4",
|
||||
}},
|
||||
},
|
||||
want: "fixed",
|
||||
},
|
||||
{
|
||||
name: "windows unfixed",
|
||||
fields: fields{
|
||||
Confidences: Confidences{WindowsUpdateSearch},
|
||||
},
|
||||
want: "unfixed",
|
||||
},
|
||||
{
|
||||
name: "windows fixed",
|
||||
fields: fields{
|
||||
Confidences: Confidences{WindowsUpdateSearch},
|
||||
WindowsKBFixedIns: []string{"000000"},
|
||||
},
|
||||
want: "fixed",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
v := VulnInfo{
|
||||
Confidences: tt.fields.Confidences,
|
||||
AffectedPackages: tt.fields.AffectedPackages,
|
||||
CpeURIs: tt.fields.CpeURIs,
|
||||
WindowsKBFixedIns: tt.fields.WindowsKBFixedIns,
|
||||
}
|
||||
if got := v.PatchStatus(tt.args.packs); got != tt.want {
|
||||
t.Errorf("VulnInfo.PatchStatus() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
487
oval/debian.go
487
oval/debian.go
@@ -4,17 +4,9 @@
|
||||
package oval
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/util"
|
||||
ovaldb "github.com/vulsio/goval-dictionary/db"
|
||||
ovalmodels "github.com/vulsio/goval-dictionary/models"
|
||||
)
|
||||
|
||||
// DebianBase is the base struct of Debian and Ubuntu
|
||||
@@ -22,102 +14,6 @@ type DebianBase struct {
|
||||
Base
|
||||
}
|
||||
|
||||
func (o DebianBase) update(r *models.ScanResult, defpacks defPacks) {
|
||||
for _, cve := range defpacks.def.Advisory.Cves {
|
||||
ovalContent := o.convertToModel(cve.CveID, &defpacks.def)
|
||||
if ovalContent == nil {
|
||||
continue
|
||||
}
|
||||
vinfo, ok := r.ScannedCves[cve.CveID]
|
||||
if !ok {
|
||||
logging.Log.Debugf("%s is newly detected by OVAL", cve.CveID)
|
||||
vinfo = models.VulnInfo{
|
||||
CveID: cve.CveID,
|
||||
Confidences: []models.Confidence{models.OvalMatch},
|
||||
CveContents: models.NewCveContents(*ovalContent),
|
||||
}
|
||||
} else {
|
||||
cveContents := vinfo.CveContents
|
||||
if _, ok := vinfo.CveContents[ovalContent.Type]; ok {
|
||||
logging.Log.Debugf("%s OVAL will be overwritten", cve.CveID)
|
||||
} else {
|
||||
logging.Log.Debugf("%s is also detected by OVAL", cve.CveID)
|
||||
cveContents = models.CveContents{}
|
||||
}
|
||||
vinfo.Confidences.AppendIfMissing(models.OvalMatch)
|
||||
cveContents[ovalContent.Type] = []models.CveContent{*ovalContent}
|
||||
vinfo.CveContents = cveContents
|
||||
}
|
||||
|
||||
// uniq(vinfo.AffectedPackages[].Name + defPacks.binpkgFixstat(map[string(=package name)]fixStat{}))
|
||||
collectBinpkgFixstat := defPacks{
|
||||
binpkgFixstat: map[string]fixStat{},
|
||||
}
|
||||
for packName, fixStatus := range defpacks.binpkgFixstat {
|
||||
collectBinpkgFixstat.binpkgFixstat[packName] = fixStatus
|
||||
}
|
||||
|
||||
for _, pack := range vinfo.AffectedPackages {
|
||||
collectBinpkgFixstat.binpkgFixstat[pack.Name] = fixStat{
|
||||
notFixedYet: pack.NotFixedYet,
|
||||
fixedIn: pack.FixedIn,
|
||||
isSrcPack: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Update package status of source packages.
|
||||
// In the case of Debian based Linux, sometimes source package name is defined as affected package in OVAL.
|
||||
// To display binary package name showed in apt-get, need to convert source name to binary name.
|
||||
for binName := range defpacks.binpkgFixstat {
|
||||
if srcPack, ok := r.SrcPackages.FindByBinName(binName); ok {
|
||||
for _, p := range defpacks.def.AffectedPacks {
|
||||
if p.Name == srcPack.Name {
|
||||
collectBinpkgFixstat.binpkgFixstat[binName] = fixStat{
|
||||
notFixedYet: p.NotFixedYet,
|
||||
fixedIn: p.Version,
|
||||
isSrcPack: true,
|
||||
srcPackName: srcPack.Name,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
vinfo.AffectedPackages = collectBinpkgFixstat.toPackStatuses()
|
||||
vinfo.AffectedPackages.Sort()
|
||||
r.ScannedCves[cve.CveID] = vinfo
|
||||
}
|
||||
}
|
||||
|
||||
func (o DebianBase) convertToModel(cveID string, def *ovalmodels.Definition) *models.CveContent {
|
||||
refs := make([]models.Reference, 0, len(def.References))
|
||||
for _, r := range def.References {
|
||||
refs = append(refs, models.Reference{
|
||||
Link: r.RefURL,
|
||||
Source: r.Source,
|
||||
RefID: r.RefID,
|
||||
})
|
||||
}
|
||||
|
||||
for _, cve := range def.Advisory.Cves {
|
||||
if cve.CveID != cveID {
|
||||
continue
|
||||
}
|
||||
|
||||
return &models.CveContent{
|
||||
Type: models.NewCveContentType(o.family),
|
||||
CveID: cve.CveID,
|
||||
Title: def.Title,
|
||||
Summary: def.Description,
|
||||
Cvss2Severity: def.Advisory.Severity,
|
||||
Cvss3Severity: def.Advisory.Severity,
|
||||
References: refs,
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Debian is the interface for Debian OVAL
|
||||
type Debian struct {
|
||||
DebianBase
|
||||
@@ -137,67 +33,8 @@ func NewDebian(driver ovaldb.DB, baseURL string) Debian {
|
||||
}
|
||||
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o Debian) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
|
||||
//Debian's uname gives both of kernel release(uname -r), version(kernel-image version)
|
||||
linuxImage := "linux-image-" + r.RunningKernel.Release
|
||||
|
||||
// Add linux and set the version of running kernel to search OVAL.
|
||||
if r.Container.ContainerID == "" {
|
||||
if r.RunningKernel.Version != "" {
|
||||
newVer := ""
|
||||
if p, ok := r.Packages[linuxImage]; ok {
|
||||
newVer = p.NewVersion
|
||||
}
|
||||
r.Packages["linux"] = models.Package{
|
||||
Name: "linux",
|
||||
Version: r.RunningKernel.Version,
|
||||
NewVersion: newVer,
|
||||
}
|
||||
} else {
|
||||
logging.Log.Warnf("Since the exact kernel version is not available, the vulnerability in the linux package is not detected.")
|
||||
}
|
||||
}
|
||||
|
||||
var relatedDefs ovalResult
|
||||
if o.driver == nil {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.baseURL); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions via HTTP. err: %w", err)
|
||||
}
|
||||
} else {
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(r, o.driver); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions from DB. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
delete(r.Packages, "linux")
|
||||
|
||||
for _, defPacks := range relatedDefs.entries {
|
||||
// Remove "linux" added above for oval search
|
||||
// linux is not a real package name (key of affected packages in OVAL)
|
||||
if notFixedYet, ok := defPacks.binpkgFixstat["linux"]; ok {
|
||||
defPacks.binpkgFixstat[linuxImage] = notFixedYet
|
||||
delete(defPacks.binpkgFixstat, "linux")
|
||||
for i, p := range defPacks.def.AffectedPacks {
|
||||
if p.Name == "linux" {
|
||||
p.Name = linuxImage
|
||||
defPacks.def.AffectedPacks[i] = p
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
o.update(r, defPacks)
|
||||
}
|
||||
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if conts, ok := vuln.CveContents[models.Debian]; ok {
|
||||
for i, cont := range conts {
|
||||
cont.SourceLink = "https://security-tracker.debian.org/tracker/" + cont.CveID
|
||||
vuln.CveContents[models.Debian][i] = cont
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(relatedDefs.entries), nil
|
||||
func (o Debian) FillWithOval(_ *models.ScanResult) (nCVEs int, err error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
// Ubuntu is the interface for Debian OVAL
|
||||
@@ -219,322 +56,6 @@ func NewUbuntu(driver ovaldb.DB, baseURL string) Ubuntu {
|
||||
}
|
||||
|
||||
// FillWithOval returns scan result after updating CVE info by OVAL
|
||||
func (o Ubuntu) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
switch util.Major(r.Release) {
|
||||
case "14":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-azure",
|
||||
"linux-lts-xenial",
|
||||
"linux-meta",
|
||||
"linux-meta-aws",
|
||||
"linux-meta-azure",
|
||||
"linux-meta-lts-xenial",
|
||||
"linux-signed",
|
||||
"linux-signed-azure",
|
||||
"linux-signed-lts-xenial",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "16":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-aws-hwe",
|
||||
"linux-azure",
|
||||
"linux-euclid",
|
||||
"linux-flo",
|
||||
"linux-gcp",
|
||||
"linux-gke",
|
||||
"linux-goldfish",
|
||||
"linux-hwe",
|
||||
"linux-kvm",
|
||||
"linux-mako",
|
||||
"linux-meta",
|
||||
"linux-meta-aws",
|
||||
"linux-meta-aws-hwe",
|
||||
"linux-meta-azure",
|
||||
"linux-meta-gcp",
|
||||
"linux-meta-hwe",
|
||||
"linux-meta-kvm",
|
||||
"linux-meta-oracle",
|
||||
"linux-meta-raspi2",
|
||||
"linux-meta-snapdragon",
|
||||
"linux-oem",
|
||||
"linux-oracle",
|
||||
"linux-raspi2",
|
||||
"linux-signed",
|
||||
"linux-signed-azure",
|
||||
"linux-signed-gcp",
|
||||
"linux-signed-hwe",
|
||||
"linux-signed-oracle",
|
||||
"linux-snapdragon",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "18":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-aws-5.0",
|
||||
"linux-azure",
|
||||
"linux-gcp",
|
||||
"linux-gcp-5.3",
|
||||
"linux-gke-4.15",
|
||||
"linux-gke-5.0",
|
||||
"linux-gke-5.3",
|
||||
"linux-hwe",
|
||||
"linux-kvm",
|
||||
"linux-meta",
|
||||
"linux-meta-aws",
|
||||
"linux-meta-aws-5.0",
|
||||
"linux-meta-azure",
|
||||
"linux-meta-gcp",
|
||||
"linux-meta-gcp-5.3",
|
||||
"linux-meta-gke-4.15",
|
||||
"linux-meta-gke-5.0",
|
||||
"linux-meta-gke-5.3",
|
||||
"linux-meta-hwe",
|
||||
"linux-meta-kvm",
|
||||
"linux-meta-oem",
|
||||
"linux-meta-oem-osp1",
|
||||
"linux-meta-oracle",
|
||||
"linux-meta-oracle-5.0",
|
||||
"linux-meta-oracle-5.3",
|
||||
"linux-meta-raspi2",
|
||||
"linux-meta-raspi2-5.3",
|
||||
"linux-meta-snapdragon",
|
||||
"linux-oem",
|
||||
"linux-oem-osp1",
|
||||
"linux-oracle",
|
||||
"linux-oracle-5.0",
|
||||
"linux-oracle-5.3",
|
||||
"linux-raspi2",
|
||||
"linux-raspi2-5.3",
|
||||
"linux-signed",
|
||||
"linux-signed-azure",
|
||||
"linux-signed-gcp",
|
||||
"linux-signed-gcp-5.3",
|
||||
"linux-signed-gke-4.15",
|
||||
"linux-signed-gke-5.0",
|
||||
"linux-signed-gke-5.3",
|
||||
"linux-signed-hwe",
|
||||
"linux-signed-oem",
|
||||
"linux-signed-oem-osp1",
|
||||
"linux-signed-oracle",
|
||||
"linux-signed-oracle-5.0",
|
||||
"linux-signed-oracle-5.3",
|
||||
"linux-snapdragon",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "20":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-azure",
|
||||
"linux-gcp",
|
||||
"linux-kvm",
|
||||
"linux-meta",
|
||||
"linux-meta-aws",
|
||||
"linux-meta-azure",
|
||||
"linux-meta-gcp",
|
||||
"linux-meta-kvm",
|
||||
"linux-meta-oem-5.6",
|
||||
"linux-meta-oracle",
|
||||
"linux-meta-raspi",
|
||||
"linux-meta-riscv",
|
||||
"linux-oem-5.6",
|
||||
"linux-oracle",
|
||||
"linux-raspi",
|
||||
"linux-raspi2",
|
||||
"linux-riscv",
|
||||
"linux-signed",
|
||||
"linux-signed-azure",
|
||||
"linux-signed-gcp",
|
||||
"linux-signed-oem-5.6",
|
||||
"linux-signed-oracle",
|
||||
"linux",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "21":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-base-sgx",
|
||||
"linux-base",
|
||||
"linux-cloud-tools-common",
|
||||
"linux-cloud-tools-generic",
|
||||
"linux-cloud-tools-lowlatency",
|
||||
"linux-cloud-tools-virtual",
|
||||
"linux-gcp",
|
||||
"linux-generic",
|
||||
"linux-gke",
|
||||
"linux-headers-aws",
|
||||
"linux-headers-gcp",
|
||||
"linux-headers-gke",
|
||||
"linux-headers-oracle",
|
||||
"linux-image-aws",
|
||||
"linux-image-extra-virtual",
|
||||
"linux-image-gcp",
|
||||
"linux-image-generic",
|
||||
"linux-image-gke",
|
||||
"linux-image-lowlatency",
|
||||
"linux-image-oracle",
|
||||
"linux-image-virtual",
|
||||
"linux-lowlatency",
|
||||
"linux-modules-extra-aws",
|
||||
"linux-modules-extra-gcp",
|
||||
"linux-modules-extra-gke",
|
||||
"linux-oracle",
|
||||
"linux-tools-aws",
|
||||
"linux-tools-common",
|
||||
"linux-tools-gcp",
|
||||
"linux-tools-generic",
|
||||
"linux-tools-gke",
|
||||
"linux-tools-host",
|
||||
"linux-tools-lowlatency",
|
||||
"linux-tools-oracle",
|
||||
"linux-tools-virtual",
|
||||
"linux-virtual",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
case "22":
|
||||
kernelNamesInOval := []string{
|
||||
"linux-aws",
|
||||
"linux-azure",
|
||||
"linux-gcp",
|
||||
"linux-generic",
|
||||
"linux-gke",
|
||||
"linux-header-aws",
|
||||
"linux-header-azure",
|
||||
"linux-header-gcp",
|
||||
"linux-header-generic",
|
||||
"linux-header-gke",
|
||||
"linux-header-oracle",
|
||||
"linux-image-aws",
|
||||
"linux-image-azure",
|
||||
"linux-image-gcp",
|
||||
"linux-image-generic",
|
||||
"linux-image-gke",
|
||||
"linux-image-oracle",
|
||||
"linux-oracle",
|
||||
"linux-tools-aws",
|
||||
"linux-tools-azure",
|
||||
"linux-tools-common",
|
||||
"linux-tools-gcp",
|
||||
"linux-tools-generic",
|
||||
"linux-tools-gke",
|
||||
"linux-tools-oracle",
|
||||
}
|
||||
return o.fillWithOval(r, kernelNamesInOval)
|
||||
}
|
||||
return 0, fmt.Errorf("Ubuntu %s is not support for now", r.Release)
|
||||
}
|
||||
|
||||
func (o Ubuntu) fillWithOval(r *models.ScanResult, kernelNamesInOval []string) (nCVEs int, err error) {
|
||||
linuxImage := "linux-image-" + r.RunningKernel.Release
|
||||
runningKernelVersion := ""
|
||||
kernelPkgInOVAL := ""
|
||||
isOVALKernelPkgAdded := false
|
||||
unusedKernels := []models.Package{}
|
||||
copiedSourcePkgs := models.SrcPackages{}
|
||||
|
||||
if r.Container.ContainerID == "" {
|
||||
if v, ok := r.Packages[linuxImage]; ok {
|
||||
runningKernelVersion = v.Version
|
||||
} else {
|
||||
logging.Log.Warnf("Unable to detect vulns of running kernel because the version of the running kernel is unknown. server: %s",
|
||||
r.ServerName)
|
||||
}
|
||||
|
||||
for _, n := range kernelNamesInOval {
|
||||
if p, ok := r.Packages[n]; ok {
|
||||
kernelPkgInOVAL = p.Name
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// remove unused kernels from packages to prevent detecting vulns of unused kernel
|
||||
for _, n := range kernelNamesInOval {
|
||||
if v, ok := r.Packages[n]; ok {
|
||||
unusedKernels = append(unusedKernels, v)
|
||||
delete(r.Packages, n)
|
||||
}
|
||||
}
|
||||
|
||||
// Remove linux-* in order to detect only vulnerabilities in the running kernel.
|
||||
for n := range r.Packages {
|
||||
if n != kernelPkgInOVAL && strings.HasPrefix(n, "linux-") {
|
||||
unusedKernels = append(unusedKernels, r.Packages[n])
|
||||
delete(r.Packages, n)
|
||||
}
|
||||
}
|
||||
for srcPackName, srcPack := range r.SrcPackages {
|
||||
copiedSourcePkgs[srcPackName] = srcPack
|
||||
targetBinaryNames := []string{}
|
||||
for _, n := range srcPack.BinaryNames {
|
||||
if n == kernelPkgInOVAL || !strings.HasPrefix(n, "linux-") {
|
||||
targetBinaryNames = append(targetBinaryNames, n)
|
||||
}
|
||||
}
|
||||
srcPack.BinaryNames = targetBinaryNames
|
||||
r.SrcPackages[srcPackName] = srcPack
|
||||
}
|
||||
|
||||
if kernelPkgInOVAL == "" {
|
||||
logging.Log.Warnf("The OVAL name of the running kernel image %+v is not found. So vulns of `linux` wll be detected. server: %s",
|
||||
r.RunningKernel, r.ServerName)
|
||||
kernelPkgInOVAL = "linux"
|
||||
isOVALKernelPkgAdded = true
|
||||
}
|
||||
|
||||
if runningKernelVersion != "" {
|
||||
r.Packages[kernelPkgInOVAL] = models.Package{
|
||||
Name: kernelPkgInOVAL,
|
||||
Version: runningKernelVersion,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var relatedDefs ovalResult
|
||||
if o.driver == nil {
|
||||
if relatedDefs, err = getDefsByPackNameViaHTTP(r, o.baseURL); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions via HTTP. err: %w", err)
|
||||
}
|
||||
} else {
|
||||
if relatedDefs, err = getDefsByPackNameFromOvalDB(r, o.driver); err != nil {
|
||||
return 0, xerrors.Errorf("Failed to get Definitions from DB. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
if isOVALKernelPkgAdded {
|
||||
delete(r.Packages, kernelPkgInOVAL)
|
||||
}
|
||||
for _, p := range unusedKernels {
|
||||
r.Packages[p.Name] = p
|
||||
}
|
||||
r.SrcPackages = copiedSourcePkgs
|
||||
|
||||
for _, defPacks := range relatedDefs.entries {
|
||||
// Remove "linux" added above for searching oval
|
||||
// "linux" is not a real package name (key of affected packages in OVAL)
|
||||
if nfy, ok := defPacks.binpkgFixstat[kernelPkgInOVAL]; isOVALKernelPkgAdded && ok {
|
||||
defPacks.binpkgFixstat[linuxImage] = nfy
|
||||
delete(defPacks.binpkgFixstat, kernelPkgInOVAL)
|
||||
for i, p := range defPacks.def.AffectedPacks {
|
||||
if p.Name == kernelPkgInOVAL {
|
||||
p.Name = linuxImage
|
||||
defPacks.def.AffectedPacks[i] = p
|
||||
}
|
||||
}
|
||||
}
|
||||
o.update(r, defPacks)
|
||||
}
|
||||
|
||||
for _, vuln := range r.ScannedCves {
|
||||
if conts, ok := vuln.CveContents[models.Ubuntu]; ok {
|
||||
for i, cont := range conts {
|
||||
cont.SourceLink = "http://people.ubuntu.com/~ubuntu-security/cve/" + cont.CveID
|
||||
vuln.CveContents[models.Ubuntu][i] = cont
|
||||
}
|
||||
}
|
||||
}
|
||||
return len(relatedDefs.entries), nil
|
||||
func (o Ubuntu) FillWithOval(_ *models.ScanResult) (nCVEs int, err error) {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
@@ -1,120 +0,0 @@
|
||||
//go:build !scanner
|
||||
// +build !scanner
|
||||
|
||||
package oval
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/models"
|
||||
ovalmodels "github.com/vulsio/goval-dictionary/models"
|
||||
)
|
||||
|
||||
func TestPackNamesOfUpdateDebian(t *testing.T) {
|
||||
var tests = []struct {
|
||||
in models.ScanResult
|
||||
defPacks defPacks
|
||||
out models.ScanResult
|
||||
}{
|
||||
{
|
||||
in: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2000-1000": models.VulnInfo{
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{Name: "packA"},
|
||||
{Name: "packC"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
defPacks: defPacks{
|
||||
def: ovalmodels.Definition{
|
||||
Advisory: ovalmodels.Advisory{
|
||||
Cves: []ovalmodels.Cve{{CveID: "CVE-2000-1000"}},
|
||||
},
|
||||
},
|
||||
binpkgFixstat: map[string]fixStat{
|
||||
"packB": {
|
||||
notFixedYet: true,
|
||||
fixedIn: "1.0.0",
|
||||
},
|
||||
},
|
||||
},
|
||||
out: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2000-1000": models.VulnInfo{
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{Name: "packA"},
|
||||
{Name: "packB", NotFixedYet: true, FixedIn: "1.0.0"},
|
||||
{Name: "packC"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2000-1000": models.VulnInfo{
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{Name: "packA"},
|
||||
},
|
||||
},
|
||||
"CVE-2000-1001": models.VulnInfo{
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{Name: "packC"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
defPacks: defPacks{
|
||||
def: ovalmodels.Definition{
|
||||
Advisory: ovalmodels.Advisory{
|
||||
Cves: []ovalmodels.Cve{
|
||||
{
|
||||
CveID: "CVE-2000-1000",
|
||||
},
|
||||
{
|
||||
CveID: "CVE-2000-1001",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
binpkgFixstat: map[string]fixStat{
|
||||
"packB": {
|
||||
notFixedYet: false,
|
||||
},
|
||||
},
|
||||
},
|
||||
out: models.ScanResult{
|
||||
ScannedCves: models.VulnInfos{
|
||||
"CVE-2000-1000": models.VulnInfo{
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{Name: "packA"},
|
||||
{Name: "packB", NotFixedYet: false},
|
||||
},
|
||||
},
|
||||
"CVE-2000-1001": models.VulnInfo{
|
||||
AffectedPackages: models.PackageFixStatuses{
|
||||
{Name: "packB", NotFixedYet: false},
|
||||
{Name: "packC"},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// util.Log = util.NewCustomLogger()
|
||||
for i, tt := range tests {
|
||||
Debian{}.update(&tt.in, tt.defPacks)
|
||||
for cveid := range tt.out.ScannedCves {
|
||||
e := tt.out.ScannedCves[cveid].AffectedPackages
|
||||
a := tt.in.ScannedCves[cveid].AffectedPackages
|
||||
if !reflect.DeepEqual(a, e) {
|
||||
t.Errorf("[%d] expected: %v\n actual: %v\n", i, e, a)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -133,9 +133,9 @@ func newOvalDB(cnf config.VulnDictInterface) (ovaldb.DB, error) {
|
||||
if cnf.GetType() == "sqlite3" {
|
||||
path = cnf.GetSQLite3Path()
|
||||
}
|
||||
driver, locked, err := ovaldb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), ovaldb.Option{})
|
||||
driver, err := ovaldb.NewDB(cnf.GetType(), path, cnf.GetDebugSQL(), ovaldb.Option{})
|
||||
if err != nil {
|
||||
if locked {
|
||||
if xerrors.Is(err, ovaldb.ErrDBLocked) {
|
||||
return nil, xerrors.Errorf("Failed to init OVAL DB. SQLite3: %s is locked. err: %w, ", cnf.GetSQLite3Path(), err)
|
||||
}
|
||||
return nil, xerrors.Errorf("Failed to init OVAL DB. DB Path: %s, err: %w", path, err)
|
||||
|
||||
@@ -68,12 +68,15 @@ func (o RedHatBase) FillWithOval(r *models.ScanResult) (nCVEs int, err error) {
|
||||
for _, d := range vuln.DistroAdvisories {
|
||||
if conts, ok := vuln.CveContents[models.Amazon]; ok {
|
||||
for i, cont := range conts {
|
||||
if strings.HasPrefix(d.AdvisoryID, "ALAS2022-") {
|
||||
cont.SourceLink = fmt.Sprintf("https://alas.aws.amazon.com/AL2022/%s.html", strings.ReplaceAll(d.AdvisoryID, "ALAS2022", "ALAS"))
|
||||
} else if strings.HasPrefix(d.AdvisoryID, "ALAS2-") {
|
||||
cont.SourceLink = fmt.Sprintf("https://alas.aws.amazon.com/AL2/%s.html", strings.ReplaceAll(d.AdvisoryID, "ALAS2", "ALAS"))
|
||||
} else if strings.HasPrefix(d.AdvisoryID, "ALAS-") {
|
||||
switch {
|
||||
case strings.HasPrefix(d.AdvisoryID, "ALAS-"):
|
||||
cont.SourceLink = fmt.Sprintf("https://alas.aws.amazon.com/%s.html", d.AdvisoryID)
|
||||
case strings.HasPrefix(d.AdvisoryID, "ALAS2-"):
|
||||
cont.SourceLink = fmt.Sprintf("https://alas.aws.amazon.com/AL2/%s.html", strings.ReplaceAll(d.AdvisoryID, "ALAS2", "ALAS"))
|
||||
case strings.HasPrefix(d.AdvisoryID, "ALAS2022-"):
|
||||
cont.SourceLink = fmt.Sprintf("https://alas.aws.amazon.com/AL2022/%s.html", strings.ReplaceAll(d.AdvisoryID, "ALAS2022", "ALAS"))
|
||||
case strings.HasPrefix(d.AdvisoryID, "ALAS2023-"):
|
||||
cont.SourceLink = fmt.Sprintf("https://alas.aws.amazon.com/AL2023/%s.html", strings.ReplaceAll(d.AdvisoryID, "ALAS2023", "ALAS"))
|
||||
}
|
||||
vuln.CveContents[models.Amazon][i] = cont
|
||||
}
|
||||
|
||||
22
oval/util.go
22
oval/util.go
@@ -112,12 +112,13 @@ func getDefsByPackNameViaHTTP(r *models.ScanResult, url string) (relatedDefs ova
|
||||
case constant.CentOS:
|
||||
ovalRelease = strings.TrimPrefix(r.Release, "stream")
|
||||
case constant.Amazon:
|
||||
switch strings.Fields(r.Release)[0] {
|
||||
case "2022":
|
||||
ovalRelease = "2022"
|
||||
case "2":
|
||||
ovalRelease = "2"
|
||||
switch s := strings.Fields(r.Release)[0]; s {
|
||||
case "1", "2", "2022", "2023", "2025", "2027", "2029":
|
||||
ovalRelease = s
|
||||
default:
|
||||
if _, err := time.Parse("2006.01", s); err != nil {
|
||||
return relatedDefs, xerrors.Errorf(`Failed to detect amazon version. err: unexpected Amazon Linux 1 version format. expected: "yyyy.MM", actual: "%s". err: %w`, s, err)
|
||||
}
|
||||
ovalRelease = "1"
|
||||
}
|
||||
}
|
||||
@@ -274,12 +275,13 @@ func getDefsByPackNameFromOvalDB(r *models.ScanResult, driver ovaldb.DB) (relate
|
||||
case constant.CentOS:
|
||||
ovalRelease = strings.TrimPrefix(r.Release, "stream")
|
||||
case constant.Amazon:
|
||||
switch strings.Fields(r.Release)[0] {
|
||||
case "2022":
|
||||
ovalRelease = "2022"
|
||||
case "2":
|
||||
ovalRelease = "2"
|
||||
switch s := strings.Fields(r.Release)[0]; s {
|
||||
case "1", "2", "2022", "2023", "2025", "2027", "2029":
|
||||
ovalRelease = s
|
||||
default:
|
||||
if _, err := time.Parse("2006.01", s); err != nil {
|
||||
return relatedDefs, xerrors.Errorf(`Failed to detect amazon version. err: unexpected Amazon Linux 1 version format. expected: "yyyy.MM", actual: "%s". err: %w`, s, err)
|
||||
}
|
||||
ovalRelease = "1"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ type ChatWorkWriter struct {
|
||||
Proxy string
|
||||
}
|
||||
|
||||
// Write results to ChatWork
|
||||
func (w ChatWorkWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
|
||||
for _, r := range rs {
|
||||
|
||||
@@ -23,6 +23,7 @@ type EMailWriter struct {
|
||||
Cnf config.SMTPConf
|
||||
}
|
||||
|
||||
// Write results to Email
|
||||
func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
var message string
|
||||
sender := NewEMailSender(w.Cnf)
|
||||
@@ -31,7 +32,7 @@ func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
if w.FormatOneEMail {
|
||||
message += formatFullPlainText(r) + "\r\n\r\n"
|
||||
mm := r.ScannedCves.CountGroupBySeverity()
|
||||
keys := []string{"High", "Medium", "Low", "Unknown"}
|
||||
keys := []string{"Critical", "High", "Medium", "Low", "Unknown"}
|
||||
for _, k := range keys {
|
||||
m[k] += mm[k]
|
||||
}
|
||||
@@ -60,9 +61,9 @@ func (w EMailWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
summary := fmt.Sprintf("Total: %d (High:%d Medium:%d Low:%d ?:%d)",
|
||||
m["High"]+m["Medium"]+m["Low"]+m["Unknown"],
|
||||
m["High"], m["Medium"], m["Low"], m["Unknown"])
|
||||
summary := fmt.Sprintf("Total: %d (Critical:%d High:%d Medium:%d Low:%d ?:%d)",
|
||||
m["Critical"]+m["High"]+m["Medium"]+m["Low"]+m["Unknown"],
|
||||
m["Critical"], m["High"], m["Medium"], m["Low"], m["Unknown"])
|
||||
|
||||
origmessage := message
|
||||
if w.FormatOneEMail {
|
||||
|
||||
@@ -21,11 +21,12 @@ type GoogleChatWriter struct {
|
||||
Proxy string
|
||||
}
|
||||
|
||||
// Write results to Google Chat
|
||||
func (w GoogleChatWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
re := regexp.MustCompile(w.Cnf.ServerNameRegexp)
|
||||
|
||||
for _, r := range rs {
|
||||
if re.Match([]byte(r.FormatServerName())) {
|
||||
if re.MatchString(r.FormatServerName()) {
|
||||
continue
|
||||
}
|
||||
msgs := []string{fmt.Sprintf("*%s*\n%s\t%s\t%s",
|
||||
@@ -72,11 +73,10 @@ func (w GoogleChatWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
}
|
||||
|
||||
func (w GoogleChatWriter) postMessage(message string) error {
|
||||
uri := fmt.Sprintf("%s", w.Cnf.WebHookURL)
|
||||
payload := `{"text": "` + message + `" }`
|
||||
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, uri, bytes.NewBuffer([]byte(payload)))
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, w.Cnf.WebHookURL, bytes.NewBuffer([]byte(payload)))
|
||||
defer cancel()
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -87,7 +87,7 @@ func (w GoogleChatWriter) postMessage(message string) error {
|
||||
return err
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if checkResponse(resp) != nil && err != nil {
|
||||
if w.checkResponse(resp) != nil && err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
@@ -2,26 +2,33 @@ package reporter
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/CycloneDX/cyclonedx-go"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/reporter/sbom"
|
||||
)
|
||||
|
||||
// LocalFileWriter writes results to a local file.
|
||||
type LocalFileWriter struct {
|
||||
CurrentDir string
|
||||
DiffPlus bool
|
||||
DiffMinus bool
|
||||
FormatJSON bool
|
||||
FormatCsv bool
|
||||
FormatFullText bool
|
||||
FormatOneLineText bool
|
||||
FormatList bool
|
||||
Gzip bool
|
||||
CurrentDir string
|
||||
DiffPlus bool
|
||||
DiffMinus bool
|
||||
FormatJSON bool
|
||||
FormatCsv bool
|
||||
FormatFullText bool
|
||||
FormatOneLineText bool
|
||||
FormatList bool
|
||||
FormatCycloneDXJSON bool
|
||||
FormatCycloneDXXML bool
|
||||
Gzip bool
|
||||
}
|
||||
|
||||
// Write results to Local File
|
||||
func (w LocalFileWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
if w.FormatOneLineText {
|
||||
path := filepath.Join(w.CurrentDir, "summary.txt")
|
||||
@@ -86,6 +93,28 @@ func (w LocalFileWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
}
|
||||
}
|
||||
|
||||
if w.FormatCycloneDXJSON {
|
||||
bs, err := sbom.GenerateCycloneDX(cyclonedx.BOMFileFormatJSON, r)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to generate CycloneDX JSON. err: %w", err)
|
||||
}
|
||||
p := fmt.Sprintf("%s_cyclonedx.json", path)
|
||||
if err := w.writeFile(p, bs, 0600); err != nil {
|
||||
return xerrors.Errorf("Failed to write CycloneDX JSON. path: %s, err: %w", p, err)
|
||||
}
|
||||
}
|
||||
|
||||
if w.FormatCycloneDXXML {
|
||||
bs, err := sbom.GenerateCycloneDX(cyclonedx.BOMFileFormatXML, r)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to generate CycloneDX XML. err: %w", err)
|
||||
}
|
||||
p := fmt.Sprintf("%s_cyclonedx.xml", path)
|
||||
if err := w.writeFile(p, bs, 0600); err != nil {
|
||||
return xerrors.Errorf("Failed to write CycloneDX XML. path: %s, err: %w", p, err)
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
561
reporter/sbom/cyclonedx.go
Normal file
561
reporter/sbom/cyclonedx.go
Normal file
@@ -0,0 +1,561 @@
|
||||
package sbom
|
||||
|
||||
import (
	"bytes"
	"fmt"
	"sort"
	"strconv"
	"strings"
	"time"

	cdx "github.com/CycloneDX/cyclonedx-go"
	"github.com/google/uuid"
	"github.com/package-url/packageurl-go"
	"golang.org/x/exp/maps"
	"golang.org/x/xerrors"

	"github.com/future-architect/vuls/constant"
	"github.com/future-architect/vuls/models"
)
|
||||
|
||||
// GenerateCycloneDX generates a string in CycloneDX format
|
||||
func GenerateCycloneDX(format cdx.BOMFileFormat, r models.ScanResult) ([]byte, error) {
|
||||
bom := cdx.NewBOM()
|
||||
bom.SerialNumber = uuid.New().URN()
|
||||
bom.Metadata = cdxMetadata(r)
|
||||
bom.Components, bom.Dependencies, bom.Vulnerabilities = cdxComponents(r, bom.Metadata.Component.BOMRef)
|
||||
|
||||
buf := new(bytes.Buffer)
|
||||
enc := cdx.NewBOMEncoder(buf, format)
|
||||
enc.SetPretty(true)
|
||||
if err := enc.Encode(bom); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to encode CycloneDX. err: %w", err)
|
||||
}
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func cdxMetadata(result models.ScanResult) *cdx.Metadata {
|
||||
metadata := cdx.Metadata{
|
||||
Timestamp: result.ReportedAt.Format(time.RFC3339),
|
||||
Tools: &[]cdx.Tool{
|
||||
{
|
||||
Vendor: "future-architect",
|
||||
Name: "vuls",
|
||||
Version: fmt.Sprintf("%s-%s", result.ReportedVersion, result.ReportedRevision),
|
||||
},
|
||||
},
|
||||
Component: &cdx.Component{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeOS,
|
||||
Name: result.ServerName,
|
||||
},
|
||||
}
|
||||
return &metadata
|
||||
}
|
||||
|
||||
func cdxComponents(result models.ScanResult, metaBomRef string) (*[]cdx.Component, *[]cdx.Dependency, *[]cdx.Vulnerability) {
|
||||
var components []cdx.Component
|
||||
bomRefs := map[string][]string{}
|
||||
|
||||
ospkgToPURL := map[string]string{}
|
||||
if ospkgComps := ospkgToCdxComponents(result.Family, result.Release, result.RunningKernel, result.Packages, result.SrcPackages, ospkgToPURL); ospkgComps != nil {
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], ospkgComps[0].BOMRef)
|
||||
for _, comp := range ospkgComps[1:] {
|
||||
bomRefs[ospkgComps[0].BOMRef] = append(bomRefs[ospkgComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, ospkgComps...)
|
||||
}
|
||||
|
||||
if cpeComps := cpeToCdxComponents(result.ScannedCves); cpeComps != nil {
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], cpeComps[0].BOMRef)
|
||||
for _, comp := range cpeComps[1:] {
|
||||
bomRefs[cpeComps[0].BOMRef] = append(bomRefs[cpeComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, cpeComps...)
|
||||
}
|
||||
|
||||
libpkgToPURL := map[string]map[string]string{}
|
||||
for _, libscanner := range result.LibraryScanners {
|
||||
libpkgToPURL[libscanner.LockfilePath] = map[string]string{}
|
||||
|
||||
libpkgComps := libpkgToCdxComponents(libscanner, libpkgToPURL)
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], libpkgComps[0].BOMRef)
|
||||
for _, comp := range libpkgComps[1:] {
|
||||
bomRefs[libpkgComps[0].BOMRef] = append(bomRefs[libpkgComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, libpkgComps...)
|
||||
}
|
||||
|
||||
ghpkgToPURL := map[string]map[string]string{}
|
||||
for _, ghm := range result.GitHubManifests {
|
||||
ghpkgToPURL[ghm.RepoURLFilename()] = map[string]string{}
|
||||
|
||||
ghpkgComps := ghpkgToCdxComponents(ghm, ghpkgToPURL)
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], ghpkgComps[0].BOMRef)
|
||||
for _, comp := range ghpkgComps[1:] {
|
||||
bomRefs[ghpkgComps[0].BOMRef] = append(bomRefs[ghpkgComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, ghpkgComps...)
|
||||
}
|
||||
|
||||
wppkgToPURL := map[string]string{}
|
||||
if wppkgComps := wppkgToCdxComponents(result.WordPressPackages, wppkgToPURL); wppkgComps != nil {
|
||||
bomRefs[metaBomRef] = append(bomRefs[metaBomRef], wppkgComps[0].BOMRef)
|
||||
for _, comp := range wppkgComps[1:] {
|
||||
bomRefs[wppkgComps[0].BOMRef] = append(bomRefs[wppkgComps[0].BOMRef], comp.BOMRef)
|
||||
}
|
||||
components = append(components, wppkgComps...)
|
||||
}
|
||||
|
||||
return &components, cdxDependencies(bomRefs), cdxVulnerabilities(result, ospkgToPURL, libpkgToPURL, ghpkgToPURL, wppkgToPURL)
|
||||
}
|
||||
|
||||
func osToCdxComponent(family, release, runningKernelRelease, runningKernelVersion string) cdx.Component {
|
||||
props := []cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: "Package",
|
||||
},
|
||||
}
|
||||
if runningKernelRelease != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "RunningKernelRelease",
|
||||
Value: runningKernelRelease,
|
||||
})
|
||||
}
|
||||
if runningKernelVersion != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "RunningKernelVersion",
|
||||
Value: runningKernelVersion,
|
||||
})
|
||||
}
|
||||
return cdx.Component{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeOS,
|
||||
Name: family,
|
||||
Version: release,
|
||||
Properties: &props,
|
||||
}
|
||||
}
|
||||
|
||||
func ospkgToCdxComponents(family, release string, runningKernel models.Kernel, binpkgs models.Packages, srcpkgs models.SrcPackages, ospkgToPURL map[string]string) []cdx.Component {
|
||||
if family == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
components := []cdx.Component{
|
||||
osToCdxComponent(family, release, runningKernel.Release, runningKernel.Version),
|
||||
}
|
||||
|
||||
if len(binpkgs) == 0 {
|
||||
return components
|
||||
}
|
||||
|
||||
type srcpkg struct {
|
||||
name string
|
||||
version string
|
||||
arch string
|
||||
}
|
||||
binToSrc := map[string]srcpkg{}
|
||||
for _, pack := range srcpkgs {
|
||||
for _, binpkg := range pack.BinaryNames {
|
||||
binToSrc[binpkg] = srcpkg{
|
||||
name: pack.Name,
|
||||
version: pack.Version,
|
||||
arch: pack.Arch,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, pack := range binpkgs {
|
||||
var props []cdx.Property
|
||||
if p, ok := binToSrc[pack.Name]; ok {
|
||||
if p.name != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "future-architect:vuls:SrcName",
|
||||
Value: p.name,
|
||||
})
|
||||
}
|
||||
if p.version != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "future-architect:vuls:SrcVersion",
|
||||
Value: p.version,
|
||||
})
|
||||
}
|
||||
if p.arch != "" {
|
||||
props = append(props, cdx.Property{
|
||||
Name: "future-architect:vuls:SrcArch",
|
||||
Value: p.arch,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
purl := toPkgPURL(family, release, pack.Name, pack.Version, pack.Release, pack.Arch, pack.Repository)
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: purl,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: pack.Name,
|
||||
Version: pack.Version,
|
||||
PackageURL: purl,
|
||||
Properties: &props,
|
||||
})
|
||||
|
||||
ospkgToPURL[pack.Name] = purl
|
||||
}
|
||||
return components
|
||||
}
|
||||
|
||||
func cpeToCdxComponents(scannedCves models.VulnInfos) []cdx.Component {
|
||||
cpes := map[string]struct{}{}
|
||||
for _, cve := range scannedCves {
|
||||
for _, cpe := range cve.CpeURIs {
|
||||
cpes[cpe] = struct{}{}
|
||||
}
|
||||
}
|
||||
if len(cpes) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
components := []cdx.Component{
|
||||
{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeApplication,
|
||||
Name: "CPEs",
|
||||
Properties: &[]cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: "CPE",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
for cpe := range cpes {
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: cpe,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: cpe,
|
||||
CPE: cpe,
|
||||
})
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func libpkgToCdxComponents(libscanner models.LibraryScanner, libpkgToPURL map[string]map[string]string) []cdx.Component {
|
||||
components := []cdx.Component{
|
||||
{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeApplication,
|
||||
Name: libscanner.LockfilePath,
|
||||
Properties: &[]cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: libscanner.Type,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, lib := range libscanner.Libs {
|
||||
purl := packageurl.NewPackageURL(libscanner.Type, "", lib.Name, lib.Version, packageurl.Qualifiers{{Key: "file_path", Value: libscanner.LockfilePath}}, "").ToString()
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: purl,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: lib.Name,
|
||||
Version: lib.Version,
|
||||
PackageURL: purl,
|
||||
})
|
||||
|
||||
libpkgToPURL[libscanner.LockfilePath][lib.Name] = purl
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func ghpkgToCdxComponents(m models.DependencyGraphManifest, ghpkgToPURL map[string]map[string]string) []cdx.Component {
|
||||
components := []cdx.Component{
|
||||
{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeApplication,
|
||||
Name: m.BlobPath,
|
||||
Properties: &[]cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: m.Ecosystem(),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, dep := range m.Dependencies {
|
||||
purl := packageurl.NewPackageURL(m.Ecosystem(), "", dep.PackageName, dep.Version(), packageurl.Qualifiers{{Key: "repo_url", Value: m.Repository}, {Key: "file_path", Value: m.Filename}}, "").ToString()
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: purl,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: dep.PackageName,
|
||||
Version: dep.Version(),
|
||||
PackageURL: purl,
|
||||
})
|
||||
|
||||
ghpkgToPURL[m.RepoURLFilename()][dep.PackageName] = purl
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func wppkgToCdxComponents(wppkgs models.WordPressPackages, wppkgToPURL map[string]string) []cdx.Component {
|
||||
if len(wppkgs) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
components := []cdx.Component{
|
||||
{
|
||||
BOMRef: uuid.NewString(),
|
||||
Type: cdx.ComponentTypeApplication,
|
||||
Name: "wordpress",
|
||||
Properties: &[]cdx.Property{
|
||||
{
|
||||
Name: "future-architect:vuls:Type",
|
||||
Value: "WordPress",
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, wppkg := range wppkgs {
|
||||
purl := packageurl.NewPackageURL("wordpress", wppkg.Type, wppkg.Name, wppkg.Version, packageurl.Qualifiers{{Key: "status", Value: wppkg.Status}}, "").ToString()
|
||||
components = append(components, cdx.Component{
|
||||
BOMRef: purl,
|
||||
Type: cdx.ComponentTypeLibrary,
|
||||
Name: wppkg.Name,
|
||||
Version: wppkg.Version,
|
||||
PackageURL: purl,
|
||||
})
|
||||
|
||||
wppkgToPURL[wppkg.Name] = purl
|
||||
}
|
||||
|
||||
return components
|
||||
}
|
||||
|
||||
func cdxDependencies(bomRefs map[string][]string) *[]cdx.Dependency {
|
||||
dependencies := make([]cdx.Dependency, 0, len(bomRefs))
|
||||
for ref, depRefs := range bomRefs {
|
||||
ds := depRefs
|
||||
dependencies = append(dependencies, cdx.Dependency{
|
||||
Ref: ref,
|
||||
Dependencies: &ds,
|
||||
})
|
||||
}
|
||||
return &dependencies
|
||||
}
|
||||
|
||||
func toPkgPURL(osFamily, osVersion, packName, packVersion, packRelease, packArch, packRepository string) string {
|
||||
var purlType string
|
||||
switch osFamily {
|
||||
case constant.Alma, constant.Amazon, constant.CentOS, constant.Fedora, constant.OpenSUSE, constant.OpenSUSELeap, constant.Oracle, constant.RedHat, constant.Rocky, constant.SUSEEnterpriseDesktop, constant.SUSEEnterpriseServer:
|
||||
purlType = "rpm"
|
||||
case constant.Alpine:
|
||||
purlType = "apk"
|
||||
case constant.Debian, constant.Raspbian, constant.Ubuntu:
|
||||
purlType = "deb"
|
||||
case constant.FreeBSD:
|
||||
purlType = "pkg"
|
||||
case constant.Windows:
|
||||
purlType = "win"
|
||||
case constant.ServerTypePseudo:
|
||||
purlType = "pseudo"
|
||||
default:
|
||||
purlType = "unknown"
|
||||
}
|
||||
|
||||
version := packVersion
|
||||
if packRelease != "" {
|
||||
version = fmt.Sprintf("%s-%s", packVersion, packRelease)
|
||||
}
|
||||
|
||||
var qualifiers packageurl.Qualifiers
|
||||
if osVersion != "" {
|
||||
qualifiers = append(qualifiers, packageurl.Qualifier{
|
||||
Key: "distro",
|
||||
Value: osVersion,
|
||||
})
|
||||
}
|
||||
if packArch != "" {
|
||||
qualifiers = append(qualifiers, packageurl.Qualifier{
|
||||
Key: "arch",
|
||||
Value: packArch,
|
||||
})
|
||||
}
|
||||
if packRepository != "" {
|
||||
qualifiers = append(qualifiers, packageurl.Qualifier{
|
||||
Key: "repo",
|
||||
Value: packRepository,
|
||||
})
|
||||
}
|
||||
|
||||
return packageurl.NewPackageURL(purlType, osFamily, packName, version, qualifiers, "").ToString()
|
||||
}
|
||||
|
||||
func cdxVulnerabilities(result models.ScanResult, ospkgToPURL map[string]string, libpkgToPURL, ghpkgToPURL map[string]map[string]string, wppkgToPURL map[string]string) *[]cdx.Vulnerability {
|
||||
vulnerabilities := make([]cdx.Vulnerability, 0, len(result.ScannedCves))
|
||||
for _, cve := range result.ScannedCves {
|
||||
vulnerabilities = append(vulnerabilities, cdx.Vulnerability{
|
||||
ID: cve.CveID,
|
||||
Ratings: cdxRatings(cve.CveContents),
|
||||
CWEs: cdxCWEs(cve.CveContents),
|
||||
Description: cdxDescription(cve.CveContents),
|
||||
Advisories: cdxAdvisories(cve.CveContents),
|
||||
Affects: cdxAffects(cve, ospkgToPURL, libpkgToPURL, ghpkgToPURL, wppkgToPURL),
|
||||
})
|
||||
}
|
||||
return &vulnerabilities
|
||||
}
|
||||
|
||||
func cdxRatings(cveContents models.CveContents) *[]cdx.VulnerabilityRating {
|
||||
var ratings []cdx.VulnerabilityRating
|
||||
for _, contents := range cveContents {
|
||||
for _, content := range contents {
|
||||
if content.Cvss2Score != 0 || content.Cvss2Vector != "" || content.Cvss2Severity != "" {
|
||||
ratings = append(ratings, cdxCVSS2Rating(string(content.Type), content.Cvss2Vector, content.Cvss2Score, content.Cvss2Severity))
|
||||
}
|
||||
if content.Cvss3Score != 0 || content.Cvss3Vector != "" || content.Cvss3Severity != "" {
|
||||
ratings = append(ratings, cdxCVSS3Rating(string(content.Type), content.Cvss3Vector, content.Cvss3Score, content.Cvss3Severity))
|
||||
}
|
||||
}
|
||||
}
|
||||
return &ratings
|
||||
}
|
||||
|
||||
func cdxCVSS2Rating(source, vector string, score float64, severity string) cdx.VulnerabilityRating {
|
||||
r := cdx.VulnerabilityRating{
|
||||
Source: &cdx.Source{Name: source},
|
||||
Method: cdx.ScoringMethodCVSSv2,
|
||||
Vector: vector,
|
||||
}
|
||||
if score != 0 {
|
||||
r.Score = &score
|
||||
}
|
||||
switch strings.ToLower(severity) {
|
||||
case "high":
|
||||
r.Severity = cdx.SeverityHigh
|
||||
case "medium":
|
||||
r.Severity = cdx.SeverityMedium
|
||||
case "low":
|
||||
r.Severity = cdx.SeverityLow
|
||||
default:
|
||||
r.Severity = cdx.SeverityUnknown
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func cdxCVSS3Rating(source, vector string, score float64, severity string) cdx.VulnerabilityRating {
|
||||
r := cdx.VulnerabilityRating{
|
||||
Source: &cdx.Source{Name: source},
|
||||
Method: cdx.ScoringMethodCVSSv3,
|
||||
Vector: vector,
|
||||
}
|
||||
if strings.HasPrefix(vector, "CVSS:3.1") {
|
||||
r.Method = cdx.ScoringMethodCVSSv31
|
||||
}
|
||||
if score != 0 {
|
||||
r.Score = &score
|
||||
}
|
||||
switch strings.ToLower(severity) {
|
||||
case "critical":
|
||||
r.Severity = cdx.SeverityCritical
|
||||
case "high":
|
||||
r.Severity = cdx.SeverityHigh
|
||||
case "medium":
|
||||
r.Severity = cdx.SeverityMedium
|
||||
case "low":
|
||||
r.Severity = cdx.SeverityLow
|
||||
case "none":
|
||||
r.Severity = cdx.SeverityNone
|
||||
default:
|
||||
r.Severity = cdx.SeverityUnknown
|
||||
}
|
||||
return r
|
||||
}
|
||||
|
||||
func cdxAffects(cve models.VulnInfo, ospkgToPURL map[string]string, libpkgToPURL, ghpkgToPURL map[string]map[string]string, wppkgToPURL map[string]string) *[]cdx.Affects {
|
||||
affects := make([]cdx.Affects, 0, len(cve.AffectedPackages)+len(cve.CpeURIs)+len(cve.LibraryFixedIns)+len(cve.WpPackageFixStats))
|
||||
|
||||
for _, p := range cve.AffectedPackages {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: ospkgToPURL[p.Name],
|
||||
})
|
||||
}
|
||||
for _, cpe := range cve.CpeURIs {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: cpe,
|
||||
})
|
||||
}
|
||||
for _, lib := range cve.LibraryFixedIns {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: libpkgToPURL[lib.Path][lib.Name],
|
||||
})
|
||||
}
|
||||
for _, alert := range cve.GitHubSecurityAlerts {
|
||||
// TODO: not in dependency graph
|
||||
if purl, ok := ghpkgToPURL[alert.RepoURLManifestPath()][alert.Package.Name]; ok {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: purl,
|
||||
})
|
||||
}
|
||||
}
|
||||
for _, wppack := range cve.WpPackageFixStats {
|
||||
affects = append(affects, cdx.Affects{
|
||||
Ref: wppkgToPURL[wppack.Name],
|
||||
})
|
||||
}
|
||||
|
||||
return &affects
|
||||
}
|
||||
|
||||
func cdxCWEs(cveContents models.CveContents) *[]int {
|
||||
m := map[int]struct{}{}
|
||||
for _, contents := range cveContents {
|
||||
for _, content := range contents {
|
||||
for _, cweID := range content.CweIDs {
|
||||
if !strings.HasPrefix(cweID, "CWE-") {
|
||||
continue
|
||||
}
|
||||
i, err := strconv.Atoi(strings.TrimPrefix(cweID, "CWE-"))
|
||||
if err != nil {
|
||||
continue
|
||||
}
|
||||
m[i] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
cweIDs := maps.Keys(m)
|
||||
return &cweIDs
|
||||
}
|
||||
|
||||
func cdxDescription(cveContents models.CveContents) string {
|
||||
if contents, ok := cveContents[models.Nvd]; ok {
|
||||
return contents[0].Summary
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func cdxAdvisories(cveContents models.CveContents) *[]cdx.Advisory {
|
||||
urls := map[string]struct{}{}
|
||||
for _, contents := range cveContents {
|
||||
for _, content := range contents {
|
||||
if content.SourceLink != "" {
|
||||
urls[content.SourceLink] = struct{}{}
|
||||
}
|
||||
for _, r := range content.References {
|
||||
urls[r.Link] = struct{}{}
|
||||
}
|
||||
}
|
||||
}
|
||||
advisories := make([]cdx.Advisory, 0, len(urls))
|
||||
for u := range urls {
|
||||
advisories = append(advisories, cdx.Advisory{
|
||||
URL: u,
|
||||
})
|
||||
}
|
||||
return &advisories
|
||||
}
|
||||
@@ -33,12 +33,13 @@ type message struct {
|
||||
Attachments []slack.Attachment `json:"attachments"`
|
||||
}
|
||||
|
||||
// Write results to Slack
|
||||
func (w SlackWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
|
||||
channel := w.Cnf.Channel
|
||||
for _, r := range rs {
|
||||
w.lang, w.osFamily = r.Lang, r.Family
|
||||
if channel == "${servername}" {
|
||||
channel := w.Cnf.Channel
|
||||
if w.Cnf.Channel == "${servername}" {
|
||||
channel = fmt.Sprintf("#%s", r.ServerName)
|
||||
}
|
||||
|
||||
@@ -195,7 +196,7 @@ func (w SlackWriter) toSlackAttachments(r models.ScanResult) (attaches []slack.A
|
||||
candidate = append(candidate, "?")
|
||||
}
|
||||
for _, n := range vinfo.GitHubSecurityAlerts {
|
||||
installed = append(installed, n.PackageName)
|
||||
installed = append(installed, n.RepoURLPackageName())
|
||||
candidate = append(candidate, "?")
|
||||
}
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ func (w StdoutWriter) WriteScanSummary(rs ...models.ScanResult) {
|
||||
fmt.Printf("%s\n", formatScanSummary(rs...))
|
||||
}
|
||||
|
||||
// Write results to stdout
|
||||
func (w StdoutWriter) Write(rs ...models.ScanResult) error {
|
||||
if w.FormatOneLineText {
|
||||
fmt.Print("\n\n")
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
//go:build !windows
|
||||
|
||||
package reporter
|
||||
|
||||
import (
|
||||
@@ -16,6 +18,7 @@ type SyslogWriter struct {
|
||||
Cnf config.SyslogConf
|
||||
}
|
||||
|
||||
// Write results to syslog
|
||||
func (w SyslogWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
facility, _ := w.Cnf.GetFacility()
|
||||
severity, _ := w.Cnf.GetSeverity()
|
||||
|
||||
@@ -21,6 +21,7 @@ type TelegramWriter struct {
|
||||
Proxy string
|
||||
}
|
||||
|
||||
// Write results to Telegram
|
||||
func (w TelegramWriter) Write(rs ...models.ScanResult) (err error) {
|
||||
for _, r := range rs {
|
||||
msgs := []string{fmt.Sprintf("*%s*\n%s\n%s\n%s",
|
||||
@@ -74,14 +75,14 @@ func (w TelegramWriter) sendMessage(chatID, token, message string) error {
|
||||
return err
|
||||
}
|
||||
resp, err := client.Do(req)
|
||||
if checkResponse(resp) != nil && err != nil {
|
||||
if w.checkResponse(resp) != nil && err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkResponse(r *http.Response) error {
|
||||
func (w TelegramWriter) checkResponse(r *http.Response) error {
|
||||
if c := r.StatusCode; 200 <= c && c <= 299 {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
@@ -81,24 +80,23 @@ func loadOneServerScanResult(jsonFile string) (*models.ScanResult, error) {
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// jsonDirPattern is file name pattern of JSON directory
|
||||
// 2016-11-16T10:43:28+09:00
|
||||
// 2016-11-16T10:43:28Z
|
||||
var jsonDirPattern = regexp.MustCompile(
|
||||
`^\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}(?:Z|[+-]\d{2}:\d{2})$`)
|
||||
|
||||
// ListValidJSONDirs returns valid json directory as array
|
||||
// Returned array is sorted so that recent directories are at the head
|
||||
func ListValidJSONDirs(resultsDir string) (dirs []string, err error) {
|
||||
var dirInfo []fs.DirEntry
|
||||
if dirInfo, err = os.ReadDir(resultsDir); err != nil {
|
||||
err = xerrors.Errorf("Failed to read %s: %w", resultsDir, err)
|
||||
return
|
||||
dirInfo, err := os.ReadDir(resultsDir)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to read %s: %w", resultsDir, err)
|
||||
}
|
||||
for _, d := range dirInfo {
|
||||
if d.IsDir() && jsonDirPattern.MatchString(d.Name()) {
|
||||
jsonDir := filepath.Join(resultsDir, d.Name())
|
||||
dirs = append(dirs, jsonDir)
|
||||
if !d.IsDir() {
|
||||
continue
|
||||
}
|
||||
|
||||
for _, layout := range []string{"2006-01-02T15:04:05Z", "2006-01-02T15:04:05-07:00", "2006-01-02T15-04-05-0700"} {
|
||||
if _, err := time.Parse(layout, d.Name()); err == nil {
|
||||
dirs = append(dirs, filepath.Join(resultsDir, d.Name()))
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
sort.Slice(dirs, func(i, j int) bool {
|
||||
@@ -258,9 +256,13 @@ No CVE-IDs are found in updatable packages.
|
||||
// v2max := vinfo.MaxCvss2Score().Value.Score
|
||||
// v3max := vinfo.MaxCvss3Score().Value.Score
|
||||
|
||||
packnames := strings.Join(vinfo.AffectedPackages.Names(), ", ")
|
||||
// packname := vinfo.AffectedPackages.FormatTuiSummary()
|
||||
// packname += strings.Join(vinfo.CpeURIs, ", ")
|
||||
pkgNames := vinfo.AffectedPackages.Names()
|
||||
pkgNames = append(pkgNames, vinfo.CpeURIs...)
|
||||
pkgNames = append(pkgNames, vinfo.GitHubSecurityAlerts.Names()...)
|
||||
pkgNames = append(pkgNames, vinfo.WpPackageFixStats.Names()...)
|
||||
pkgNames = append(pkgNames, vinfo.LibraryFixedIns.Names()...)
|
||||
pkgNames = append(pkgNames, vinfo.WindowsKBFixedIns...)
|
||||
packnames := strings.Join(pkgNames, ", ")
|
||||
|
||||
exploits := ""
|
||||
if 0 < len(vinfo.Exploits) || 0 < len(vinfo.Metasploits) {
|
||||
@@ -404,7 +406,7 @@ No CVE-IDs are found in updatable packages.
|
||||
}
|
||||
|
||||
for _, alert := range vuln.GitHubSecurityAlerts {
|
||||
data = append(data, []string{"GitHub", alert.PackageName})
|
||||
data = append(data, []string{"GitHub", alert.RepoURLPackageName()})
|
||||
}
|
||||
|
||||
for _, wp := range vuln.WpPackageFixStats {
|
||||
@@ -431,6 +433,10 @@ No CVE-IDs are found in updatable packages.
|
||||
}
|
||||
}
|
||||
|
||||
if len(vuln.WindowsKBFixedIns) > 0 {
|
||||
data = append(data, []string{"Windows KB", fmt.Sprintf("FixedIn: %s", strings.Join(vuln.WindowsKBFixedIns, ", "))})
|
||||
}
|
||||
|
||||
for _, confidence := range vuln.Confidences {
|
||||
data = append(data, []string{"Confidence", confidence.String()})
|
||||
}
|
||||
@@ -730,11 +736,7 @@ func getMinusDiffCves(previous, current models.ScanResult) models.VulnInfos {
|
||||
}
|
||||
|
||||
func isCveInfoUpdated(cveID string, previous, current models.ScanResult) bool {
|
||||
cTypes := []models.CveContentType{
|
||||
models.Nvd,
|
||||
models.Jvn,
|
||||
models.NewCveContentType(current.Family),
|
||||
}
|
||||
cTypes := append([]models.CveContentType{models.Nvd, models.Jvn}, models.GetCveContentTypes(current.Family)...)
|
||||
|
||||
prevLastModifieds := map[models.CveContentType][]time.Time{}
|
||||
preVinfo, ok := previous.ScannedCves[cveID]
|
||||
|
||||
@@ -103,6 +103,9 @@ func writeToFile(cnf config.Config, path string) error {
|
||||
if cnf.Default.WordPress != nil && cnf.Default.WordPress.IsZero() {
|
||||
cnf.Default.WordPress = nil
|
||||
}
|
||||
if cnf.Default.PortScan != nil && cnf.Default.PortScan.IsZero() {
|
||||
cnf.Default.PortScan = nil
|
||||
}
|
||||
|
||||
c := struct {
|
||||
Saas *config.SaasConf `toml:"saas"`
|
||||
@@ -198,5 +201,11 @@ func cleanForTOMLEncoding(server config.ServerInfo, def config.ServerInfo) confi
|
||||
}
|
||||
}
|
||||
|
||||
if server.PortScan != nil {
|
||||
if server.PortScan.IsZero() || reflect.DeepEqual(server.PortScan, def.PortScan) {
|
||||
server.PortScan = nil
|
||||
}
|
||||
}
|
||||
|
||||
return server
|
||||
}
|
||||
|
||||
@@ -1,10 +1,14 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"golang.org/x/xerrors"
|
||||
)
|
||||
|
||||
// inherit OsTypeInterface
|
||||
@@ -50,12 +54,26 @@ func (o *amazon) depsFast() []string {
|
||||
return []string{}
|
||||
}
|
||||
// repoquery
|
||||
return []string{"yum-utils"}
|
||||
switch s := strings.Fields(o.getDistro().Release)[0]; s {
|
||||
case "1", "2":
|
||||
return []string{"yum-utils"}
|
||||
default:
|
||||
if _, err := time.Parse("2006.01", s); err == nil {
|
||||
return []string{"yum-utils"}
|
||||
}
|
||||
return []string{"dnf-utils"}
|
||||
}
|
||||
}
|
||||
|
||||
func (o *amazon) depsFastRoot() []string {
|
||||
return []string{
|
||||
"yum-utils",
|
||||
switch s := strings.Fields(o.getDistro().Release)[0]; s {
|
||||
case "1", "2":
|
||||
return []string{"yum-utils"}
|
||||
default:
|
||||
if _, err := time.Parse("2006.01", s); err == nil {
|
||||
return []string{"yum-utils"}
|
||||
}
|
||||
return []string{"dnf-utils"}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
357
scanner/base.go
357
scanner/base.go
@@ -28,10 +28,12 @@ import (
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
// Import library scanner
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/c/conan"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/dotnet/deps"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/dotnet/nuget"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/golang/binary"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/golang/mod"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/gradle"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/jar"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/java/pom"
|
||||
_ "github.com/aquasecurity/trivy/pkg/fanal/analyzer/language/nodejs/npm"
|
||||
@@ -58,6 +60,7 @@ type base struct {
|
||||
osPackages
|
||||
LibraryScanners []models.LibraryScanner
|
||||
WordPress models.WordPressPackages
|
||||
windowsKB *models.WindowsKB
|
||||
|
||||
log logging.Logger
|
||||
errs []error
|
||||
@@ -136,7 +139,6 @@ func (l *base) runningKernel() (release, version string, err error) {
|
||||
version = ss[6]
|
||||
}
|
||||
if _, err := debver.NewVersion(version); err != nil {
|
||||
l.log.Warnf("kernel running version is invalid. skip kernel vulnerability detection. actual kernel version: %s, err: %s", version, err)
|
||||
version = ""
|
||||
}
|
||||
}
|
||||
@@ -361,7 +363,6 @@ func (l *base) detectPlatform() {
|
||||
|
||||
//TODO Azure, GCP...
|
||||
l.setPlatform(models.Platform{Name: "other"})
|
||||
return
|
||||
}
|
||||
|
||||
var dsFingerPrintPrefix = "AgentStatus.agentCertHash: "
|
||||
@@ -505,6 +506,7 @@ func (l *base) convertToModel() models.ScanResult {
|
||||
EnabledDnfModules: l.EnabledDnfModules,
|
||||
WordPressPackages: l.WordPress,
|
||||
LibraryScanners: l.LibraryScanners,
|
||||
WindowsKB: l.windowsKB,
|
||||
Optional: l.ServerInfo.Optional,
|
||||
Errors: errs,
|
||||
Warnings: warns,
|
||||
@@ -582,12 +584,6 @@ func (l *base) parseSystemctlStatus(stdout string) string {
|
||||
return ss[1]
|
||||
}
|
||||
|
||||
// LibFile : library file content
|
||||
type LibFile struct {
|
||||
Contents []byte
|
||||
Filemode os.FileMode
|
||||
}
|
||||
|
||||
func (l *base) scanLibraries() (err error) {
|
||||
if len(l.LibraryScanners) != 0 {
|
||||
return nil
|
||||
@@ -598,9 +594,9 @@ func (l *base) scanLibraries() (err error) {
|
||||
return nil
|
||||
}
|
||||
|
||||
l.log.Info("Scanning Lockfile...")
|
||||
l.log.Info("Scanning Language-specific Packages...")
|
||||
|
||||
libFilemap := map[string]LibFile{}
|
||||
found := map[string]bool{}
|
||||
detectFiles := l.ServerInfo.Lockfiles
|
||||
|
||||
priv := noSudo
|
||||
@@ -615,9 +611,17 @@ func (l *base) scanLibraries() (err error) {
|
||||
findopt += fmt.Sprintf("-name %q -o ", filename)
|
||||
}
|
||||
|
||||
dir := "/"
|
||||
if len(l.ServerInfo.FindLockDirs) != 0 {
|
||||
dir = strings.Join(l.ServerInfo.FindLockDirs, " ")
|
||||
} else {
|
||||
l.log.Infof("It's recommended to specify FindLockDirs in config.toml. If FindLockDirs is not specified, all directories under / will be searched, which may increase CPU load")
|
||||
}
|
||||
l.log.Infof("Finding files under %s", dir)
|
||||
|
||||
// delete last "-o "
|
||||
// find / -type f -and \( -name "package-lock.json" -o -name "yarn.lock" ... \) 2>&1 | grep -v "find: "
|
||||
cmd := fmt.Sprintf(`find / -type f -and \( ` + findopt[:len(findopt)-3] + ` \) 2>&1 | grep -v "find: "`)
|
||||
cmd := fmt.Sprintf(`find %s -type f -and \( `+findopt[:len(findopt)-3]+` \) 2>&1 | grep -v "find: "`, dir)
|
||||
r := exec(l.ServerInfo, cmd, priv)
|
||||
if r.ExitStatus != 0 && r.ExitStatus != 1 {
|
||||
return xerrors.Errorf("Failed to find lock files")
|
||||
@@ -635,154 +639,167 @@ func (l *base) scanLibraries() (err error) {
|
||||
}
|
||||
|
||||
// skip already exist
|
||||
if _, ok := libFilemap[path]; ok {
|
||||
if _, ok := found[path]; ok {
|
||||
continue
|
||||
}
|
||||
|
||||
var f LibFile
|
||||
var contents []byte
|
||||
var filemode os.FileMode
|
||||
|
||||
switch l.Distro.Family {
|
||||
case constant.ServerTypePseudo:
|
||||
fileinfo, err := os.Stat(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to get target file info. err: %w, filepath: %s", err, path)
|
||||
l.log.Warnf("Failed to get target file info. err: %s, filepath: %s", err, path)
|
||||
continue
|
||||
}
|
||||
f.Filemode = fileinfo.Mode().Perm()
|
||||
f.Contents, err = os.ReadFile(path)
|
||||
filemode = fileinfo.Mode().Perm()
|
||||
contents, err = os.ReadFile(path)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to read target file contents. err: %w, filepath: %s", err, path)
|
||||
l.log.Warnf("Failed to read target file contents. err: %s, filepath: %s", err, path)
|
||||
continue
|
||||
}
|
||||
default:
|
||||
l.log.Debugf("Analyzing file: %s", path)
|
||||
cmd := fmt.Sprintf(`stat -c "%%a" %s`, path)
|
||||
r := exec(l.ServerInfo, cmd, priv)
|
||||
r := exec(l.ServerInfo, cmd, priv, logging.NewIODiscardLogger())
|
||||
if !r.isSuccess() {
|
||||
return xerrors.Errorf("Failed to get target file permission: %s, filepath: %s", r, path)
|
||||
l.log.Warnf("Failed to get target file permission: %s, filepath: %s", r, path)
|
||||
continue
|
||||
}
|
||||
permStr := fmt.Sprintf("0%s", strings.ReplaceAll(r.Stdout, "\n", ""))
|
||||
perm, err := strconv.ParseUint(permStr, 8, 32)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to parse permission string. err: %w, permission string: %s", err, permStr)
|
||||
l.log.Warnf("Failed to parse permission string. err: %s, permission string: %s", err, permStr)
|
||||
continue
|
||||
}
|
||||
f.Filemode = os.FileMode(perm)
|
||||
filemode = os.FileMode(perm)
|
||||
|
||||
cmd = fmt.Sprintf("cat %s", path)
|
||||
r = exec(l.ServerInfo, cmd, priv)
|
||||
r = exec(l.ServerInfo, cmd, priv, logging.NewIODiscardLogger())
|
||||
if !r.isSuccess() {
|
||||
return xerrors.Errorf("Failed to get target file contents: %s, filepath: %s", r, path)
|
||||
l.log.Warnf("Failed to get target file contents: %s, filepath: %s", r, path)
|
||||
continue
|
||||
}
|
||||
f.Contents = []byte(r.Stdout)
|
||||
contents = []byte(r.Stdout)
|
||||
}
|
||||
libFilemap[path] = f
|
||||
found[path] = true
|
||||
var libraryScanners []models.LibraryScanner
|
||||
if libraryScanners, err = AnalyzeLibrary(context.Background(), path, contents, filemode, l.ServerInfo.Mode.IsOffline()); err != nil {
|
||||
return err
|
||||
}
|
||||
l.LibraryScanners = append(l.LibraryScanners, libraryScanners...)
|
||||
}
|
||||
|
||||
var libraryScanners []models.LibraryScanner
|
||||
if libraryScanners, err = AnalyzeLibraries(context.Background(), libFilemap, l.ServerInfo.Mode.IsOffline()); err != nil {
|
||||
return err
|
||||
}
|
||||
l.LibraryScanners = append(l.LibraryScanners, libraryScanners...)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AnalyzeLibraries : detects libs defined in lockfile
|
||||
func AnalyzeLibraries(ctx context.Context, libFilemap map[string]LibFile, isOffline bool) (libraryScanners []models.LibraryScanner, err error) {
|
||||
// https://github.com/aquasecurity/trivy/blob/84677903a6fa1b707a32d0e8b2bffc23dde52afa/pkg/fanal/analyzer/const.go
|
||||
disabledAnalyzers := []analyzer.Type{
|
||||
// ======
|
||||
// OS
|
||||
// ======
|
||||
analyzer.TypeOSRelease,
|
||||
analyzer.TypeAlpine,
|
||||
analyzer.TypeAmazon,
|
||||
analyzer.TypeCBLMariner,
|
||||
analyzer.TypeDebian,
|
||||
analyzer.TypePhoton,
|
||||
analyzer.TypeCentOS,
|
||||
analyzer.TypeRocky,
|
||||
analyzer.TypeAlma,
|
||||
analyzer.TypeFedora,
|
||||
analyzer.TypeOracle,
|
||||
analyzer.TypeRedHatBase,
|
||||
analyzer.TypeSUSE,
|
||||
analyzer.TypeUbuntu,
|
||||
|
||||
// OS Package
|
||||
analyzer.TypeApk,
|
||||
analyzer.TypeDpkg,
|
||||
analyzer.TypeDpkgLicense,
|
||||
analyzer.TypeRpm,
|
||||
analyzer.TypeRpmqa,
|
||||
|
||||
// OS Package Repository
|
||||
analyzer.TypeApkRepo,
|
||||
|
||||
// ============
|
||||
// Image Config
|
||||
// ============
|
||||
analyzer.TypeApkCommand,
|
||||
|
||||
// =================
|
||||
// Structured Config
|
||||
// =================
|
||||
analyzer.TypeYaml,
|
||||
analyzer.TypeJSON,
|
||||
analyzer.TypeDockerfile,
|
||||
analyzer.TypeTerraform,
|
||||
analyzer.TypeCloudFormation,
|
||||
analyzer.TypeHelm,
|
||||
|
||||
// ========
|
||||
// License
|
||||
// ========
|
||||
analyzer.TypeLicenseFile,
|
||||
|
||||
// ========
|
||||
// Secrets
|
||||
// ========
|
||||
analyzer.TypeSecret,
|
||||
|
||||
// =======
|
||||
// Red Hat
|
||||
// =======
|
||||
analyzer.TypeRedHatContentManifestType,
|
||||
analyzer.TypeRedHatDockerfileType,
|
||||
// AnalyzeLibrary : detects library defined in artifact such as lockfile or jar
|
||||
func AnalyzeLibrary(ctx context.Context, path string, contents []byte, filemode os.FileMode, isOffline bool) (libraryScanners []models.LibraryScanner, err error) {
|
||||
anal, err := analyzer.NewAnalyzerGroup(analyzer.AnalyzerOptions{
|
||||
Group: analyzer.GroupBuiltin,
|
||||
DisabledAnalyzers: disabledAnalyzers,
|
||||
})
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to new analyzer group. err: %w", err)
|
||||
}
|
||||
anal := analyzer.NewAnalyzerGroup(analyzer.GroupBuiltin, disabledAnalyzers)
|
||||
|
||||
for path, f := range libFilemap {
|
||||
var wg sync.WaitGroup
|
||||
result := new(analyzer.AnalysisResult)
|
||||
if err := anal.AnalyzeFile(
|
||||
ctx,
|
||||
&wg,
|
||||
semaphore.NewWeighted(1),
|
||||
result,
|
||||
"",
|
||||
path,
|
||||
&DummyFileInfo{size: int64(len(f.Contents)), filemode: f.Filemode},
|
||||
func() (dio.ReadSeekCloserAt, error) { return dio.NopCloser(bytes.NewReader(f.Contents)), nil },
|
||||
nil,
|
||||
analyzer.AnalysisOptions{Offline: isOffline},
|
||||
); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to get libs. err: %w", err)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
libscan, err := convertLibWithScanner(result.Applications)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to convert libs. err: %w", err)
|
||||
}
|
||||
libraryScanners = append(libraryScanners, libscan...)
|
||||
var wg sync.WaitGroup
|
||||
result := new(analyzer.AnalysisResult)
|
||||
if err := anal.AnalyzeFile(
|
||||
ctx,
|
||||
&wg,
|
||||
semaphore.NewWeighted(1),
|
||||
result,
|
||||
"",
|
||||
path,
|
||||
&DummyFileInfo{name: filepath.Base(path), size: int64(len(contents)), filemode: filemode},
|
||||
func() (dio.ReadSeekCloserAt, error) { return dio.NopCloser(bytes.NewReader(contents)), nil },
|
||||
nil,
|
||||
analyzer.AnalysisOptions{Offline: isOffline},
|
||||
); err != nil {
|
||||
return nil, xerrors.Errorf("Failed to get libs. err: %w", err)
|
||||
}
|
||||
wg.Wait()
|
||||
|
||||
libscan, err := convertLibWithScanner(result.Applications)
|
||||
if err != nil {
|
||||
return nil, xerrors.Errorf("Failed to convert libs. err: %w", err)
|
||||
}
|
||||
libraryScanners = append(libraryScanners, libscan...)
|
||||
return libraryScanners, nil
|
||||
}
|
||||
|
||||
// https://github.com/aquasecurity/trivy/blob/84677903a6fa1b707a32d0e8b2bffc23dde52afa/pkg/fanal/analyzer/const.go
|
||||
var disabledAnalyzers = []analyzer.Type{
|
||||
// ======
|
||||
// OS
|
||||
// ======
|
||||
analyzer.TypeOSRelease,
|
||||
analyzer.TypeAlpine,
|
||||
analyzer.TypeAmazon,
|
||||
analyzer.TypeCBLMariner,
|
||||
analyzer.TypeDebian,
|
||||
analyzer.TypePhoton,
|
||||
analyzer.TypeCentOS,
|
||||
analyzer.TypeRocky,
|
||||
analyzer.TypeAlma,
|
||||
analyzer.TypeFedora,
|
||||
analyzer.TypeOracle,
|
||||
analyzer.TypeRedHatBase,
|
||||
analyzer.TypeSUSE,
|
||||
analyzer.TypeUbuntu,
|
||||
|
||||
// OS Package
|
||||
analyzer.TypeApk,
|
||||
analyzer.TypeDpkg,
|
||||
analyzer.TypeDpkgLicense,
|
||||
analyzer.TypeRpm,
|
||||
analyzer.TypeRpmqa,
|
||||
|
||||
// OS Package Repository
|
||||
analyzer.TypeApkRepo,
|
||||
|
||||
// ============
|
||||
// Image Config
|
||||
// ============
|
||||
analyzer.TypeApkCommand,
|
||||
|
||||
// =================
|
||||
// Structured Config
|
||||
// =================
|
||||
analyzer.TypeYaml,
|
||||
analyzer.TypeJSON,
|
||||
analyzer.TypeDockerfile,
|
||||
analyzer.TypeTerraform,
|
||||
analyzer.TypeCloudFormation,
|
||||
analyzer.TypeHelm,
|
||||
|
||||
// ========
|
||||
// License
|
||||
// ========
|
||||
analyzer.TypeLicenseFile,
|
||||
|
||||
// ========
|
||||
// Secrets
|
||||
// ========
|
||||
analyzer.TypeSecret,
|
||||
|
||||
// =======
|
||||
// Red Hat
|
||||
// =======
|
||||
analyzer.TypeRedHatContentManifestType,
|
||||
analyzer.TypeRedHatDockerfileType,
|
||||
}
|
||||
|
||||
// DummyFileInfo is a dummy struct for libscan
|
||||
type DummyFileInfo struct {
|
||||
name string
|
||||
size int64
|
||||
filemode os.FileMode
|
||||
}
|
||||
|
||||
// Name is
|
||||
func (d *DummyFileInfo) Name() string { return "dummy" }
|
||||
func (d *DummyFileInfo) Name() string { return d.name }
|
||||
|
||||
// Size is
|
||||
func (d *DummyFileInfo) Size() int64 { return d.size }
|
||||
@@ -799,20 +816,48 @@ func (d *DummyFileInfo) IsDir() bool { return false }
|
||||
// Sys is
|
||||
func (d *DummyFileInfo) Sys() interface{} { return nil }
|
||||
|
||||
func (l *base) buildWpCliCmd(wpCliArgs string, suppressStderr bool, shell string) string {
|
||||
cmd := fmt.Sprintf("%s %s --path=%s", l.ServerInfo.WordPress.CmdPath, wpCliArgs, l.ServerInfo.WordPress.DocRoot)
|
||||
if !l.ServerInfo.WordPress.NoSudo {
|
||||
cmd = fmt.Sprintf("sudo -u %s -i -- %s --allow-root", l.ServerInfo.WordPress.OSUser, cmd)
|
||||
} else if l.ServerInfo.User != l.ServerInfo.WordPress.OSUser {
|
||||
cmd = fmt.Sprintf("su %s -c '%s'", l.ServerInfo.WordPress.OSUser, cmd)
|
||||
}
|
||||
|
||||
if suppressStderr {
|
||||
switch shell {
|
||||
case "csh", "tcsh":
|
||||
cmd = fmt.Sprintf("( %s > /dev/tty ) >& /dev/null", cmd)
|
||||
default:
|
||||
cmd = fmt.Sprintf("%s 2>/dev/null", cmd)
|
||||
}
|
||||
}
|
||||
return cmd
|
||||
}
|
||||
|
||||
func (l *base) scanWordPress() error {
|
||||
if l.ServerInfo.WordPress.IsZero() || l.ServerInfo.Type == constant.ServerTypePseudo {
|
||||
return nil
|
||||
}
|
||||
|
||||
shell, err := l.detectShell()
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to detect shell. err: %w", err)
|
||||
}
|
||||
|
||||
l.log.Info("Scanning WordPress...")
|
||||
cmd := fmt.Sprintf("sudo -u %s -i -- %s core version --path=%s --allow-root",
|
||||
l.ServerInfo.WordPress.OSUser,
|
||||
l.ServerInfo.WordPress.CmdPath,
|
||||
l.ServerInfo.WordPress.DocRoot)
|
||||
if l.ServerInfo.WordPress.NoSudo && l.ServerInfo.User != l.ServerInfo.WordPress.OSUser {
|
||||
if r := l.exec(fmt.Sprintf("timeout 2 su %s -c exit", l.ServerInfo.WordPress.OSUser), noSudo); !r.isSuccess() {
|
||||
return xerrors.New("Failed to switch user without password. err: please configure to switch users without password")
|
||||
}
|
||||
}
|
||||
|
||||
cmd := l.buildWpCliCmd("core version", false, shell)
|
||||
if r := exec(l.ServerInfo, cmd, noSudo); !r.isSuccess() {
|
||||
return xerrors.Errorf("Failed to exec `%s`. Check the OS user, command path of wp-cli, DocRoot and permission: %#v", cmd, l.ServerInfo.WordPress)
|
||||
}
|
||||
|
||||
wp, err := l.detectWordPress()
|
||||
wp, err := l.detectWordPress(shell)
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to scan wordpress: %w", err)
|
||||
}
|
||||
@@ -820,18 +865,44 @@ func (l *base) scanWordPress() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (l *base) detectWordPress() (*models.WordPressPackages, error) {
|
||||
ver, err := l.detectWpCore()
|
||||
func (l *base) detectShell() (string, error) {
|
||||
if r := l.exec("printenv SHELL", noSudo); r.isSuccess() {
|
||||
if t := strings.TrimSpace(r.Stdout); t != "" {
|
||||
return filepath.Base(t), nil
|
||||
}
|
||||
}
|
||||
|
||||
if r := l.exec(fmt.Sprintf(`grep "^%s" /etc/passwd | awk -F: '/%s/ { print $7 }'`, l.ServerInfo.User, l.ServerInfo.User), noSudo); r.isSuccess() {
|
||||
if t := strings.TrimSpace(r.Stdout); t != "" {
|
||||
return filepath.Base(t), nil
|
||||
}
|
||||
}
|
||||
|
||||
if isLocalExec(l.ServerInfo.Port, l.ServerInfo.Host) {
|
||||
if r := l.exec("ps -p $$ | tail +2 | awk '{print $NF}'", noSudo); r.isSuccess() {
|
||||
return strings.TrimSpace(r.Stdout), nil
|
||||
}
|
||||
|
||||
if r := l.exec("ps -p %self | tail +2 | awk '{print $NF}'", noSudo); r.isSuccess() {
|
||||
return strings.TrimSpace(r.Stdout), nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", xerrors.New("shell cannot be determined")
|
||||
}
|
||||
|
||||
func (l *base) detectWordPress(shell string) (*models.WordPressPackages, error) {
|
||||
ver, err := l.detectWpCore(shell)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
themes, err := l.detectWpThemes()
|
||||
themes, err := l.detectWpThemes(shell)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
plugins, err := l.detectWpPlugins()
|
||||
plugins, err := l.detectWpPlugins(shell)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -848,11 +919,8 @@ func (l *base) detectWordPress() (*models.WordPressPackages, error) {
|
||||
return &pkgs, nil
|
||||
}
|
||||
|
||||
func (l *base) detectWpCore() (string, error) {
|
||||
cmd := fmt.Sprintf("sudo -u %s -i -- %s core version --path=%s --allow-root 2>/dev/null",
|
||||
l.ServerInfo.WordPress.OSUser,
|
||||
l.ServerInfo.WordPress.CmdPath,
|
||||
l.ServerInfo.WordPress.DocRoot)
|
||||
func (l *base) detectWpCore(shell string) (string, error) {
|
||||
cmd := l.buildWpCliCmd("core version", true, shell)
|
||||
|
||||
r := exec(l.ServerInfo, cmd, noSudo)
|
||||
if !r.isSuccess() {
|
||||
@@ -861,11 +929,8 @@ func (l *base) detectWpCore() (string, error) {
|
||||
return strings.TrimSpace(r.Stdout), nil
|
||||
}
|
||||
|
||||
func (l *base) detectWpThemes() ([]models.WpPackage, error) {
|
||||
cmd := fmt.Sprintf("sudo -u %s -i -- %s theme list --path=%s --format=json --allow-root 2>/dev/null",
|
||||
l.ServerInfo.WordPress.OSUser,
|
||||
l.ServerInfo.WordPress.CmdPath,
|
||||
l.ServerInfo.WordPress.DocRoot)
|
||||
func (l *base) detectWpThemes(shell string) ([]models.WpPackage, error) {
|
||||
cmd := l.buildWpCliCmd("theme list --format=json", true, shell)
|
||||
|
||||
var themes []models.WpPackage
|
||||
r := exec(l.ServerInfo, cmd, noSudo)
|
||||
@@ -882,11 +947,8 @@ func (l *base) detectWpThemes() ([]models.WpPackage, error) {
|
||||
return themes, nil
|
||||
}
|
||||
|
||||
func (l *base) detectWpPlugins() ([]models.WpPackage, error) {
|
||||
cmd := fmt.Sprintf("sudo -u %s -i -- %s plugin list --path=%s --format=json --allow-root 2>/dev/null",
|
||||
l.ServerInfo.WordPress.OSUser,
|
||||
l.ServerInfo.WordPress.CmdPath,
|
||||
l.ServerInfo.WordPress.DocRoot)
|
||||
func (l *base) detectWpPlugins(shell string) ([]models.WpPackage, error) {
|
||||
cmd := l.buildWpCliCmd("plugin list --format=json", true, shell)
|
||||
|
||||
var plugins []models.WpPackage
|
||||
r := exec(l.ServerInfo, cmd, noSudo)
|
||||
@@ -1243,10 +1305,15 @@ func (l *base) parseGrepProcMap(stdout string) (soPaths []string) {
|
||||
return soPaths
|
||||
}
|
||||
|
||||
var errLSOFNoInternetFiles = xerrors.New("no Internet files located")
|
||||
|
||||
func (l *base) lsOfListen() (string, error) {
|
||||
cmd := `lsof -i -P -n`
|
||||
cmd := `lsof -i -P -n -V`
|
||||
r := l.exec(util.PrependProxyEnv(cmd), sudo)
|
||||
if !r.isSuccess() {
|
||||
if strings.TrimSpace(r.Stdout) == "lsof: no Internet files located" {
|
||||
return "", xerrors.Errorf("Failed to lsof: %w", errLSOFNoInternetFiles)
|
||||
}
|
||||
return "", xerrors.Errorf("Failed to lsof: %s", r)
|
||||
}
|
||||
return r.Stdout, nil
|
||||
@@ -1302,7 +1369,7 @@ func (l *base) pkgPs(getOwnerPkgs func([]string) ([]string, error)) error {
|
||||
|
||||
pidListenPorts := map[string][]models.PortStat{}
|
||||
stdout, err = l.lsOfListen()
|
||||
if err != nil {
|
||||
if err != nil && !xerrors.Is(err, errLSOFNoInternetFiles) {
|
||||
// warning only, continue scanning
|
||||
l.log.Warnf("Failed to lsof: %+v", err)
|
||||
}
|
||||
|
||||
@@ -42,16 +42,10 @@ func newDebian(c config.ServerInfo) *debian {
|
||||
|
||||
// Ubuntu, Debian, Raspbian
|
||||
// https://github.com/serverspec/specinfra/blob/master/lib/specinfra/helper/detect_os/debian.rb
|
||||
func detectDebian(c config.ServerInfo) (bool, osTypeInterface, error) {
|
||||
func detectDebian(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
if r := exec(c, "ls /etc/debian_version", noSudo); !r.isSuccess() {
|
||||
if r.Error != nil {
|
||||
return false, nil, nil
|
||||
}
|
||||
if r.ExitStatus == 255 {
|
||||
return false, &unknown{base{ServerInfo: c}}, xerrors.Errorf("Unable to connect via SSH. Scan with -vvv option to print SSH debugging messages and check SSH settings.\n%s", r)
|
||||
}
|
||||
logging.Log.Debugf("Not Debian like Linux. %s", r)
|
||||
return false, nil, nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// Raspbian
|
||||
@@ -64,7 +58,7 @@ func detectDebian(c config.ServerInfo) (bool, osTypeInterface, error) {
|
||||
if len(result) > 2 && result[0] == constant.Raspbian {
|
||||
deb := newDebian(c)
|
||||
deb.setDistro(strings.ToLower(trim(result[0])), trim(result[2]))
|
||||
return true, deb, nil
|
||||
return true, deb
|
||||
}
|
||||
}
|
||||
|
||||
@@ -84,7 +78,7 @@ func detectDebian(c config.ServerInfo) (bool, osTypeInterface, error) {
|
||||
distro := strings.ToLower(trim(result[1]))
|
||||
deb.setDistro(distro, trim(result[2]))
|
||||
}
|
||||
return true, deb, nil
|
||||
return true, deb
|
||||
}
|
||||
|
||||
if r := exec(c, "cat /etc/lsb-release", noSudo); r.isSuccess() {
|
||||
@@ -104,7 +98,7 @@ func detectDebian(c config.ServerInfo) (bool, osTypeInterface, error) {
|
||||
distro := strings.ToLower(trim(result[1]))
|
||||
deb.setDistro(distro, trim(result[2]))
|
||||
}
|
||||
return true, deb, nil
|
||||
return true, deb
|
||||
}
|
||||
|
||||
// Debian
|
||||
@@ -112,11 +106,11 @@ func detectDebian(c config.ServerInfo) (bool, osTypeInterface, error) {
|
||||
if r := exec(c, cmd, noSudo); r.isSuccess() {
|
||||
deb := newDebian(c)
|
||||
deb.setDistro(constant.Debian, trim(r.Stdout))
|
||||
return true, deb, nil
|
||||
return true, deb
|
||||
}
|
||||
|
||||
logging.Log.Debugf("Not Debian like Linux: %s", c.ServerName)
|
||||
return false, nil, nil
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func trim(str string) string {
|
||||
@@ -344,7 +338,7 @@ func (o *debian) rebootRequired() (bool, error) {
|
||||
}
|
||||
}
|
||||
|
||||
const dpkgQuery = `dpkg-query -W -f="\${binary:Package},\${db:Status-Abbrev},\${Version},\${Source},\${source:Version}\n"`
|
||||
const dpkgQuery = `dpkg-query -W -f="\${binary:Package},\${db:Status-Abbrev},\${Version},\${source:Package},\${source:Version}\n"`
|
||||
|
||||
func (o *debian) scanInstalledPackages() (models.Packages, models.Packages, models.SrcPackages, error) {
|
||||
updatable := models.Packages{}
|
||||
@@ -423,29 +417,19 @@ func (o *debian) parseInstalledPackages(stdout string) (models.Packages, models.
|
||||
Version: version,
|
||||
}
|
||||
|
||||
if srcName != "" && srcName != name {
|
||||
if pack, ok := srcPacks[srcName]; ok {
|
||||
pack.AddBinaryName(name)
|
||||
srcPacks[srcName] = pack
|
||||
} else {
|
||||
srcPacks[srcName] = models.SrcPackage{
|
||||
Name: srcName,
|
||||
Version: srcVersion,
|
||||
BinaryNames: []string{name},
|
||||
}
|
||||
if pack, ok := srcPacks[srcName]; ok {
|
||||
pack.AddBinaryName(name)
|
||||
srcPacks[srcName] = pack
|
||||
} else {
|
||||
srcPacks[srcName] = models.SrcPackage{
|
||||
Name: srcName,
|
||||
Version: srcVersion,
|
||||
BinaryNames: []string{name},
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Remove "linux"
|
||||
// kernel-related packages are showed "linux" as source package name
|
||||
// If "linux" is left, oval detection will cause trouble, so delete.
|
||||
delete(srcPacks, "linux")
|
||||
// Remove duplicate
|
||||
for name := range installed {
|
||||
delete(srcPacks, name)
|
||||
}
|
||||
return installed, srcPacks, nil
|
||||
}
|
||||
|
||||
@@ -460,8 +444,20 @@ func (o *debian) parseScannedPackagesLine(line string) (name, status, version, s
|
||||
status = strings.TrimSpace(ss[1])
|
||||
version = ss[2]
|
||||
// remove version. ex: tar (1.27.1-2)
|
||||
|
||||
// Source name and version are computed from binary package name and version in dpkg.
|
||||
// Source package name:
|
||||
// https://git.dpkg.org/cgit/dpkg/dpkg.git/tree/lib/dpkg/pkg-format.c#n338
|
||||
srcName = strings.Split(ss[3], " ")[0]
|
||||
if srcName == "" {
|
||||
srcName = name
|
||||
}
|
||||
// Source package version:
|
||||
// https://git.dpkg.org/cgit/dpkg/dpkg.git/tree/lib/dpkg/pkg-show.c#n428
|
||||
srcVersion = ss[4]
|
||||
if srcVersion == "" {
|
||||
srcVersion = version
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -1155,7 +1151,7 @@ func (o *debian) checkrestart() error {
|
||||
o.Packages[p.Name] = pack
|
||||
|
||||
for j, proc := range p.NeedRestartProcs {
|
||||
if proc.HasInit == false {
|
||||
if !proc.HasInit {
|
||||
continue
|
||||
}
|
||||
packs[i].NeedRestartProcs[j].InitSystem = initName
|
||||
|
||||
@@ -3,17 +3,24 @@ package scanner
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
ex "os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
"github.com/saintfish/chardet"
|
||||
"golang.org/x/text/encoding/japanese"
|
||||
"golang.org/x/text/encoding/unicode"
|
||||
"golang.org/x/text/transform"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
homedir "github.com/mitchellh/go-homedir"
|
||||
)
|
||||
|
||||
type execResult struct {
|
||||
@@ -62,7 +69,7 @@ const sudo = true
|
||||
// noSudo is Const value for normal user mode
|
||||
const noSudo = false
|
||||
|
||||
// Issue commands to the target servers in parallel via SSH or local execution. If execution fails, the server will be excluded from the target server list(servers) and added to the error server list(errServers).
|
||||
// Issue commands to the target servers in parallel via SSH or local execution. If execution fails, the server will be excluded from the target server list(servers) and added to the error server list(errServers).
|
||||
func parallelExec(fn func(osTypeInterface) error, timeoutSec ...int) {
|
||||
resChan := make(chan osTypeInterface, len(servers))
|
||||
defer close(resChan)
|
||||
@@ -128,7 +135,6 @@ func parallelExec(fn func(osTypeInterface) error, timeoutSec ...int) {
|
||||
}
|
||||
}
|
||||
servers = successes
|
||||
return
|
||||
}
|
||||
|
||||
func exec(c config.ServerInfo, cmd string, sudo bool, log ...logging.Logger) (result execResult) {
|
||||
@@ -153,15 +159,14 @@ func localExec(c config.ServerInfo, cmdstr string, sudo bool) (result execResult
|
||||
cmdstr = decorateCmd(c, cmdstr, sudo)
|
||||
var cmd *ex.Cmd
|
||||
switch c.Distro.Family {
|
||||
// case conf.FreeBSD, conf.Alpine, conf.Debian:
|
||||
// cmd = ex.Command("/bin/sh", "-c", cmdstr)
|
||||
case constant.Windows:
|
||||
cmd = ex.Command("powershell.exe", "-NoProfile", "-NonInteractive", cmdstr)
|
||||
default:
|
||||
cmd = ex.Command("/bin/sh", "-c", cmdstr)
|
||||
}
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
cmd.Stdout = &stdoutBuf
|
||||
cmd.Stderr = &stderrBuf
|
||||
|
||||
if err := cmd.Run(); err != nil {
|
||||
result.Error = err
|
||||
if exitError, ok := err.(*ex.ExitError); ok {
|
||||
@@ -173,42 +178,47 @@ func localExec(c config.ServerInfo, cmdstr string, sudo bool) (result execResult
|
||||
} else {
|
||||
result.ExitStatus = 0
|
||||
}
|
||||
|
||||
result.Stdout = stdoutBuf.String()
|
||||
result.Stderr = stderrBuf.String()
|
||||
result.Stdout = toUTF8(stdoutBuf.String())
|
||||
result.Stderr = toUTF8(stderrBuf.String())
|
||||
result.Cmd = strings.Replace(cmdstr, "\n", "", -1)
|
||||
return
|
||||
}
|
||||
|
||||
func sshExecExternal(c config.ServerInfo, cmd string, sudo bool) (result execResult) {
|
||||
func sshExecExternal(c config.ServerInfo, cmdstr string, sudo bool) (result execResult) {
|
||||
sshBinaryPath, err := ex.LookPath("ssh")
|
||||
if err != nil {
|
||||
return execResult{Error: err}
|
||||
}
|
||||
if runtime.GOOS == "windows" {
|
||||
sshBinaryPath = "ssh.exe"
|
||||
}
|
||||
|
||||
args := []string{"-tt"}
|
||||
var args []string
|
||||
|
||||
if c.SSHConfigPath != "" {
|
||||
args = append(args, "-F", c.SSHConfigPath)
|
||||
} else {
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("Failed to get HOME directory: %s", err)
|
||||
result.Stderr = msg
|
||||
result.ExitStatus = 997
|
||||
return
|
||||
}
|
||||
controlPath := filepath.Join(home, ".vuls", `controlmaster-%r-`+c.ServerName+`.%p`)
|
||||
|
||||
args = append(args,
|
||||
"-o", "StrictHostKeyChecking=yes",
|
||||
"-o", "LogLevel=quiet",
|
||||
"-o", "ConnectionAttempts=3",
|
||||
"-o", "ConnectTimeout=10",
|
||||
"-o", "ControlMaster=auto",
|
||||
"-o", fmt.Sprintf("ControlPath=%s", controlPath),
|
||||
"-o", "Controlpersist=10m",
|
||||
)
|
||||
if runtime.GOOS != "windows" {
|
||||
home, err := homedir.Dir()
|
||||
if err != nil {
|
||||
msg := fmt.Sprintf("Failed to get HOME directory: %s", err)
|
||||
result.Stderr = msg
|
||||
result.ExitStatus = 997
|
||||
return
|
||||
}
|
||||
|
||||
controlPath := filepath.Join(home, ".vuls", `controlmaster-%r-`+c.ServerName+`.%p`)
|
||||
args = append(args,
|
||||
"-o", "ControlMaster=auto",
|
||||
"-o", fmt.Sprintf("ControlPath=%s", controlPath),
|
||||
"-o", "Controlpersist=10m")
|
||||
}
|
||||
}
|
||||
|
||||
if config.Conf.Vvv {
|
||||
@@ -229,16 +239,18 @@ func sshExecExternal(c config.ServerInfo, cmd string, sudo bool) (result execRes
|
||||
}
|
||||
args = append(args, c.Host)
|
||||
|
||||
cmd = decorateCmd(c, cmd, sudo)
|
||||
cmd = fmt.Sprintf("stty cols 1000; %s", cmd)
|
||||
|
||||
args = append(args, cmd)
|
||||
execCmd := ex.Command(sshBinaryPath, args...)
|
||||
|
||||
cmdstr = decorateCmd(c, cmdstr, sudo)
|
||||
var cmd *ex.Cmd
|
||||
switch c.Distro.Family {
|
||||
case constant.Windows:
|
||||
cmd = ex.Command(sshBinaryPath, append(args, "powershell.exe", "-NoProfile", "-NonInteractive", fmt.Sprintf(`"%s`, cmdstr))...)
|
||||
default:
|
||||
cmd = ex.Command(sshBinaryPath, append(args, fmt.Sprintf("stty cols 1000; %s", cmdstr))...)
|
||||
}
|
||||
var stdoutBuf, stderrBuf bytes.Buffer
|
||||
execCmd.Stdout = &stdoutBuf
|
||||
execCmd.Stderr = &stderrBuf
|
||||
if err := execCmd.Run(); err != nil {
|
||||
cmd.Stdout = &stdoutBuf
|
||||
cmd.Stderr = &stderrBuf
|
||||
if err := cmd.Run(); err != nil {
|
||||
if e, ok := err.(*ex.ExitError); ok {
|
||||
if s, ok := e.Sys().(syscall.WaitStatus); ok {
|
||||
result.ExitStatus = s.ExitStatus()
|
||||
@@ -251,9 +263,8 @@ func sshExecExternal(c config.ServerInfo, cmd string, sudo bool) (result execRes
|
||||
} else {
|
||||
result.ExitStatus = 0
|
||||
}
|
||||
|
||||
result.Stdout = stdoutBuf.String()
|
||||
result.Stderr = stderrBuf.String()
|
||||
result.Stdout = toUTF8(stdoutBuf.String())
|
||||
result.Stderr = toUTF8(stderrBuf.String())
|
||||
result.Servername = c.ServerName
|
||||
result.Container = c.Container
|
||||
result.Host = c.Host
|
||||
@@ -281,7 +292,7 @@ func dockerShell(family string) string {
|
||||
|
||||
func decorateCmd(c config.ServerInfo, cmd string, sudo bool) string {
|
||||
if sudo && c.User != "root" && !c.IsContainer() {
|
||||
cmd = fmt.Sprintf("sudo -S %s", cmd)
|
||||
cmd = fmt.Sprintf("sudo %s", cmd)
|
||||
}
|
||||
|
||||
// If you are using pipe and you want to detect preprocessing errors, remove comment out
|
||||
@@ -307,10 +318,40 @@ func decorateCmd(c config.ServerInfo, cmd string, sudo bool) string {
|
||||
c.Container.Name, dockerShell(c.Distro.Family), cmd)
|
||||
// LXC required root privilege
|
||||
if c.User != "root" {
|
||||
cmd = fmt.Sprintf("sudo -S %s", cmd)
|
||||
cmd = fmt.Sprintf("sudo %s", cmd)
|
||||
}
|
||||
}
|
||||
}
|
||||
// cmd = fmt.Sprintf("set -x; %s", cmd)
|
||||
return cmd
|
||||
}
|
||||
|
||||
func toUTF8(s string) string {
|
||||
d := chardet.NewTextDetector()
|
||||
res, err := d.DetectBest([]byte(s))
|
||||
if err != nil {
|
||||
return s
|
||||
}
|
||||
|
||||
var bs []byte
|
||||
switch res.Charset {
|
||||
case "UTF-8":
|
||||
bs, err = []byte(s), nil
|
||||
case "UTF-16LE":
|
||||
bs, err = io.ReadAll(transform.NewReader(strings.NewReader(s), unicode.UTF16(unicode.LittleEndian, unicode.UseBOM).NewDecoder()))
|
||||
case "UTF-16BE":
|
||||
bs, err = io.ReadAll(transform.NewReader(strings.NewReader(s), unicode.UTF16(unicode.BigEndian, unicode.UseBOM).NewDecoder()))
|
||||
case "Shift_JIS":
|
||||
bs, err = io.ReadAll(transform.NewReader(strings.NewReader(s), japanese.ShiftJIS.NewDecoder()))
|
||||
case "EUC-JP":
|
||||
bs, err = io.ReadAll(transform.NewReader(strings.NewReader(s), japanese.EUCJP.NewDecoder()))
|
||||
case "ISO-2022-JP":
|
||||
bs, err = io.ReadAll(transform.NewReader(strings.NewReader(s), japanese.ISO2022JP.NewDecoder()))
|
||||
default:
|
||||
bs, err = []byte(s), nil
|
||||
}
|
||||
if err != nil {
|
||||
return s
|
||||
}
|
||||
return string(bs)
|
||||
}
|
||||
|
||||
@@ -39,14 +39,14 @@ func TestDecorateCmd(t *testing.T) {
|
||||
conf: config.ServerInfo{User: "non-root"},
|
||||
cmd: "ls",
|
||||
sudo: true,
|
||||
expected: "sudo -S ls",
|
||||
expected: "sudo ls",
|
||||
},
|
||||
// non-root sudo true
|
||||
{
|
||||
conf: config.ServerInfo{User: "non-root"},
|
||||
cmd: "ls | grep hoge",
|
||||
sudo: true,
|
||||
expected: "sudo -S ls | grep hoge",
|
||||
expected: "sudo ls | grep hoge",
|
||||
},
|
||||
// -------------docker-------------
|
||||
// root sudo false docker
|
||||
@@ -192,7 +192,7 @@ func TestDecorateCmd(t *testing.T) {
|
||||
},
|
||||
cmd: "ls",
|
||||
sudo: false,
|
||||
expected: `sudo -S lxc-attach -n def 2>/dev/null -- /bin/sh -c 'ls'`,
|
||||
expected: `sudo lxc-attach -n def 2>/dev/null -- /bin/sh -c 'ls'`,
|
||||
},
|
||||
// non-root sudo true, lxc
|
||||
{
|
||||
@@ -203,7 +203,7 @@ func TestDecorateCmd(t *testing.T) {
|
||||
},
|
||||
cmd: "ls",
|
||||
sudo: true,
|
||||
expected: `sudo -S lxc-attach -n def 2>/dev/null -- /bin/sh -c 'ls'`,
|
||||
expected: `sudo lxc-attach -n def 2>/dev/null -- /bin/sh -c 'ls'`,
|
||||
},
|
||||
// non-root sudo true lxc
|
||||
{
|
||||
@@ -214,7 +214,7 @@ func TestDecorateCmd(t *testing.T) {
|
||||
},
|
||||
cmd: "ls | grep hoge",
|
||||
sudo: true,
|
||||
expected: `sudo -S lxc-attach -n def 2>/dev/null -- /bin/sh -c 'ls | grep hoge'`,
|
||||
expected: `sudo lxc-attach -n def 2>/dev/null -- /bin/sh -c 'ls | grep hoge'`,
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
@@ -34,7 +34,7 @@ func newBsd(c config.ServerInfo) *bsd {
|
||||
return d
|
||||
}
|
||||
|
||||
//https://github.com/mizzy/specinfra/blob/master/lib/specinfra/helper/detect_os/freebsd.rb
|
||||
// https://github.com/mizzy/specinfra/blob/master/lib/specinfra/helper/detect_os/freebsd.rb
|
||||
func detectFreebsd(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
// Prevent from adding `set -o pipefail` option
|
||||
c.Distro = config.Distro{Family: constant.FreeBSD}
|
||||
|
||||
@@ -188,6 +188,39 @@ func detectRedhat(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
}
|
||||
}
|
||||
|
||||
if r := exec(c, "ls /etc/amazon-linux-release", noSudo); r.isSuccess() {
|
||||
// $ cat /etc/amazon-linux-release
|
||||
// Amazon Linux release 2022 (Amazon Linux)
|
||||
// Amazon Linux release 2023 (Amazon Linux)
|
||||
if r := exec(c, "cat /etc/amazon-linux-release", noSudo); r.isSuccess() {
|
||||
amazon := newAmazon(c)
|
||||
result := releasePattern.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
amazon.setErrs([]error{xerrors.Errorf("Failed to parse /etc/amazon-linux-release. r.Stdout: %s", r.Stdout)})
|
||||
return true, amazon
|
||||
}
|
||||
|
||||
release := result[2]
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
amazon.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, amazon
|
||||
}
|
||||
if major < 2022 {
|
||||
amazon.setErrs([]error{xerrors.Errorf("Failed to init Amazon Linux. err: not supported major version. versions prior to Amazon Linux 2022 are not supported, detected version is %s", release)})
|
||||
return true, amazon
|
||||
}
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "amazon", "amazon linux":
|
||||
amazon.setDistro(constant.Amazon, release)
|
||||
return true, amazon
|
||||
default:
|
||||
amazon.setErrs([]error{xerrors.Errorf("Failed to parse Amazon Linux Name. release: %s", release)})
|
||||
return true, amazon
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if r := exec(c, "ls /etc/redhat-release", noSudo); r.isSuccess() {
|
||||
// https://www.rackaid.com/blog/how-to-determine-centos-or-red-hat-version/
|
||||
// e.g.
|
||||
@@ -200,68 +233,64 @@ func detectRedhat(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
// Fedora release 35 (Thirty Five)
|
||||
if r := exec(c, "cat /etc/redhat-release", noSudo); r.isSuccess() {
|
||||
result := releasePattern.FindStringSubmatch(strings.TrimSpace(r.Stdout))
|
||||
if len(result) != 3 {
|
||||
rhel := newRHEL(c)
|
||||
rhel.setErrs([]error{xerrors.Errorf("Failed to parse /etc/redhat-release. r.Stdout: %s", r.Stdout)})
|
||||
return true, rhel
|
||||
}
|
||||
|
||||
release := result[2]
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
rhel := newRHEL(c)
|
||||
rhel.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, rhel
|
||||
}
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "fedora":
|
||||
fed := newFedora(c)
|
||||
if major < 32 {
|
||||
fed.setErrs([]error{xerrors.Errorf("Failed to init Fedora. err: not supported major version. versions prior to Fedora 32 are not supported, detected version is %s", release)})
|
||||
return true, fed
|
||||
}
|
||||
fed.setDistro(constant.Fedora, release)
|
||||
return true, fed
|
||||
case "centos", "centos linux":
|
||||
cent := newCentOS(c)
|
||||
if major < 5 {
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to init CentOS. err: not supported major version. versions prior to CentOS 5 are not supported, detected version is %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
cent.setDistro(constant.CentOS, release)
|
||||
return true, cent
|
||||
case "centos stream":
|
||||
cent := newCentOS(c)
|
||||
if major < 8 {
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to init CentOS Stream. err: not supported major version. versions prior to CentOS Stream 8 are not supported, detected version is %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
cent.setDistro(constant.CentOS, fmt.Sprintf("stream%s", release))
|
||||
return true, cent
|
||||
case "alma", "almalinux":
|
||||
alma := newAlma(c)
|
||||
if major < 8 {
|
||||
alma.setErrs([]error{xerrors.Errorf("Failed to init AlmaLinux. err: not supported major version. versions prior to AlmaLinux 8 are not supported, detected version is %s", release)})
|
||||
return true, alma
|
||||
}
|
||||
alma.setDistro(constant.Alma, release)
|
||||
return true, alma
|
||||
case "rocky", "rocky linux":
|
||||
rocky := newRocky(c)
|
||||
if major < 8 {
|
||||
rocky.setErrs([]error{xerrors.Errorf("Failed to init Rocky Linux. err: not supported major version. versions prior to Rocky Linux 8 are not supported, detected version is %s", release)})
|
||||
return true, rocky
|
||||
}
|
||||
rocky.setDistro(constant.Rocky, release)
|
||||
return true, rocky
|
||||
default:
|
||||
rhel := newRHEL(c)
|
||||
if major < 5 {
|
||||
rhel.setErrs([]error{xerrors.Errorf("Failed to init RedHat Enterprise Linux. err: not supported major version. versions prior to RedHat Enterprise Linux 5 are not supported, detected version is %s", release)})
|
||||
if len(result) == 3 {
|
||||
release := result[2]
|
||||
major, err := strconv.Atoi(util.Major(release))
|
||||
if err != nil {
|
||||
rhel := newRHEL(c)
|
||||
rhel.setErrs([]error{xerrors.Errorf("Failed to parse major version from release: %s", release)})
|
||||
return true, rhel
|
||||
}
|
||||
switch strings.ToLower(result[1]) {
|
||||
case "fedora":
|
||||
fed := newFedora(c)
|
||||
if major < 32 {
|
||||
fed.setErrs([]error{xerrors.Errorf("Failed to init Fedora. err: not supported major version. versions prior to Fedora 32 are not supported, detected version is %s", release)})
|
||||
return true, fed
|
||||
}
|
||||
fed.setDistro(constant.Fedora, release)
|
||||
return true, fed
|
||||
case "centos", "centos linux":
|
||||
cent := newCentOS(c)
|
||||
if major < 5 {
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to init CentOS. err: not supported major version. versions prior to CentOS 5 are not supported, detected version is %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
cent.setDistro(constant.CentOS, release)
|
||||
return true, cent
|
||||
case "centos stream":
|
||||
cent := newCentOS(c)
|
||||
if major < 8 {
|
||||
cent.setErrs([]error{xerrors.Errorf("Failed to init CentOS Stream. err: not supported major version. versions prior to CentOS Stream 8 are not supported, detected version is %s", release)})
|
||||
return true, cent
|
||||
}
|
||||
cent.setDistro(constant.CentOS, fmt.Sprintf("stream%s", release))
|
||||
return true, cent
|
||||
case "alma", "almalinux":
|
||||
alma := newAlma(c)
|
||||
if major < 8 {
|
||||
alma.setErrs([]error{xerrors.Errorf("Failed to init AlmaLinux. err: not supported major version. versions prior to AlmaLinux 8 are not supported, detected version is %s", release)})
|
||||
return true, alma
|
||||
}
|
||||
alma.setDistro(constant.Alma, release)
|
||||
return true, alma
|
||||
case "rocky", "rocky linux":
|
||||
rocky := newRocky(c)
|
||||
if major < 8 {
|
||||
rocky.setErrs([]error{xerrors.Errorf("Failed to init Rocky Linux. err: not supported major version. versions prior to Rocky Linux 8 are not supported, detected version is %s", release)})
|
||||
return true, rocky
|
||||
}
|
||||
rocky.setDistro(constant.Rocky, release)
|
||||
return true, rocky
|
||||
default:
|
||||
rhel := newRHEL(c)
|
||||
if major < 5 {
|
||||
rhel.setErrs([]error{xerrors.Errorf("Failed to init RedHat Enterprise Linux. err: not supported major version. versions prior to RedHat Enterprise Linux 5 are not supported, detected version is %s", release)})
|
||||
return true, rhel
|
||||
}
|
||||
rhel.setDistro(constant.RedHat, release)
|
||||
return true, rhel
|
||||
}
|
||||
rhel.setDistro(constant.RedHat, release)
|
||||
return true, rhel
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -270,19 +299,24 @@ func detectRedhat(c config.ServerInfo) (bool, osTypeInterface) {
|
||||
family := constant.Amazon
|
||||
release := "unknown"
|
||||
if r := exec(c, "cat /etc/system-release", noSudo); r.isSuccess() {
|
||||
if strings.HasPrefix(r.Stdout, "Amazon Linux release 2022") {
|
||||
fields := strings.Fields(r.Stdout)
|
||||
release = strings.Join(fields[3:], " ")
|
||||
} else if strings.HasPrefix(r.Stdout, "Amazon Linux 2022") {
|
||||
fields := strings.Fields(r.Stdout)
|
||||
release = strings.Join(fields[2:], " ")
|
||||
} else if strings.HasPrefix(r.Stdout, "Amazon Linux release 2") {
|
||||
fields := strings.Fields(r.Stdout)
|
||||
release = fmt.Sprintf("%s %s", fields[3], fields[4])
|
||||
} else if strings.HasPrefix(r.Stdout, "Amazon Linux 2") {
|
||||
fields := strings.Fields(r.Stdout)
|
||||
release = strings.Join(fields[2:], " ")
|
||||
} else {
|
||||
switch {
|
||||
case strings.HasPrefix(r.Stdout, "Amazon Linux AMI release"):
|
||||
// Amazon Linux AMI release 2017.09
|
||||
// Amazon Linux AMI release 2018.03
|
||||
release = "1"
|
||||
case strings.HasPrefix(r.Stdout, "Amazon Linux 2022"), strings.HasPrefix(r.Stdout, "Amazon Linux release 2022"):
|
||||
// Amazon Linux 2022 (Amazon Linux)
|
||||
// Amazon Linux release 2022 (Amazon Linux)
|
||||
release = "2022"
|
||||
case strings.HasPrefix(r.Stdout, "Amazon Linux 2023"), strings.HasPrefix(r.Stdout, "Amazon Linux release 2023"):
|
||||
// Amazon Linux 2023 (Amazon Linux)
|
||||
// Amazon Linux release 2023 (Amazon Linux)
|
||||
release = "2023"
|
||||
case strings.HasPrefix(r.Stdout, "Amazon Linux 2"), strings.HasPrefix(r.Stdout, "Amazon Linux release 2"):
|
||||
// Amazon Linux 2 (Karoo)
|
||||
// Amazon Linux release 2 (Karoo)
|
||||
release = "2"
|
||||
default:
|
||||
fields := strings.Fields(r.Stdout)
|
||||
if len(fields) == 5 {
|
||||
release = fields[4]
|
||||
@@ -485,28 +519,7 @@ func (o *redhatBase) parseInstalledPackages(stdout string) (models.Packages, mod
|
||||
continue
|
||||
}
|
||||
|
||||
var (
|
||||
pack *models.Package
|
||||
err error
|
||||
)
|
||||
switch o.getDistro().Family {
|
||||
case constant.Amazon:
|
||||
switch strings.Fields(o.getDistro().Release)[0] {
|
||||
case "2":
|
||||
switch len(strings.Fields(line)) {
|
||||
case 5:
|
||||
pack, err = o.parseInstalledPackagesLine(line)
|
||||
case 6:
|
||||
pack, err = o.parseInstalledPackagesLineFromRepoquery(line)
|
||||
default:
|
||||
return nil, nil, xerrors.Errorf("Failed to parse package line: %s", line)
|
||||
}
|
||||
default:
|
||||
pack, err = o.parseInstalledPackagesLine(line)
|
||||
}
|
||||
default:
|
||||
pack, err = o.parseInstalledPackagesLine(line)
|
||||
}
|
||||
pack, err := o.parseInstalledPackagesLine(line)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
@@ -538,30 +551,7 @@ func (o *redhatBase) parseInstalledPackages(stdout string) (models.Packages, mod
|
||||
|
||||
func (o *redhatBase) parseInstalledPackagesLine(line string) (*models.Package, error) {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) != 5 {
|
||||
return nil,
|
||||
xerrors.Errorf("Failed to parse package line: %s", line)
|
||||
}
|
||||
|
||||
ver := ""
|
||||
epoch := fields[1]
|
||||
if epoch == "0" || epoch == "(none)" {
|
||||
ver = fields[2]
|
||||
} else {
|
||||
ver = fmt.Sprintf("%s:%s", epoch, fields[2])
|
||||
}
|
||||
|
||||
return &models.Package{
|
||||
Name: fields[0],
|
||||
Version: ver,
|
||||
Release: fields[3],
|
||||
Arch: fields[4],
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (o *redhatBase) parseInstalledPackagesLineFromRepoquery(line string) (*models.Package, error) {
|
||||
fields := strings.Fields(line)
|
||||
if len(fields) != 6 {
|
||||
if len(fields) < 5 {
|
||||
return nil, xerrors.Errorf("Failed to parse package line: %s", line)
|
||||
}
|
||||
|
||||
@@ -573,9 +563,22 @@ func (o *redhatBase) parseInstalledPackagesLineFromRepoquery(line string) (*mode
|
||||
ver = fmt.Sprintf("%s:%s", epoch, fields[2])
|
||||
}
|
||||
|
||||
repo := strings.TrimPrefix(fields[5], "@")
|
||||
if repo == "installed" {
|
||||
repo = "amzn2-core"
|
||||
var repo string
|
||||
switch o.getDistro().Family {
|
||||
case constant.Amazon:
|
||||
switch strings.Fields(o.getDistro().Release)[0] {
|
||||
case "2":
|
||||
if len(fields) == 5 {
|
||||
break
|
||||
}
|
||||
if fields[5] == "installed" {
|
||||
repo = "amzn2-core"
|
||||
break
|
||||
}
|
||||
repo = strings.TrimPrefix(fields[5], "@")
|
||||
default:
|
||||
}
|
||||
default:
|
||||
}
|
||||
|
||||
return &models.Package{
|
||||
@@ -805,7 +808,7 @@ func (o *redhatBase) parseNeedsRestarting(stdout string) (procs []models.NeedRes
|
||||
return
|
||||
}
|
||||
|
||||
//TODO refactor
|
||||
// TODO refactor
|
||||
// procPathToFQPN returns Fully-Qualified-Package-Name from the command
|
||||
func (o *redhatBase) procPathToFQPN(execCommand string) (string, error) {
|
||||
execCommand = strings.Replace(execCommand, "\x00", " ", -1) // for CentOS6.9
|
||||
|
||||
@@ -118,32 +118,6 @@ kernel-devel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: `openssl 0 1.0.1e 30.el6.11 x86_64
|
||||
Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x84_64
|
||||
kernel 0 2.6.32 696.20.1.el6 x86_64
|
||||
kernel 0 2.6.32 696.20.3.el6 x86_64
|
||||
kernel 0 2.6.32 695.20.3.el6 x86_64`,
|
||||
distro: config.Distro{Family: constant.Amazon, Release: "2 (Karoo)"},
|
||||
kernel: models.Kernel{},
|
||||
packages: models.Packages{
|
||||
"openssl": models.Package{
|
||||
Name: "openssl",
|
||||
Version: "1.0.1e",
|
||||
Release: "30.el6.11",
|
||||
},
|
||||
"Percona-Server-shared-56": models.Package{
|
||||
Name: "Percona-Server-shared-56",
|
||||
Version: "1:5.6.19",
|
||||
Release: "rel67.0.el6",
|
||||
},
|
||||
"kernel": models.Package{
|
||||
Name: "kernel",
|
||||
Version: "2.6.32",
|
||||
Release: "696.20.3.el6",
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
in: `yum-utils 0 1.1.31 46.amzn2.0.1 noarch @amzn2-core
|
||||
zlib 0 1.2.7 19.amzn2.0.1 x86_64 installed
|
||||
@@ -195,67 +169,52 @@ java-1.8.0-amazon-corretto 1 1.8.0_192.b12 1.amzn2 x86_64 @amzn2extra-corretto8`
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
func TestParseInstalledPackagesLine(t *testing.T) {
|
||||
r := newRHEL(config.ServerInfo{})
|
||||
|
||||
var packagetests = []struct {
|
||||
in string
|
||||
pack models.Package
|
||||
err bool
|
||||
func Test_redhatBase_parseInstalledPackagesLine(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
distro config.Distro
|
||||
line string
|
||||
want *models.Package
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
"openssl 0 1.0.1e 30.el6.11 x86_64",
|
||||
models.Package{
|
||||
name: "rpm -qa redhat 6.11 1",
|
||||
distro: config.Distro{
|
||||
Family: constant.RedHat,
|
||||
Release: "6.11",
|
||||
},
|
||||
line: "openssl 0 1.0.1e 30.el6.11 x86_64",
|
||||
want: &models.Package{
|
||||
Name: "openssl",
|
||||
Version: "1.0.1e",
|
||||
Release: "30.el6.11",
|
||||
Arch: "x86_64",
|
||||
},
|
||||
false,
|
||||
},
|
||||
{
|
||||
"Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x84_64",
|
||||
models.Package{
|
||||
name: "rpm -qa redhat 6.11 2",
|
||||
distro: config.Distro{
|
||||
Family: constant.RedHat,
|
||||
Release: "6.11",
|
||||
},
|
||||
line: "Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x84_64",
|
||||
want: &models.Package{
|
||||
Name: "Percona-Server-shared-56",
|
||||
Version: "1:5.6.19",
|
||||
Release: "rel67.0.el6",
|
||||
Arch: "x84_64",
|
||||
},
|
||||
false,
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range packagetests {
|
||||
p, err := r.parseInstalledPackagesLine(tt.in)
|
||||
if err == nil && tt.err {
|
||||
t.Errorf("Expected err not occurred: %d", i)
|
||||
}
|
||||
if err != nil && !tt.err {
|
||||
t.Errorf("UnExpected err not occurred: %d", i)
|
||||
}
|
||||
if p.Name != tt.pack.Name {
|
||||
t.Errorf("name: expected %s, actual %s", tt.pack.Name, p.Name)
|
||||
}
|
||||
if p.Version != tt.pack.Version {
|
||||
t.Errorf("version: expected %s, actual %s", tt.pack.Version, p.Version)
|
||||
}
|
||||
if p.Release != tt.pack.Release {
|
||||
t.Errorf("release: expected %s, actual %s", tt.pack.Release, p.Release)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestParseInstalledPackagesLineFromRepoquery(t *testing.T) {
|
||||
r := newRHEL(config.ServerInfo{})
|
||||
|
||||
var packagetests = []struct {
|
||||
in string
|
||||
pack models.Package
|
||||
err bool
|
||||
}{
|
||||
{
|
||||
in: "yum-utils 0 1.1.31 46.amzn2.0.1 noarch @amzn2-core",
|
||||
pack: models.Package{
|
||||
name: "rpmquery amazonlinux 2 1",
|
||||
distro: config.Distro{
|
||||
Family: constant.Amazon,
|
||||
Release: "2",
|
||||
},
|
||||
line: "yum-utils 0 1.1.31 46.amzn2.0.1 noarch @amzn2-core",
|
||||
want: &models.Package{
|
||||
Name: "yum-utils",
|
||||
Version: "1.1.31",
|
||||
Release: "46.amzn2.0.1",
|
||||
@@ -264,8 +223,13 @@ func TestParseInstalledPackagesLineFromRepoquery(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "zlib 0 1.2.7 19.amzn2.0.1 x86_64 installed",
|
||||
pack: models.Package{
|
||||
name: "rpmquery amazonlinux 2 2",
|
||||
distro: config.Distro{
|
||||
Family: constant.Amazon,
|
||||
Release: "2",
|
||||
},
|
||||
line: "zlib 0 1.2.7 19.amzn2.0.1 x86_64 installed",
|
||||
want: &models.Package{
|
||||
Name: "zlib",
|
||||
Version: "1.2.7",
|
||||
Release: "19.amzn2.0.1",
|
||||
@@ -274,8 +238,13 @@ func TestParseInstalledPackagesLineFromRepoquery(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
in: "java-1.8.0-amazon-corretto 1 1.8.0_192.b12 1.amzn2 x86_64 @amzn2extra-corretto8",
|
||||
pack: models.Package{
|
||||
name: "rpmquery amazonlinux 2 3",
|
||||
distro: config.Distro{
|
||||
Family: constant.Amazon,
|
||||
Release: "2",
|
||||
},
|
||||
line: "java-1.8.0-amazon-corretto 1 1.8.0_192.b12 1.amzn2 x86_64 @amzn2extra-corretto8",
|
||||
want: &models.Package{
|
||||
Name: "java-1.8.0-amazon-corretto",
|
||||
Version: "1:1.8.0_192.b12",
|
||||
Release: "1.amzn2",
|
||||
@@ -284,32 +253,18 @@ func TestParseInstalledPackagesLineFromRepoquery(t *testing.T) {
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for i, tt := range packagetests {
|
||||
p, err := r.parseInstalledPackagesLineFromRepoquery(tt.in)
|
||||
if err == nil && tt.err {
|
||||
t.Errorf("Expected err not occurred: %d", i)
|
||||
}
|
||||
if err != nil && !tt.err {
|
||||
t.Errorf("UnExpected err not occurred: %d", i)
|
||||
}
|
||||
if p.Name != tt.pack.Name {
|
||||
t.Errorf("name: expected %s, actual %s", tt.pack.Name, p.Name)
|
||||
}
|
||||
if p.Version != tt.pack.Version {
|
||||
t.Errorf("version: expected %s, actual %s", tt.pack.Version, p.Version)
|
||||
}
|
||||
if p.Release != tt.pack.Release {
|
||||
t.Errorf("release: expected %s, actual %s", tt.pack.Release, p.Release)
|
||||
}
|
||||
if p.Arch != tt.pack.Arch {
|
||||
t.Errorf("arch: expected %s, actual %s", tt.pack.Arch, p.Arch)
|
||||
}
|
||||
if p.Repository != tt.pack.Repository {
|
||||
t.Errorf("repository: expected %s, actual %s", tt.pack.Repository, p.Repository)
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := (&redhatBase{base: base{Distro: tt.distro}}).parseInstalledPackagesLine(tt.line)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("redhatBase.parseInstalledPackagesLine() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("redhatBase.parseInstalledPackagesLine() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestParseYumCheckUpdateLine(t *testing.T) {
|
||||
@@ -603,7 +558,7 @@ func Test_redhatBase_parseRpmQfLine(t *testing.T) {
|
||||
{
|
||||
name: "valid line",
|
||||
fields: fields{base: base{}},
|
||||
args: args{line: "Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x86_64"},
|
||||
args: args{line: "Percona-Server-shared-56 1 5.6.19 rel67.0.el6 x86_64"},
|
||||
wantPkg: &models.Package{
|
||||
Name: "Percona-Server-shared-56",
|
||||
Version: "1:5.6.19",
|
||||
|
||||
@@ -6,10 +6,11 @@ import (
|
||||
"net/http"
|
||||
"os"
|
||||
ex "os/exec"
|
||||
"runtime"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
debver "github.com/knqyf263/go-deb-version"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/xerrors"
|
||||
|
||||
"github.com/future-architect/vuls/cache"
|
||||
@@ -149,64 +150,103 @@ func (s Scanner) Configtest() error {
|
||||
|
||||
// ViaHTTP scans servers by HTTP header and body
|
||||
func ViaHTTP(header http.Header, body string, toLocalFile bool) (models.ScanResult, error) {
|
||||
family := header.Get("X-Vuls-OS-Family")
|
||||
if family == "" {
|
||||
return models.ScanResult{}, errOSFamilyHeader
|
||||
}
|
||||
|
||||
release := header.Get("X-Vuls-OS-Release")
|
||||
if release == "" {
|
||||
return models.ScanResult{}, errOSReleaseHeader
|
||||
}
|
||||
|
||||
kernelRelease := header.Get("X-Vuls-Kernel-Release")
|
||||
if kernelRelease == "" {
|
||||
logging.Log.Warn("If X-Vuls-Kernel-Release is not specified, there is a possibility of false detection")
|
||||
}
|
||||
|
||||
kernelVersion := header.Get("X-Vuls-Kernel-Version")
|
||||
if family == constant.Debian {
|
||||
if kernelVersion == "" {
|
||||
logging.Log.Warn("X-Vuls-Kernel-Version is empty. skip kernel vulnerability detection.")
|
||||
} else {
|
||||
if _, err := debver.NewVersion(kernelVersion); err != nil {
|
||||
logging.Log.Warnf("X-Vuls-Kernel-Version is invalid. skip kernel vulnerability detection. actual kernelVersion: %s, err: %s", kernelVersion, err)
|
||||
kernelVersion = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
serverName := header.Get("X-Vuls-Server-Name")
|
||||
if toLocalFile && serverName == "" {
|
||||
return models.ScanResult{}, errServerNameHeader
|
||||
}
|
||||
|
||||
distro := config.Distro{
|
||||
Family: family,
|
||||
Release: release,
|
||||
family := header.Get("X-Vuls-OS-Family")
|
||||
if family == "" {
|
||||
return models.ScanResult{}, errOSFamilyHeader
|
||||
}
|
||||
|
||||
kernel := models.Kernel{
|
||||
Release: kernelRelease,
|
||||
Version: kernelVersion,
|
||||
}
|
||||
installedPackages, srcPackages, err := ParseInstalledPkgs(distro, kernel, body)
|
||||
if err != nil {
|
||||
return models.ScanResult{}, err
|
||||
}
|
||||
switch family {
|
||||
case constant.Windows:
|
||||
osInfo, hotfixs, err := parseSystemInfo(toUTF8(body))
|
||||
if err != nil {
|
||||
return models.ScanResult{}, xerrors.Errorf("Failed to parse systeminfo.exe. err: %w", err)
|
||||
}
|
||||
|
||||
return models.ScanResult{
|
||||
ServerName: serverName,
|
||||
Family: family,
|
||||
Release: release,
|
||||
RunningKernel: models.Kernel{
|
||||
release := header.Get("X-Vuls-OS-Release")
|
||||
if release == "" {
|
||||
logging.Log.Debugf("osInfo(systeminfo.exe): %+v", osInfo)
|
||||
release, err = detectOSName(osInfo)
|
||||
if err != nil {
|
||||
return models.ScanResult{}, xerrors.Errorf("Failed to detect os name. err: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
kernelVersion := header.Get("X-Vuls-Kernel-Version")
|
||||
if kernelVersion == "" {
|
||||
kernelVersion = formatKernelVersion(osInfo)
|
||||
}
|
||||
|
||||
kbs, err := DetectKBsFromKernelVersion(release, kernelVersion)
|
||||
if err != nil {
|
||||
return models.ScanResult{}, xerrors.Errorf("Failed to detect KBs from kernel version. err: %w", err)
|
||||
}
|
||||
|
||||
applied, unapplied := map[string]struct{}{}, map[string]struct{}{}
|
||||
for _, kb := range hotfixs {
|
||||
applied[kb] = struct{}{}
|
||||
}
|
||||
for _, kb := range kbs.Applied {
|
||||
applied[kb] = struct{}{}
|
||||
}
|
||||
for _, kb := range kbs.Unapplied {
|
||||
unapplied[kb] = struct{}{}
|
||||
}
|
||||
|
||||
return models.ScanResult{
|
||||
ServerName: serverName,
|
||||
Family: family,
|
||||
Release: release,
|
||||
RunningKernel: models.Kernel{
|
||||
Version: kernelVersion,
|
||||
},
|
||||
WindowsKB: &models.WindowsKB{Applied: maps.Keys(applied), Unapplied: maps.Keys(unapplied)},
|
||||
ScannedCves: models.VulnInfos{},
|
||||
}, nil
|
||||
default:
|
||||
release := header.Get("X-Vuls-OS-Release")
|
||||
if release == "" {
|
||||
return models.ScanResult{}, errOSReleaseHeader
|
||||
}
|
||||
|
||||
kernelRelease := header.Get("X-Vuls-Kernel-Release")
|
||||
if kernelRelease == "" {
|
||||
logging.Log.Warn("If X-Vuls-Kernel-Release is not specified, there is a possibility of false detection")
|
||||
}
|
||||
|
||||
kernelVersion := header.Get("X-Vuls-Kernel-Version")
|
||||
|
||||
distro := config.Distro{
|
||||
Family: family,
|
||||
Release: release,
|
||||
}
|
||||
|
||||
kernel := models.Kernel{
|
||||
Release: kernelRelease,
|
||||
Version: kernelVersion,
|
||||
},
|
||||
Packages: installedPackages,
|
||||
SrcPackages: srcPackages,
|
||||
ScannedCves: models.VulnInfos{},
|
||||
}, nil
|
||||
}
|
||||
installedPackages, srcPackages, err := ParseInstalledPkgs(distro, kernel, body)
|
||||
if err != nil {
|
||||
return models.ScanResult{}, err
|
||||
}
|
||||
|
||||
return models.ScanResult{
|
||||
ServerName: serverName,
|
||||
Family: family,
|
||||
Release: release,
|
||||
RunningKernel: models.Kernel{
|
||||
Release: kernelRelease,
|
||||
Version: kernelVersion,
|
||||
},
|
||||
Packages: installedPackages,
|
||||
SrcPackages: srcPackages,
|
||||
ScannedCves: models.VulnInfos{},
|
||||
}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// ParseInstalledPkgs parses installed pkgs line
|
||||
@@ -342,7 +382,14 @@ func validateSSHConfig(c *config.ServerInfo) error {
|
||||
|
||||
logging.Log.Debugf("Validating SSH Settings for Server:%s ...", c.GetServerName())
|
||||
|
||||
sshBinaryPath, err := ex.LookPath("ssh")
|
||||
if runtime.GOOS == "windows" {
|
||||
c.Distro.Family = constant.Windows
|
||||
}
|
||||
defer func(c *config.ServerInfo) {
|
||||
c.Distro.Family = ""
|
||||
}(c)
|
||||
|
||||
sshBinaryPath, err := lookpath(c.Distro.Family, "ssh")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to lookup ssh binary path. err: %w", err)
|
||||
}
|
||||
@@ -351,6 +398,10 @@ func validateSSHConfig(c *config.ServerInfo) error {
|
||||
logging.Log.Debugf("Executing... %s", strings.Replace(sshConfigCmd, "\n", "", -1))
|
||||
configResult := localExec(*c, sshConfigCmd, noSudo)
|
||||
if !configResult.isSuccess() {
|
||||
if strings.Contains(configResult.Stderr, "unknown option -- G") {
|
||||
logging.Log.Warn("SSH configuration validation is skipped. To enable validation, G option introduced in OpenSSH 6.8 must be enabled.")
|
||||
return nil
|
||||
}
|
||||
return xerrors.Errorf("Failed to print SSH configuration. err: %w", configResult.Error)
|
||||
}
|
||||
sshConfig := parseSSHConfiguration(configResult.Stdout)
|
||||
@@ -381,7 +432,7 @@ func validateSSHConfig(c *config.ServerInfo) error {
|
||||
return xerrors.New("Failed to find any known_hosts to use. Please check the UserKnownHostsFile and GlobalKnownHostsFile settings for SSH")
|
||||
}
|
||||
|
||||
sshKeyscanBinaryPath, err := ex.LookPath("ssh-keyscan")
|
||||
sshKeyscanBinaryPath, err := lookpath(c.Distro.Family, "ssh-keyscan")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to lookup ssh-keyscan binary path. err: %w", err)
|
||||
}
|
||||
@@ -392,7 +443,7 @@ func validateSSHConfig(c *config.ServerInfo) error {
|
||||
}
|
||||
serverKeys := parseSSHScan(r.Stdout)
|
||||
|
||||
sshKeygenBinaryPath, err := ex.LookPath("ssh-keygen")
|
||||
sshKeygenBinaryPath, err := lookpath(c.Distro.Family, "ssh-keygen")
|
||||
if err != nil {
|
||||
return xerrors.Errorf("Failed to lookup ssh-keygen binary path. err: %w", err)
|
||||
}
|
||||
@@ -428,6 +479,19 @@ func validateSSHConfig(c *config.ServerInfo) error {
|
||||
buildSSHKeyScanCmd(sshKeyscanBinaryPath, c.Port, knownHostsPaths[0], sshConfig))
|
||||
}
|
||||
|
||||
func lookpath(family, file string) (string, error) {
|
||||
switch family {
|
||||
case constant.Windows:
|
||||
return fmt.Sprintf("%s.exe", strings.TrimPrefix(file, ".exe")), nil
|
||||
default:
|
||||
p, err := ex.LookPath(file)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
func buildSSHBaseCmd(sshBinaryPath string, c *config.ServerInfo, options []string) []string {
|
||||
cmd := []string{sshBinaryPath}
|
||||
if len(options) > 0 {
|
||||
@@ -483,6 +547,7 @@ type sshConfiguration struct {
|
||||
func parseSSHConfiguration(stdout string) sshConfiguration {
|
||||
sshConfig := sshConfiguration{}
|
||||
for _, line := range strings.Split(stdout, "\n") {
|
||||
line = strings.TrimSuffix(line, "\r")
|
||||
switch {
|
||||
case strings.HasPrefix(line, "user "):
|
||||
sshConfig.user = strings.TrimPrefix(line, "user ")
|
||||
@@ -512,6 +577,7 @@ func parseSSHConfiguration(stdout string) sshConfiguration {
|
||||
func parseSSHScan(stdout string) map[string]string {
|
||||
keys := map[string]string{}
|
||||
for _, line := range strings.Split(stdout, "\n") {
|
||||
line = strings.TrimSuffix(line, "\r")
|
||||
if line == "" || strings.HasPrefix(line, "# ") {
|
||||
continue
|
||||
}
|
||||
@@ -524,6 +590,7 @@ func parseSSHScan(stdout string) map[string]string {
|
||||
|
||||
func parseSSHKeygen(stdout string) (string, string, error) {
|
||||
for _, line := range strings.Split(stdout, "\n") {
|
||||
line = strings.TrimSuffix(line, "\r")
|
||||
if line == "" || strings.HasPrefix(line, "# ") {
|
||||
continue
|
||||
}
|
||||
@@ -669,10 +736,20 @@ func (s Scanner) detectOS(c config.ServerInfo) osTypeInterface {
|
||||
return osType
|
||||
}
|
||||
|
||||
if itsMe, osType, fatalErr := s.detectDebianWithRetry(c); fatalErr != nil {
|
||||
osType.setErrs([]error{xerrors.Errorf("Failed to detect OS: %w", fatalErr)})
|
||||
if !isLocalExec(c.Port, c.Host) {
|
||||
if err := testFirstSSHConnection(c); err != nil {
|
||||
osType := &unknown{base{ServerInfo: c}}
|
||||
osType.setErrs([]error{xerrors.Errorf("Failed to test first SSH Connection. err: %w", err)})
|
||||
return osType
|
||||
}
|
||||
}
|
||||
|
||||
if itsMe, osType := detectWindows(c); itsMe {
|
||||
logging.Log.Debugf("Windows. Host: %s:%s", c.Host, c.Port)
|
||||
return osType
|
||||
} else if itsMe {
|
||||
}
|
||||
|
||||
if itsMe, osType := detectDebian(c); itsMe {
|
||||
logging.Log.Debugf("Debian based Linux. Host: %s:%s", c.Host, c.Port)
|
||||
return osType
|
||||
}
|
||||
@@ -702,28 +779,23 @@ func (s Scanner) detectOS(c config.ServerInfo) osTypeInterface {
|
||||
return osType
|
||||
}
|
||||
|
||||
// Retry as it may stall on the first SSH connection
|
||||
// https://github.com/future-architect/vuls/pull/753
|
||||
func (s Scanner) detectDebianWithRetry(c config.ServerInfo) (itsMe bool, deb osTypeInterface, err error) {
|
||||
type Response struct {
|
||||
itsMe bool
|
||||
deb osTypeInterface
|
||||
err error
|
||||
}
|
||||
resChan := make(chan Response, 1)
|
||||
go func(c config.ServerInfo) {
|
||||
itsMe, osType, fatalErr := detectDebian(c)
|
||||
resChan <- Response{itsMe, osType, fatalErr}
|
||||
}(c)
|
||||
|
||||
timeout := time.After(time.Duration(3) * time.Second)
|
||||
select {
|
||||
case res := <-resChan:
|
||||
return res.itsMe, res.deb, res.err
|
||||
case <-timeout:
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
return detectDebian(c)
|
||||
func testFirstSSHConnection(c config.ServerInfo) error {
|
||||
for i := 3; i > 0; i-- {
|
||||
rChan := make(chan execResult, 1)
|
||||
go func() {
|
||||
rChan <- exec(c, "exit", noSudo)
|
||||
}()
|
||||
select {
|
||||
case r := <-rChan:
|
||||
if r.ExitStatus == 255 {
|
||||
return xerrors.Errorf("Unable to connect via SSH. Scan with -vvv option to print SSH debugging messages and check SSH settings.\n%s", r)
|
||||
}
|
||||
return nil
|
||||
case <-time.After(time.Duration(3) * time.Second):
|
||||
}
|
||||
}
|
||||
logging.Log.Warnf("First SSH Connection to Host: %s:%s timeout", c.Host, c.Port)
|
||||
return nil
|
||||
}
|
||||
|
||||
// checkScanModes checks scan mode
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"golang.org/x/exp/slices"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/constant"
|
||||
"github.com/future-architect/vuls/models"
|
||||
@@ -104,6 +106,74 @@ func TestViaHTTP(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
header: map[string]string{
|
||||
"X-Vuls-OS-Family": "windows",
|
||||
},
|
||||
body: `
|
||||
Host Name: DESKTOP
|
||||
OS Name: Microsoft Windows 10 Pro
|
||||
OS Version: 10.0.19044 N/A Build 19044
|
||||
OS Manufacturer: Microsoft Corporation
|
||||
OS Configuration: Member Workstation
|
||||
OS Build Type: Multiprocessor Free
|
||||
Registered Owner: Windows User
|
||||
Registered Organization:
|
||||
Product ID: 00000-00000-00000-AA000
|
||||
Original Install Date: 2022/04/13, 12:25:41
|
||||
System Boot Time: 2022/06/06, 16:43:45
|
||||
System Manufacturer: HP
|
||||
System Model: HP EliteBook 830 G7 Notebook PC
|
||||
System Type: x64-based PC
|
||||
Processor(s): 1 Processor(s) Installed.
|
||||
[01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~1803 Mhz
|
||||
BIOS Version: HP S70 Ver. 01.05.00, 2021/04/26
|
||||
Windows Directory: C:\WINDOWS
|
||||
System Directory: C:\WINDOWS\system32
|
||||
Boot Device: \Device\HarddiskVolume2
|
||||
System Locale: en-us;English (United States)
|
||||
Input Locale: en-us;English (United States)
|
||||
Time Zone: (UTC-08:00) Pacific Time (US & Canada)
|
||||
Total Physical Memory: 15,709 MB
|
||||
Available Physical Memory: 12,347 MB
|
||||
Virtual Memory: Max Size: 18,141 MB
|
||||
Virtual Memory: Available: 14,375 MB
|
||||
Virtual Memory: In Use: 3,766 MB
|
||||
Page File Location(s): C:\pagefile.sys
|
||||
Domain: WORKGROUP
|
||||
Logon Server: \\DESKTOP
|
||||
Hotfix(s): 7 Hotfix(s) Installed.
|
||||
[01]: KB5012117
|
||||
[02]: KB4562830
|
||||
[03]: KB5003791
|
||||
[04]: KB5007401
|
||||
[05]: KB5012599
|
||||
[06]: KB5011651
|
||||
[07]: KB5005699
|
||||
Network Card(s): 1 NIC(s) Installed.
|
||||
[01]: Intel(R) Wi-Fi 6 AX201 160MHz
|
||||
Connection Name: Wi-Fi
|
||||
DHCP Enabled: Yes
|
||||
DHCP Server: 192.168.0.1
|
||||
IP address(es)
|
||||
[01]: 192.168.0.205
|
||||
Hyper-V Requirements: VM Monitor Mode Extensions: Yes
|
||||
Virtualization Enabled In Firmware: Yes
|
||||
Second Level Address Translation: Yes
|
||||
Data Execution Prevention Available: Yes
|
||||
`,
|
||||
expectedResult: models.ScanResult{
|
||||
Family: "windows",
|
||||
Release: "Windows 10 Version 21H2 for x64-based Systems",
|
||||
RunningKernel: models.Kernel{
|
||||
Version: "10.0.19044",
|
||||
},
|
||||
WindowsKB: &models.WindowsKB{
|
||||
Applied: []string{"5012117", "4562830", "5003791", "5007401", "5012599", "5011651", "5005699"},
|
||||
Unapplied: []string{},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
@@ -144,6 +214,18 @@ func TestViaHTTP(t *testing.T) {
|
||||
t.Errorf("release: expected %s, actual %s", expectedPack.Release, pack.Release)
|
||||
}
|
||||
}
|
||||
|
||||
if tt.expectedResult.WindowsKB != nil {
|
||||
slices.Sort(tt.expectedResult.WindowsKB.Applied)
|
||||
slices.Sort(tt.expectedResult.WindowsKB.Unapplied)
|
||||
}
|
||||
if result.WindowsKB != nil {
|
||||
slices.Sort(result.WindowsKB.Applied)
|
||||
slices.Sort(result.WindowsKB.Unapplied)
|
||||
}
|
||||
if !reflect.DeepEqual(tt.expectedResult.WindowsKB, result.WindowsKB) {
|
||||
t.Errorf("windows KB: expected %s, actual %s", tt.expectedResult.WindowsKB, result.WindowsKB)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -42,7 +42,7 @@ func isRunningKernel(pack models.Package, family string, kernel models.Kernel) (
|
||||
|
||||
// EnsureResultDir ensures the directory for scan results
|
||||
func EnsureResultDir(resultsDir string, scannedAt time.Time) (currentDir string, err error) {
|
||||
jsonDirName := scannedAt.Format(time.RFC3339)
|
||||
jsonDirName := scannedAt.Format("2006-01-02T15-04-05-0700")
|
||||
if resultsDir == "" {
|
||||
wd, _ := os.Getwd()
|
||||
resultsDir = filepath.Join(wd, "results")
|
||||
@@ -51,19 +51,6 @@ func EnsureResultDir(resultsDir string, scannedAt time.Time) (currentDir string,
|
||||
if err := os.MkdirAll(jsonDir, 0700); err != nil {
|
||||
return "", xerrors.Errorf("Failed to create dir: %w", err)
|
||||
}
|
||||
|
||||
symlinkPath := filepath.Join(resultsDir, "current")
|
||||
if _, err := os.Lstat(symlinkPath); err == nil {
|
||||
if err := os.Remove(symlinkPath); err != nil {
|
||||
return "", xerrors.Errorf(
|
||||
"Failed to remove symlink. path: %s, err: %w", symlinkPath, err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := os.Symlink(jsonDir, symlinkPath); err != nil {
|
||||
return "", xerrors.Errorf(
|
||||
"Failed to create symlink: path: %s, err: %w", symlinkPath, err)
|
||||
}
|
||||
return jsonDir, nil
|
||||
}
|
||||
|
||||
|
||||
4565
scanner/windows.go
Normal file
4565
scanner/windows.go
Normal file
File diff suppressed because it is too large
Load Diff
916
scanner/windows_test.go
Normal file
916
scanner/windows_test.go
Normal file
@@ -0,0 +1,916 @@
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"golang.org/x/exp/slices"
|
||||
)
|
||||
|
||||
func Test_parseSystemInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args string
|
||||
osInfo osInfo
|
||||
kbs []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Workstation",
|
||||
args: `
|
||||
Host Name: DESKTOP
|
||||
OS Name: Microsoft Windows 10 Pro
|
||||
OS Version: 10.0.19044 N/A Build 19044
|
||||
OS Manufacturer: Microsoft Corporation
|
||||
OS Configuration: Member Workstation
|
||||
OS Build Type: Multiprocessor Free
|
||||
Registered Owner: Windows User
|
||||
Registered Organization:
|
||||
Product ID: 00000-00000-00000-AA000
|
||||
Original Install Date: 2022/04/13, 12:25:41
|
||||
System Boot Time: 2022/06/06, 16:43:45
|
||||
System Manufacturer: HP
|
||||
System Model: HP EliteBook 830 G7 Notebook PC
|
||||
System Type: x64-based PC
|
||||
Processor(s): 1 Processor(s) Installed.
|
||||
[01]: Intel64 Family 6 Model 142 Stepping 12 GenuineIntel ~1803 Mhz
|
||||
BIOS Version: HP S70 Ver. 01.05.00, 2021/04/26
|
||||
Windows Directory: C:\WINDOWS
|
||||
System Directory: C:\WINDOWS\system32
|
||||
Boot Device: \Device\HarddiskVolume2
|
||||
System Locale: en-us;English (United States)
|
||||
Input Locale: en-us;English (United States)
|
||||
Time Zone: (UTC-08:00) Pacific Time (US & Canada)
|
||||
Total Physical Memory: 15,709 MB
|
||||
Available Physical Memory: 12,347 MB
|
||||
Virtual Memory: Max Size: 18,141 MB
|
||||
Virtual Memory: Available: 14,375 MB
|
||||
Virtual Memory: In Use: 3,766 MB
|
||||
Page File Location(s): C:\pagefile.sys
|
||||
Domain: WORKGROUP
|
||||
Logon Server: \\DESKTOP
|
||||
Hotfix(s): 7 Hotfix(s) Installed.
|
||||
[01]: KB5012117
|
||||
[02]: KB4562830
|
||||
[03]: KB5003791
|
||||
[04]: KB5007401
|
||||
[05]: KB5012599
|
||||
[06]: KB5011651
|
||||
[07]: KB5005699
|
||||
Network Card(s): 1 NIC(s) Installed.
|
||||
[01]: Intel(R) Wi-Fi 6 AX201 160MHz
|
||||
Connection Name: Wi-Fi
|
||||
DHCP Enabled: Yes
|
||||
DHCP Server: 192.168.0.1
|
||||
IP address(es)
|
||||
[01]: 192.168.0.205
|
||||
Hyper-V Requirements: VM Monitor Mode Extensions: Yes
|
||||
Virtualization Enabled In Firmware: Yes
|
||||
Second Level Address Translation: Yes
|
||||
Data Execution Prevention Available: Yes
|
||||
`,
|
||||
osInfo: osInfo{
|
||||
productName: "Microsoft Windows 10 Pro",
|
||||
version: "10.0",
|
||||
build: "19044",
|
||||
revision: "",
|
||||
edition: "",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Client",
|
||||
},
|
||||
kbs: []string{"5012117", "4562830", "5003791", "5007401", "5012599", "5011651", "5005699"},
|
||||
},
|
||||
{
|
||||
name: "Server",
|
||||
args: `
|
||||
Host Name: WIN-RIBN7SM07BK
|
||||
OS Name: Microsoft Windows Server 2022 Standard
|
||||
OS Version: 10.0.20348 N/A Build 20348
|
||||
OS Manufacturer: Microsoft Corporation
|
||||
OS Configuration: Standalone Server
|
||||
OS Build Type: Multiprocessor Free
|
||||
Registered Owner: Windows User
|
||||
Registered Organization:
|
||||
Product ID: 00454-10000-00001-AA483
|
||||
Original Install Date: 10/1/2021, 4:15:34 PM
|
||||
System Boot Time: 10/22/2021, 8:36:55 AM
|
||||
System Manufacturer: Microsoft Corporation
|
||||
System Model: Virtual Machine
|
||||
System Type: x64-based PC
|
||||
Processor(s): 1 Processor(s) Installed.
|
||||
[01]: Intel64 Family 6 Model 158 Stepping 9 GenuineIntel ~2808 Mhz
|
||||
BIOS Version: Microsoft Corporation Hyper-V UEFI Release v4.0, 12/17/2019
|
||||
Windows Directory: C:\Windows
|
||||
System Directory: C:\Windows\system32
|
||||
Boot Device: \Device\HarddiskVolume1
|
||||
System Locale: en-us;English (United States)
|
||||
Input Locale: en-us;English (United States)
|
||||
Time Zone: (UTC-08:00) Pacific Time (US & Canada)
|
||||
Total Physical Memory: 2,047 MB
|
||||
Available Physical Memory: 900 MB
|
||||
Virtual Memory: Max Size: 3,199 MB
|
||||
Virtual Memory: Available: 2,143 MB
|
||||
Virtual Memory: In Use: 1,056 MB
|
||||
Page File Location(s): C:\pagefile.sys
|
||||
Domain: WORKGROUP
|
||||
Logon Server: \\WIN-RIBN7SM07BK
|
||||
Hotfix(s): 3 Hotfix(s) Installed.
|
||||
[01]: KB5004330
|
||||
[02]: KB5005039
|
||||
[03]: KB5005552
|
||||
Network Card(s): 1 NIC(s) Installed.
|
||||
[01]: Microsoft Hyper-V Network Adapter
|
||||
Connection Name: Ethernet
|
||||
DHCP Enabled: Yes
|
||||
DHCP Server: 192.168.254.254
|
||||
IP address(es)
|
||||
[01]: 192.168.254.172
|
||||
[02]: fe80::b4a1:11cc:2c4:4f57
|
||||
Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed.
|
||||
`,
|
||||
osInfo: osInfo{
|
||||
productName: "Microsoft Windows Server 2022 Standard",
|
||||
version: "10.0",
|
||||
build: "20348",
|
||||
revision: "",
|
||||
edition: "",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Server",
|
||||
},
|
||||
kbs: []string{"5004330", "5005039", "5005552"},
|
||||
},
|
||||
{
|
||||
name: "Domain Controller",
|
||||
args: `
|
||||
Host Name: vuls
|
||||
OS Name: Microsoft Windows Server 2019 Datacenter
|
||||
OS Version: 10.0.17763 N/A Build 17763
|
||||
OS Manufacturer: Microsoft Corporation
|
||||
OS Configuration: Primary Domain Controller
|
||||
OS Build Type: Multiprocessor Free
|
||||
Registered Owner: N/A
|
||||
Registered Organization: N/A
|
||||
Product ID: 00430-00000-00000-AA602
|
||||
Original Install Date: 1/16/2023, 10:04:07 AM
|
||||
System Boot Time: 3/28/2023, 8:37:14 AM
|
||||
System Manufacturer: Microsoft Corporation
|
||||
System Model: Virtual Machine
|
||||
System Type: x64-based PC
|
||||
Processor(s): 1 Processor(s) Installed.
|
||||
[01]: Intel64 Family 6 Model 85 Stepping 4 GenuineIntel ~2095 Mhz
|
||||
BIOS Version: Microsoft Corporation Hyper-V UEFI Release v4.1, 5/9/2022
|
||||
Windows Directory: C:\Windows
|
||||
System Directory: C:\Windows\system32
|
||||
Boot Device: \Device\HarddiskVolume3
|
||||
System Locale: en-us;English (United States)
|
||||
Input Locale: en-us;English (United States)
|
||||
Time Zone: (UTC) Coordinated Universal Time
|
||||
Total Physical Memory: 16,383 MB
|
||||
Available Physical Memory: 13,170 MB
|
||||
Virtual Memory: Max Size: 18,431 MB
|
||||
Virtual Memory: Available: 15,208 MB
|
||||
Virtual Memory: In Use: 3,223 MB
|
||||
Page File Location(s): C:\pagefile.sys
|
||||
Domain: vuls
|
||||
Logon Server: \\vuls
|
||||
Hotfix(s): 5 Hotfix(s) Installed.
|
||||
[01]: KB5022511
|
||||
[02]: KB5012170
|
||||
[03]: KB5023702
|
||||
[04]: KB5020374
|
||||
[05]: KB5023789
|
||||
Hyper-V Requirements: A hypervisor has been detected. Features required for Hyper-V will not be displayed.
|
||||
`,
|
||||
osInfo: osInfo{
|
||||
productName: "Microsoft Windows Server 2019 Datacenter",
|
||||
version: "10.0",
|
||||
build: "17763",
|
||||
revision: "",
|
||||
edition: "",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Domain Controller",
|
||||
},
|
||||
kbs: []string{"5022511", "5012170", "5023702", "5020374", "5023789"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
osInfo, kbs, err := parseSystemInfo(tt.args)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parseSystemInfo() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if osInfo != tt.osInfo {
|
||||
t.Errorf("parseSystemInfo() got = %v, want %v", osInfo, tt.osInfo)
|
||||
}
|
||||
if !reflect.DeepEqual(kbs, tt.kbs) {
|
||||
t.Errorf("parseSystemInfo() got = %v, want %v", kbs, tt.kbs)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseGetComputerInfo(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args string
|
||||
want osInfo
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy",
|
||||
args: `
|
||||
WindowsProductName : Windows 10 Pro
|
||||
OsVersion : 10.0.19044
|
||||
WindowsEditionId : Professional
|
||||
OsCSDVersion :
|
||||
CsSystemType : x64-based PC
|
||||
WindowsInstallationType : Client
|
||||
`,
|
||||
want: osInfo{
|
||||
productName: "Windows 10 Pro",
|
||||
version: "10.0",
|
||||
build: "19044",
|
||||
revision: "",
|
||||
edition: "Professional",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Client",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := parseGetComputerInfo(tt.args)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parseGetComputerInfo() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("parseGetComputerInfo() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseWmiObject(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args string
|
||||
want osInfo
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy",
|
||||
args: `
|
||||
Caption : Microsoft Windows 10 Pro
|
||||
Version : 10.0.19044
|
||||
OperatingSystemSKU : 48
|
||||
CSDVersion :
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
DomainRole : 1
|
||||
SystemType : x64-based PC`,
|
||||
want: osInfo{
|
||||
productName: "Microsoft Windows 10 Pro",
|
||||
version: "10.0",
|
||||
build: "19044",
|
||||
revision: "",
|
||||
edition: "Professional",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Client",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := parseWmiObject(tt.args)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parseWmiObject() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("parseWmiObject() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseRegistry(t *testing.T) {
|
||||
type args struct {
|
||||
stdout string
|
||||
arch string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want osInfo
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy",
|
||||
args: args{
|
||||
stdout: `
|
||||
ProductName : Windows 10 Pro
|
||||
CurrentVersion : 6.3
|
||||
CurrentMajorVersionNumber : 10
|
||||
CurrentMinorVersionNumber : 0
|
||||
CurrentBuildNumber : 19044
|
||||
UBR : 2364
|
||||
EditionID : Professional
|
||||
InstallationType : Client`,
|
||||
arch: "AMD64",
|
||||
},
|
||||
want: osInfo{
|
||||
productName: "Windows 10 Pro",
|
||||
version: "10.0",
|
||||
build: "19044",
|
||||
revision: "2364",
|
||||
edition: "Professional",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Client",
|
||||
},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := parseRegistry(tt.args.stdout, tt.args.arch)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("parseRegistry() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("parseRegistry() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_detectOSName(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args osInfo
|
||||
want string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "Windows 10 for x64-based Systems",
|
||||
args: osInfo{
|
||||
productName: "Microsoft Windows 10 Pro",
|
||||
version: "10.0",
|
||||
build: "10585",
|
||||
revision: "",
|
||||
edition: "Professional",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Client",
|
||||
},
|
||||
want: "Windows 10 for x64-based Systems",
|
||||
},
|
||||
{
|
||||
name: "Windows 10 Version 21H2 for x64-based Systems",
|
||||
args: osInfo{
|
||||
productName: "Microsoft Windows 10 Pro",
|
||||
version: "10.0",
|
||||
build: "19044",
|
||||
revision: "",
|
||||
edition: "Professional",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Client",
|
||||
},
|
||||
want: "Windows 10 Version 21H2 for x64-based Systems",
|
||||
},
|
||||
{
|
||||
name: "Windows Server 2022",
|
||||
args: osInfo{
|
||||
productName: "Windows Server",
|
||||
version: "10.0",
|
||||
build: "30000",
|
||||
revision: "",
|
||||
edition: "",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Server",
|
||||
},
|
||||
want: "Windows Server 2022",
|
||||
},
|
||||
{
|
||||
name: "Windows Server 2019",
|
||||
args: osInfo{
|
||||
productName: "Microsoft Windows Server 2019 Datacenter",
|
||||
version: "10.0",
|
||||
build: "17763",
|
||||
revision: "",
|
||||
edition: "",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Domain Controller",
|
||||
},
|
||||
want: "Windows Server 2019",
|
||||
},
|
||||
{
|
||||
name: "err",
|
||||
args: osInfo{
|
||||
productName: "Microsoft Windows 10 Pro",
|
||||
version: "10.0",
|
||||
build: "build",
|
||||
revision: "",
|
||||
edition: "Professional",
|
||||
servicePack: "",
|
||||
arch: "x64-based",
|
||||
installationType: "Client",
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
got, err := detectOSName(tt.args)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("detectOSName() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if got != tt.want {
|
||||
t.Errorf("detectOSName() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_formatKernelVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args osInfo
|
||||
want string
|
||||
}{
|
||||
{
|
||||
name: "major.minor.build.revision",
|
||||
args: osInfo{
|
||||
version: "10.0",
|
||||
build: "19045",
|
||||
revision: "2130",
|
||||
},
|
||||
want: "10.0.19045.2130",
|
||||
},
|
||||
{
|
||||
name: "major.minor.build",
|
||||
args: osInfo{
|
||||
version: "10.0",
|
||||
build: "19045",
|
||||
},
|
||||
want: "10.0.19045",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if got := formatKernelVersion(tt.args); got != tt.want {
|
||||
t.Errorf("formatKernelVersion() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseInstalledPackages(t *testing.T) {
|
||||
type args struct {
|
||||
stdout string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want models.Packages
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy",
|
||||
args: args{
|
||||
stdout: `
|
||||
Name : Git
|
||||
Version : 2.35.1.2
|
||||
ProviderName : Programs
|
||||
|
||||
Name : Oracle Database 11g Express Edition
|
||||
Version : 11.2.0
|
||||
ProviderName : msi
|
||||
|
||||
Name : 2022-12 x64 ベース システム用 Windows 10 Version 21H2 の累積更新プログラム (KB5021233)
|
||||
Version :
|
||||
ProviderName : msu
|
||||
`,
|
||||
},
|
||||
want: models.Packages{
|
||||
"Git": {
|
||||
Name: "Git",
|
||||
Version: "2.35.1.2",
|
||||
},
|
||||
"Oracle Database 11g Express Edition": {
|
||||
Name: "Oracle Database 11g Express Edition",
|
||||
Version: "11.2.0",
|
||||
},
|
||||
},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
o := &windows{}
|
||||
got, _, err := o.parseInstalledPackages(tt.args.stdout)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("windows.parseInstalledPackages() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("windows.parseInstalledPackages() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseGetHotfix(t *testing.T) {
|
||||
type args struct {
|
||||
stdout string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy",
|
||||
args: args{
|
||||
stdout: `
|
||||
HotFixID : KB5020872
|
||||
|
||||
HotFixID : KB4562830
|
||||
`,
|
||||
},
|
||||
want: []string{"5020872", "4562830"},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
o := &windows{}
|
||||
got, err := o.parseGetHotfix(tt.args.stdout)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("windows.parseGetHotfix() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("windows.parseGetHotfix() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseGetPackageMSU(t *testing.T) {
|
||||
type args struct {
|
||||
stdout string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy",
|
||||
args: args{
|
||||
stdout: `
|
||||
Name : Git
|
||||
Version : 2.35.1.2
|
||||
ProviderName : Programs
|
||||
|
||||
Name : Oracle Database 11g Express Edition
|
||||
Version : 11.2.0
|
||||
ProviderName : msi
|
||||
|
||||
Name : 2022-12 x64 ベース システム用 Windows 10 Version 21H2 の累積更新プログラム (KB5021233)
|
||||
Version :
|
||||
ProviderName : msu
|
||||
`,
|
||||
},
|
||||
want: []string{"5021233"},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
o := &windows{}
|
||||
got, err := o.parseGetPackageMSU(tt.args.stdout)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("windows.parseGetPackageMSU() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("windows.parseGetPackageMSU() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseWindowsUpdaterSearch(t *testing.T) {
|
||||
type args struct {
|
||||
stdout string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy",
|
||||
args: args{
|
||||
stdout: `5012170
|
||||
5021233
|
||||
5021088
|
||||
`,
|
||||
},
|
||||
want: []string{"5012170", "5021233", "5021088"},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
o := &windows{}
|
||||
got, err := o.parseWindowsUpdaterSearch(tt.args.stdout)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("windows.parseWindowsUpdaterSearch() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("windows.parseWindowsUpdaterSearch() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_parseWindowsUpdateHistory(t *testing.T) {
|
||||
type args struct {
|
||||
stdout string
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want []string
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "happy",
|
||||
args: args{
|
||||
stdout: `
|
||||
Title : 2022-10 x64 ベース システム用 Windows 10 Version 21H2 の累積更新プログラム (KB5020435)
|
||||
Operation : 1
|
||||
ResultCode : 2
|
||||
|
||||
Title : 2022-10 x64 ベース システム用 Windows 10 Version 21H2 の累積更新プログラム (KB5020435)
|
||||
Operation : 2
|
||||
ResultCode : 2
|
||||
|
||||
Title : 2022-12 x64 (KB5021088) 向け Windows 10 Version 21H2 用 .NET Framework 3.5、4.8 および 4.8.1 の累積的な更新プログラム
|
||||
Operation : 1
|
||||
ResultCode : 2
|
||||
|
||||
Title : 2022-12 x64 ベース システム用 Windows 10 Version 21H2 の累積更新プログラム (KB5021233)
|
||||
Operation : 1
|
||||
ResultCode : 2
|
||||
`,
|
||||
},
|
||||
want: []string{"5021088", "5021233"},
|
||||
wantErr: false,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
o := &windows{}
|
||||
got, err := o.parseWindowsUpdateHistory(tt.args.stdout)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("windows.parseWindowsUpdateHistory() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
slices.Sort(got)
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("windows.parseWindowsUpdateHistory() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_windows_detectKBsFromKernelVersion(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
base base
|
||||
want models.WindowsKB
|
||||
wantErr bool
|
||||
}{
|
||||
{
|
||||
name: "10.0.19045.2129",
|
||||
base: base{
|
||||
Distro: config.Distro{Release: "Windows 10 Version 22H2 for x64-based Systems"},
|
||||
osPackages: osPackages{Kernel: models.Kernel{Version: "10.0.19045.2129"}},
|
||||
},
|
||||
want: models.WindowsKB{
|
||||
Applied: nil,
|
||||
Unapplied: []string{"5020953", "5019959", "5020030", "5021233", "5022282", "5019275", "5022834", "5022906", "5023696", "5023773", "5025221", "5025297", "5026361", "5026435", "5027215", "5027293"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "10.0.19045.2130",
|
||||
base: base{
|
||||
Distro: config.Distro{Release: "Windows 10 Version 22H2 for x64-based Systems"},
|
||||
osPackages: osPackages{Kernel: models.Kernel{Version: "10.0.19045.2130"}},
|
||||
},
|
||||
want: models.WindowsKB{
|
||||
Applied: nil,
|
||||
Unapplied: []string{"5020953", "5019959", "5020030", "5021233", "5022282", "5019275", "5022834", "5022906", "5023696", "5023773", "5025221", "5025297", "5026361", "5026435", "5027215", "5027293"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "10.0.22621.1105",
|
||||
base: base{
|
||||
Distro: config.Distro{Release: "Windows 11 Version 22H2 for x64-based Systems"},
|
||||
osPackages: osPackages{Kernel: models.Kernel{Version: "10.0.22621.1105"}},
|
||||
},
|
||||
want: models.WindowsKB{
|
||||
Applied: []string{"5019311", "5017389", "5018427", "5019509", "5018496", "5019980", "5020044", "5021255", "5022303"},
|
||||
Unapplied: []string{"5022360", "5022845", "5022913", "5023706", "5023778", "5025239", "5025305", "5026372", "5026446", "5027231", "5027303"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "10.0.20348.1547",
|
||||
base: base{
|
||||
Distro: config.Distro{Release: "Windows Server 2022"},
|
||||
osPackages: osPackages{Kernel: models.Kernel{Version: "10.0.20348.1547"}},
|
||||
},
|
||||
want: models.WindowsKB{
|
||||
Applied: []string{"5005575", "5005619", "5006699", "5006745", "5007205", "5007254", "5008223", "5010197", "5009555", "5010796", "5009608", "5010354", "5010421", "5011497", "5011558", "5012604", "5012637", "5013944", "5015013", "5014021", "5014678", "5014665", "5015827", "5015879", "5016627", "5016693", "5017316", "5017381", "5018421", "5020436", "5018485", "5019081", "5021656", "5020032", "5021249", "5022553", "5022291", "5022842"},
|
||||
Unapplied: []string{"5023705", "5025230", "5026370", "5027225"},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "10.0.20348.9999",
|
||||
base: base{
|
||||
Distro: config.Distro{Release: "Windows Server 2022"},
|
||||
osPackages: osPackages{Kernel: models.Kernel{Version: "10.0.20348.9999"}},
|
||||
},
|
||||
want: models.WindowsKB{
|
||||
Applied: []string{"5005575", "5005619", "5006699", "5006745", "5007205", "5007254", "5008223", "5010197", "5009555", "5010796", "5009608", "5010354", "5010421", "5011497", "5011558", "5012604", "5012637", "5013944", "5015013", "5014021", "5014678", "5014665", "5015827", "5015879", "5016627", "5016693", "5017316", "5017381", "5018421", "5020436", "5018485", "5019081", "5021656", "5020032", "5021249", "5022553", "5022291", "5022842", "5023705", "5025230", "5026370", "5027225"},
|
||||
Unapplied: nil,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "err",
|
||||
base: base{
|
||||
Distro: config.Distro{Release: "Windows 10 Version 22H2 for x64-based Systems"},
|
||||
osPackages: osPackages{Kernel: models.Kernel{Version: "10.0"}},
|
||||
},
|
||||
wantErr: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
o := &windows{
|
||||
base: tt.base,
|
||||
}
|
||||
got, err := DetectKBsFromKernelVersion(o.getDistro().Release, o.Kernel.Version)
|
||||
if (err != nil) != tt.wantErr {
|
||||
t.Errorf("windows.detectKBsFromKernelVersion() error = %v, wantErr %v", err, tt.wantErr)
|
||||
return
|
||||
}
|
||||
if !reflect.DeepEqual(got, tt.want) {
|
||||
t.Errorf("windows.detectKBsFromKernelVersion() = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_windows_parseIP(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
args string
|
||||
ipv4Addrs []string
|
||||
ipv6Addrs []string
|
||||
}{
|
||||
{
|
||||
name: "en",
|
||||
args: `
|
||||
|
||||
Windows IP Configuration
|
||||
|
||||
|
||||
Ethernet adapter イーサネット 4:
|
||||
|
||||
Connection-specific DNS Suffix . : vuls.local
|
||||
Link-local IPv6 Address . . . . . : fe80::19b6:ae27:d1fe:2041%33
|
||||
Link-local IPv6 Address . . . . . : fe80::7080:8828:5cc8:c0ba%33
|
||||
IPv4 Address. . . . . . . . . . . : 10.145.8.50
|
||||
Subnet Mask . . . . . . . . . . . : 255.255.0.0
|
||||
Default Gateway . . . . . . . . . : ::
|
||||
|
||||
Ethernet adapter イーサネット 2:
|
||||
|
||||
Connection-specific DNS Suffix . :
|
||||
Link-local IPv6 Address . . . . . : fe80::f49d:2c16:4270:759d%9
|
||||
IPv4 Address. . . . . . . . . . . : 192.168.56.1
|
||||
Subnet Mask . . . . . . . . . . . : 255.255.255.0
|
||||
Default Gateway . . . . . . . . . :
|
||||
|
||||
Wireless LAN adapter ローカル エリア接続* 1:
|
||||
|
||||
Media State . . . . . . . . . . . : Media disconnected
|
||||
Connection-specific DNS Suffix . :
|
||||
|
||||
Wireless LAN adapter ローカル エリア接続* 2:
|
||||
|
||||
Media State . . . . . . . . . . . : Media disconnected
|
||||
Connection-specific DNS Suffix . :
|
||||
|
||||
Wireless LAN adapter Wi-Fi:
|
||||
|
||||
Connection-specific DNS Suffix . :
|
||||
IPv4 Address. . . . . . . . . . . : 192.168.0.205
|
||||
Subnet Mask . . . . . . . . . . . : 255.255.255.0
|
||||
Default Gateway . . . . . . . . . : 192.168.0.1
|
||||
|
||||
Ethernet adapter Bluetooth ネットワーク接続:
|
||||
|
||||
Media State . . . . . . . . . . . : Media disconnected
|
||||
Connection-specific DNS Suffix . :
|
||||
`,
|
||||
ipv4Addrs: []string{"10.145.8.50", "192.168.56.1", "192.168.0.205"},
|
||||
ipv6Addrs: []string{"fe80::19b6:ae27:d1fe:2041", "fe80::7080:8828:5cc8:c0ba", "fe80::f49d:2c16:4270:759d"},
|
||||
},
|
||||
{
|
||||
name: "ja",
|
||||
args: `
|
||||
|
||||
Windows IP 構成
|
||||
|
||||
|
||||
イーサネット アダプター イーサネット 4:
|
||||
|
||||
接続固有の DNS サフィックス . . . . .: future.co.jp
|
||||
リンクローカル IPv6 アドレス. . . . .: fe80::19b6:ae27:d1fe:2041%33
|
||||
リンクローカル IPv6 アドレス. . . . .: fe80::7080:8828:5cc8:c0ba%33
|
||||
IPv4 アドレス . . . . . . . . . . . .: 10.145.8.50
|
||||
サブネット マスク . . . . . . . . . .: 255.255.0.0
|
||||
デフォルト ゲートウェイ . . . . . . .: ::
|
||||
|
||||
イーサネット アダプター イーサネット 2:
|
||||
|
||||
接続固有の DNS サフィックス . . . . .:
|
||||
リンクローカル IPv6 アドレス. . . . .: fe80::f49d:2c16:4270:759d%9
|
||||
IPv4 アドレス . . . . . . . . . . . .: 192.168.56.1
|
||||
サブネット マスク . . . . . . . . . .: 255.255.255.0
|
||||
デフォルト ゲートウェイ . . . . . . .:
|
||||
|
||||
Wireless LAN adapter ローカル エリア接続* 1:
|
||||
|
||||
メディアの状態. . . . . . . . . . . .: メディアは接続されていません
|
||||
接続固有の DNS サフィックス . . . . .:
|
||||
|
||||
Wireless LAN adapter ローカル エリア接続* 2:
|
||||
|
||||
メディアの状態. . . . . . . . . . . .: メディアは接続されていません
|
||||
接続固有の DNS サフィックス . . . . .:
|
||||
|
||||
Wireless LAN adapter Wi-Fi:
|
||||
|
||||
接続固有の DNS サフィックス . . . . .:
|
||||
IPv4 アドレス . . . . . . . . . . . .: 192.168.0.205
|
||||
サブネット マスク . . . . . . . . . .: 255.255.255.0
|
||||
デフォルト ゲートウェイ . . . . . . .: 192.168.0.1
|
||||
|
||||
イーサネット アダプター Bluetooth ネットワーク接続:
|
||||
|
||||
メディアの状態. . . . . . . . . . . .: メディアは接続されていません
|
||||
接続固有の DNS サフィックス . . . . .:
|
||||
`,
|
||||
ipv4Addrs: []string{"10.145.8.50", "192.168.56.1", "192.168.0.205"},
|
||||
ipv6Addrs: []string{"fe80::19b6:ae27:d1fe:2041", "fe80::7080:8828:5cc8:c0ba", "fe80::f49d:2c16:4270:759d"},
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
gotIPv4s, gotIPv6s := (&windows{}).parseIP(tt.args)
|
||||
if !reflect.DeepEqual(gotIPv4s, tt.ipv4Addrs) {
|
||||
t.Errorf("windows.parseIP() got = %v, want %v", gotIPv4s, tt.ipv4Addrs)
|
||||
}
|
||||
if !reflect.DeepEqual(gotIPv6s, tt.ipv6Addrs) {
|
||||
t.Errorf("windows.parseIP() got = %v, want %v", gotIPv6s, tt.ipv6Addrs)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -39,13 +39,14 @@ func (h VulsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
if mediatype == "application/json" {
|
||||
switch mediatype {
|
||||
case "application/json":
|
||||
if err = json.NewDecoder(req.Body).Decode(&r); err != nil {
|
||||
logging.Log.Error(err)
|
||||
http.Error(w, "Invalid JSON", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
} else if mediatype == "text/plain" {
|
||||
case "text/plain":
|
||||
buf := new(bytes.Buffer)
|
||||
if _, err := io.Copy(buf, req.Body); err != nil {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
@@ -56,7 +57,7 @@ func (h VulsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
http.Error(w, err.Error(), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
default:
|
||||
logging.Log.Error(mediatype)
|
||||
http.Error(w, fmt.Sprintf("Invalid Content-Type: %s", contentType), http.StatusUnsupportedMediaType)
|
||||
return
|
||||
@@ -129,7 +130,7 @@ func (h VulsHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
|
||||
return
|
||||
}
|
||||
|
||||
// sever subcmd doesn't have diff option
|
||||
// server subcmd doesn't have diff option
|
||||
reports = append(reports, reporter.LocalFileWriter{
|
||||
CurrentDir: dir,
|
||||
FormatJSON: true,
|
||||
|
||||
@@ -216,6 +216,7 @@ host = "{{$ip}}"
|
||||
#type = "pseudo"
|
||||
#memo = "DB Server"
|
||||
#findLock = true
|
||||
#findLockDirs = [ "/path/to/prject/lib" ]
|
||||
#lockfiles = ["/path/to/package-lock.json"]
|
||||
#cpeNames = [ "cpe:/a:rubyonrails:ruby_on_rails:4.2.1" ]
|
||||
#owaspDCXMLPath = "/path/to/dependency-check-report.xml"
|
||||
@@ -239,6 +240,7 @@ host = "{{$ip}}"
|
||||
#cmdPath = "/usr/local/bin/wp"
|
||||
#osUser = "wordpress"
|
||||
#docRoot = "/path/to/DocumentRoot/"
|
||||
#noSudo = false
|
||||
|
||||
#[servers.{{index $names $i}}.portscan]
|
||||
#scannerBinPath = "/usr/bin/nmap"
|
||||
@@ -246,6 +248,10 @@ host = "{{$ip}}"
|
||||
#scanTechniques = ["sS"]
|
||||
#sourcePort = "65535"
|
||||
|
||||
#[servers.{{index $names $i}}.windows]
|
||||
#serverSelection = 3
|
||||
#cabPath = "/path/to/wsusscn2.cab"
|
||||
|
||||
#[servers.{{index $names $i}}.optional]
|
||||
#key = "value1"
|
||||
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
//go:build !scanner
|
||||
// +build !scanner
|
||||
//go:build !scanner && !windows
|
||||
|
||||
package subcmds
|
||||
|
||||
@@ -10,26 +9,29 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/utils"
|
||||
"github.com/google/subcommands"
|
||||
"github.com/k0kubun/pp"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/detector"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/reporter"
|
||||
"github.com/google/subcommands"
|
||||
"github.com/k0kubun/pp"
|
||||
)
|
||||
|
||||
// ReportCmd is subcommand for reporting
|
||||
type ReportCmd struct {
|
||||
configPath string
|
||||
|
||||
formatJSON bool
|
||||
formatOneEMail bool
|
||||
formatCsv bool
|
||||
formatFullText bool
|
||||
formatOneLineText bool
|
||||
formatList bool
|
||||
gzip bool
|
||||
formatJSON bool
|
||||
formatOneEMail bool
|
||||
formatCsv bool
|
||||
formatFullText bool
|
||||
formatOneLineText bool
|
||||
formatList bool
|
||||
formatCycloneDXJSON bool
|
||||
formatCycloneDXXML bool
|
||||
gzip bool
|
||||
|
||||
toSlack bool
|
||||
toChatWork bool
|
||||
@@ -80,6 +82,9 @@ func (*ReportCmd) Usage() string {
|
||||
[-format-one-line-text]
|
||||
[-format-list]
|
||||
[-format-full-text]
|
||||
[-format-csv]
|
||||
[-format-cyclonedx-json]
|
||||
[-format-cyclonedx-xml]
|
||||
[-gzip]
|
||||
[-http-proxy=http://192.168.0.1:8080]
|
||||
[-debug]
|
||||
@@ -150,6 +155,8 @@ func (p *ReportCmd) SetFlags(f *flag.FlagSet) {
|
||||
f.BoolVar(&p.formatList, "format-list", false, "Display as list format")
|
||||
f.BoolVar(&p.formatFullText, "format-full-text", false,
|
||||
"Detail report in plain text")
|
||||
f.BoolVar(&p.formatCycloneDXJSON, "format-cyclonedx-json", false, "CycloneDX JSON format")
|
||||
f.BoolVar(&p.formatCycloneDXXML, "format-cyclonedx-xml", false, "CycloneDX XML format")
|
||||
|
||||
f.BoolVar(&p.toSlack, "to-slack", false, "Send report via Slack")
|
||||
f.BoolVar(&p.toChatWork, "to-chatwork", false, "Send report via chatwork")
|
||||
@@ -225,7 +232,8 @@ func (p *ReportCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}
|
||||
}
|
||||
|
||||
if !(p.formatJSON || p.formatOneLineText ||
|
||||
p.formatList || p.formatFullText || p.formatCsv) {
|
||||
p.formatList || p.formatFullText || p.formatCsv ||
|
||||
p.formatCycloneDXJSON || p.formatCycloneDXXML) {
|
||||
p.formatList = true
|
||||
}
|
||||
|
||||
@@ -310,15 +318,17 @@ func (p *ReportCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}
|
||||
|
||||
if p.toLocalFile {
|
||||
reports = append(reports, reporter.LocalFileWriter{
|
||||
CurrentDir: dir,
|
||||
DiffPlus: config.Conf.DiffPlus,
|
||||
DiffMinus: config.Conf.DiffMinus,
|
||||
FormatJSON: p.formatJSON,
|
||||
FormatCsv: p.formatCsv,
|
||||
FormatFullText: p.formatFullText,
|
||||
FormatOneLineText: p.formatOneLineText,
|
||||
FormatList: p.formatList,
|
||||
Gzip: p.gzip,
|
||||
CurrentDir: dir,
|
||||
DiffPlus: config.Conf.DiffPlus,
|
||||
DiffMinus: config.Conf.DiffMinus,
|
||||
FormatJSON: p.formatJSON,
|
||||
FormatCsv: p.formatCsv,
|
||||
FormatFullText: p.formatFullText,
|
||||
FormatOneLineText: p.formatOneLineText,
|
||||
FormatList: p.formatList,
|
||||
FormatCycloneDXJSON: p.formatCycloneDXJSON,
|
||||
FormatCycloneDXXML: p.formatCycloneDXXML,
|
||||
Gzip: p.gzip,
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
372
subcmds/report_windows.go
Normal file
372
subcmds/report_windows.go
Normal file
@@ -0,0 +1,372 @@
|
||||
//go:build !scanner && windows
|
||||
|
||||
package subcmds
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/aquasecurity/trivy/pkg/utils"
|
||||
"github.com/google/subcommands"
|
||||
"github.com/k0kubun/pp"
|
||||
|
||||
"github.com/future-architect/vuls/config"
|
||||
"github.com/future-architect/vuls/detector"
|
||||
"github.com/future-architect/vuls/logging"
|
||||
"github.com/future-architect/vuls/models"
|
||||
"github.com/future-architect/vuls/reporter"
|
||||
)
|
||||
|
||||
// ReportCmd is subcommand for reporting
|
||||
type ReportCmd struct {
|
||||
configPath string
|
||||
|
||||
formatJSON bool
|
||||
formatOneEMail bool
|
||||
formatCsv bool
|
||||
formatFullText bool
|
||||
formatOneLineText bool
|
||||
formatList bool
|
||||
formatCycloneDXJSON bool
|
||||
formatCycloneDXXML bool
|
||||
gzip bool
|
||||
|
||||
toSlack bool
|
||||
toChatWork bool
|
||||
toGoogleChat bool
|
||||
toTelegram bool
|
||||
toEmail bool
|
||||
toLocalFile bool
|
||||
toS3 bool
|
||||
toAzureBlob bool
|
||||
toHTTP bool
|
||||
}
|
||||
|
||||
// Name return subcommand name
|
||||
func (*ReportCmd) Name() string { return "report" }
|
||||
|
||||
// Synopsis return synopsis
|
||||
func (*ReportCmd) Synopsis() string { return "Reporting" }
|
||||
|
||||
// Usage return usage
|
||||
func (*ReportCmd) Usage() string {
|
||||
return `report:
|
||||
report
|
||||
[-lang=en|ja]
|
||||
[-config=/path/to/config.toml]
|
||||
[-results-dir=/path/to/results]
|
||||
[-log-to-file]
|
||||
[-log-dir=/path/to/log]
|
||||
[-refresh-cve]
|
||||
[-cvss-over=7]
|
||||
[-confidence-over=80]
|
||||
[-diff]
|
||||
[-diff-minus]
|
||||
[-diff-plus]
|
||||
[-ignore-unscored-cves]
|
||||
[-ignore-unfixed]
|
||||
[-to-email]
|
||||
[-to-http]
|
||||
[-to-slack]
|
||||
[-to-chatwork]
|
||||
[-to-googlechat]
|
||||
[-to-telegram]
|
||||
[-to-localfile]
|
||||
[-to-s3]
|
||||
[-to-azure-blob]
|
||||
[-format-json]
|
||||
[-format-one-email]
|
||||
[-format-one-line-text]
|
||||
[-format-list]
|
||||
[-format-full-text]
|
||||
[-format-csv]
|
||||
[-format-cyclonedx-json]
|
||||
[-format-cyclonedx-xml]
|
||||
[-gzip]
|
||||
[-http-proxy=http://192.168.0.1:8080]
|
||||
[-debug]
|
||||
[-debug-sql]
|
||||
[-quiet]
|
||||
[-no-progress]
|
||||
[-pipe]
|
||||
[-http="http://vuls-report-server"]
|
||||
[-trivy-cachedb-dir=/path/to/dir]
|
||||
|
||||
[RFC3339 datetime format under results dir]
|
||||
`
|
||||
}
|
||||
|
||||
// SetFlags set flag
|
||||
func (p *ReportCmd) SetFlags(f *flag.FlagSet) {
|
||||
f.StringVar(&config.Conf.Lang, "lang", "en", "[en|ja]")
|
||||
f.BoolVar(&config.Conf.Debug, "debug", false, "debug mode")
|
||||
f.BoolVar(&config.Conf.DebugSQL, "debug-sql", false, "SQL debug mode")
|
||||
f.BoolVar(&config.Conf.Quiet, "quiet", false, "Quiet mode. No output on stdout")
|
||||
f.BoolVar(&config.Conf.NoProgress, "no-progress", false, "Suppress progress bar")
|
||||
|
||||
wd, _ := os.Getwd()
|
||||
defaultConfPath := filepath.Join(wd, "config.toml")
|
||||
f.StringVar(&p.configPath, "config", defaultConfPath, "/path/to/toml")
|
||||
|
||||
defaultResultsDir := filepath.Join(wd, "results")
|
||||
f.StringVar(&config.Conf.ResultsDir, "results-dir", defaultResultsDir, "/path/to/results")
|
||||
|
||||
defaultLogDir := logging.GetDefaultLogDir()
|
||||
f.StringVar(&config.Conf.LogDir, "log-dir", defaultLogDir, "/path/to/log")
|
||||
f.BoolVar(&config.Conf.LogToFile, "log-to-file", false, "Output log to file")
|
||||
|
||||
f.BoolVar(&config.Conf.RefreshCve, "refresh-cve", false,
|
||||
"Refresh CVE information in JSON file under results dir")
|
||||
|
||||
f.Float64Var(&config.Conf.CvssScoreOver, "cvss-over", 0,
|
||||
"-cvss-over=6.5 means reporting CVSS Score 6.5 and over (default: 0 (means report all))")
|
||||
|
||||
f.IntVar(&config.Conf.ConfidenceScoreOver, "confidence-over", 80,
|
||||
"-confidence-over=40 means reporting Confidence Score 40 and over (default: 80)")
|
||||
|
||||
f.BoolVar(&config.Conf.DiffMinus, "diff-minus", false,
|
||||
"Minus Difference between previous result and current result")
|
||||
|
||||
f.BoolVar(&config.Conf.DiffPlus, "diff-plus", false,
|
||||
"Plus Difference between previous result and current result")
|
||||
|
||||
f.BoolVar(&config.Conf.Diff, "diff", false,
|
||||
"Plus & Minus Difference between previous result and current result")
|
||||
|
||||
f.BoolVar(&config.Conf.IgnoreUnscoredCves, "ignore-unscored-cves", false,
|
||||
"Don't report the unscored CVEs")
|
||||
|
||||
f.BoolVar(&config.Conf.IgnoreUnfixed, "ignore-unfixed", false,
|
||||
"Don't report the unfixed CVEs")
|
||||
|
||||
f.StringVar(
|
||||
&config.Conf.HTTPProxy, "http-proxy", "",
|
||||
"http://proxy-url:port (default: empty)")
|
||||
|
||||
f.BoolVar(&p.formatJSON, "format-json", false, "JSON format")
|
||||
f.BoolVar(&p.formatCsv, "format-csv", false, "CSV format")
|
||||
f.BoolVar(&p.formatOneEMail, "format-one-email", false,
|
||||
"Send all the host report via only one EMail (Specify with -to-email)")
|
||||
f.BoolVar(&p.formatOneLineText, "format-one-line-text", false,
|
||||
"One line summary in plain text")
|
||||
f.BoolVar(&p.formatList, "format-list", false, "Display as list format")
|
||||
f.BoolVar(&p.formatFullText, "format-full-text", false,
|
||||
"Detail report in plain text")
|
||||
f.BoolVar(&p.formatCycloneDXJSON, "format-cyclonedx-json", false, "CycloneDX JSON format")
|
||||
f.BoolVar(&p.formatCycloneDXXML, "format-cyclonedx-xml", false, "CycloneDX XML format")
|
||||
|
||||
f.BoolVar(&p.toSlack, "to-slack", false, "Send report via Slack")
|
||||
f.BoolVar(&p.toChatWork, "to-chatwork", false, "Send report via chatwork")
|
||||
f.BoolVar(&p.toGoogleChat, "to-googlechat", false, "Send report via Google Chat")
|
||||
f.BoolVar(&p.toTelegram, "to-telegram", false, "Send report via Telegram")
|
||||
f.BoolVar(&p.toEmail, "to-email", false, "Send report via Email")
|
||||
f.BoolVar(&p.toLocalFile, "to-localfile", false, "Write report to localfile")
|
||||
f.BoolVar(&p.toS3, "to-s3", false, "Write report to S3 (bucket/yyyyMMdd_HHmm/servername.json/txt)")
|
||||
f.BoolVar(&p.toHTTP, "to-http", false, "Send report via HTTP POST")
|
||||
f.BoolVar(&p.toAzureBlob, "to-azure-blob", false,
|
||||
"Write report to Azure Storage blob (container/yyyyMMdd_HHmm/servername.json/txt)")
|
||||
|
||||
f.BoolVar(&p.gzip, "gzip", false, "gzip compression")
|
||||
f.BoolVar(&config.Conf.Pipe, "pipe", false, "Use args passed via PIPE")
|
||||
|
||||
f.StringVar(&config.Conf.TrivyCacheDBDir, "trivy-cachedb-dir",
|
||||
utils.DefaultCacheDir(), "/path/to/dir")
|
||||
}
|
||||
|
||||
// Execute execute
|
||||
func (p *ReportCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{}) subcommands.ExitStatus {
|
||||
logging.Log = logging.NewCustomLogger(config.Conf.Debug, config.Conf.Quiet, config.Conf.LogToFile, config.Conf.LogDir, "", "")
|
||||
logging.Log.Infof("vuls-%s-%s", config.Version, config.Revision)
|
||||
|
||||
if p.configPath == "" {
|
||||
for _, cnf := range []config.VulnDictInterface{
|
||||
&config.Conf.CveDict,
|
||||
&config.Conf.OvalDict,
|
||||
&config.Conf.Gost,
|
||||
&config.Conf.Exploit,
|
||||
&config.Conf.Metasploit,
|
||||
&config.Conf.KEVuln,
|
||||
} {
|
||||
cnf.Init()
|
||||
}
|
||||
} else {
|
||||
if err := config.Load(p.configPath); err != nil {
|
||||
logging.Log.Errorf("Error loading %s. err: %+v", p.configPath, err)
|
||||
return subcommands.ExitUsageError
|
||||
}
|
||||
}
|
||||
|
||||
config.Conf.Slack.Enabled = p.toSlack
|
||||
config.Conf.ChatWork.Enabled = p.toChatWork
|
||||
config.Conf.GoogleChat.Enabled = p.toGoogleChat
|
||||
config.Conf.Telegram.Enabled = p.toTelegram
|
||||
config.Conf.EMail.Enabled = p.toEmail
|
||||
config.Conf.AWS.Enabled = p.toS3
|
||||
config.Conf.Azure.Enabled = p.toAzureBlob
|
||||
config.Conf.HTTP.Enabled = p.toHTTP
|
||||
|
||||
if config.Conf.Diff {
|
||||
config.Conf.DiffPlus, config.Conf.DiffMinus = true, true
|
||||
}
|
||||
|
||||
var dir string
|
||||
var err error
|
||||
if config.Conf.DiffPlus || config.Conf.DiffMinus {
|
||||
dir, err = reporter.JSONDir(config.Conf.ResultsDir, []string{})
|
||||
} else {
|
||||
dir, err = reporter.JSONDir(config.Conf.ResultsDir, f.Args())
|
||||
}
|
||||
if err != nil {
|
||||
logging.Log.Errorf("Failed to read from JSON: %+v", err)
|
||||
return subcommands.ExitFailure
|
||||
}
|
||||
|
||||
logging.Log.Info("Validating config...")
|
||||
if !config.Conf.ValidateOnReport() {
|
||||
return subcommands.ExitUsageError
|
||||
}
|
||||
|
||||
if !(p.formatJSON || p.formatOneLineText ||
|
||||
p.formatList || p.formatFullText || p.formatCsv ||
|
||||
p.formatCycloneDXJSON || p.formatCycloneDXXML) {
|
||||
p.formatList = true
|
||||
}
|
||||
|
||||
var loaded models.ScanResults
|
||||
if loaded, err = reporter.LoadScanResults(dir); err != nil {
|
||||
logging.Log.Error(err)
|
||||
return subcommands.ExitFailure
|
||||
}
|
||||
logging.Log.Infof("Loaded: %s", dir)
|
||||
|
||||
var res models.ScanResults
|
||||
hasError := false
|
||||
for _, r := range loaded {
|
||||
if len(r.Errors) == 0 {
|
||||
res = append(res, r)
|
||||
} else {
|
||||
logging.Log.Errorf("Ignored since errors occurred during scanning: %s, err: %v",
|
||||
r.ServerName, r.Errors)
|
||||
hasError = true
|
||||
}
|
||||
}
|
||||
|
||||
if len(res) == 0 {
|
||||
return subcommands.ExitFailure
|
||||
}
|
||||
|
||||
for _, r := range res {
|
||||
logging.Log.Debugf("%s: %s",
|
||||
r.ServerInfo(), pp.Sprintf("%s", config.Conf.Servers[r.ServerName]))
|
||||
}
|
||||
|
||||
if res, err = detector.Detect(res, dir); err != nil {
|
||||
logging.Log.Errorf("%+v", err)
|
||||
return subcommands.ExitFailure
|
||||
}
|
||||
|
||||
// report
|
||||
reports := []reporter.ResultWriter{
|
||||
reporter.StdoutWriter{
|
||||
FormatFullText: p.formatFullText,
|
||||
FormatOneLineText: p.formatOneLineText,
|
||||
FormatList: p.formatList,
|
||||
},
|
||||
}
|
||||
|
||||
if p.toSlack {
|
||||
reports = append(reports, reporter.SlackWriter{
|
||||
FormatOneLineText: p.formatOneLineText,
|
||||
Cnf: config.Conf.Slack,
|
||||
Proxy: config.Conf.HTTPProxy,
|
||||
})
|
||||
}
|
||||
|
||||
if p.toChatWork {
|
||||
reports = append(reports, reporter.ChatWorkWriter{Cnf: config.Conf.ChatWork, Proxy: config.Conf.HTTPProxy})
|
||||
}
|
||||
|
||||
if p.toGoogleChat {
|
||||
reports = append(reports, reporter.GoogleChatWriter{Cnf: config.Conf.GoogleChat, Proxy: config.Conf.HTTPProxy})
|
||||
}
|
||||
|
||||
if p.toTelegram {
|
||||
reports = append(reports, reporter.TelegramWriter{Cnf: config.Conf.Telegram})
|
||||
}
|
||||
|
||||
if p.toEmail {
|
||||
reports = append(reports, reporter.EMailWriter{
|
||||
FormatOneEMail: p.formatOneEMail,
|
||||
FormatOneLineText: p.formatOneLineText,
|
||||
FormatList: p.formatList,
|
||||
Cnf: config.Conf.EMail,
|
||||
})
|
||||
}
|
||||
|
||||
if p.toHTTP {
|
||||
reports = append(reports, reporter.HTTPRequestWriter{URL: config.Conf.HTTP.URL})
|
||||
}
|
||||
|
||||
if p.toLocalFile {
|
||||
reports = append(reports, reporter.LocalFileWriter{
|
||||
CurrentDir: dir,
|
||||
DiffPlus: config.Conf.DiffPlus,
|
||||
DiffMinus: config.Conf.DiffMinus,
|
||||
FormatJSON: p.formatJSON,
|
||||
FormatCsv: p.formatCsv,
|
||||
FormatFullText: p.formatFullText,
|
||||
FormatOneLineText: p.formatOneLineText,
|
||||
FormatList: p.formatList,
|
||||
FormatCycloneDXJSON: p.formatCycloneDXJSON,
|
||||
FormatCycloneDXXML: p.formatCycloneDXXML,
|
||||
Gzip: p.gzip,
|
||||
})
|
||||
}
|
||||
|
||||
if p.toS3 {
|
||||
w := reporter.S3Writer{
|
||||
FormatJSON: p.formatJSON,
|
||||
FormatFullText: p.formatFullText,
|
||||
FormatOneLineText: p.formatOneLineText,
|
||||
FormatList: p.formatList,
|
||||
Gzip: p.gzip,
|
||||
AWSConf: config.Conf.AWS,
|
||||
}
|
||||
if err := w.Validate(); err != nil {
|
||||
logging.Log.Errorf("Check if there is a bucket beforehand: %s, err: %+v", config.Conf.AWS.S3Bucket, err)
|
||||
return subcommands.ExitUsageError
|
||||
}
|
||||
reports = append(reports, w)
|
||||
}
|
||||
|
||||
if p.toAzureBlob {
|
||||
w := reporter.AzureBlobWriter{
|
||||
FormatJSON: p.formatJSON,
|
||||
FormatFullText: p.formatFullText,
|
||||
FormatOneLineText: p.formatOneLineText,
|
||||
FormatList: p.formatList,
|
||||
Gzip: p.gzip,
|
||||
AzureConf: config.Conf.Azure,
|
||||
}
|
||||
if err := w.Validate(); err != nil {
|
||||
logging.Log.Errorf("Check if there is a container beforehand: %s, err: %+v", config.Conf.Azure.ContainerName, err)
|
||||
return subcommands.ExitUsageError
|
||||
}
|
||||
reports = append(reports, w)
|
||||
}
|
||||
|
||||
for _, w := range reports {
|
||||
if err := w.Write(res...); err != nil {
|
||||
logging.Log.Errorf("Failed to report. err: %+v", err)
|
||||
return subcommands.ExitFailure
|
||||
}
|
||||
}
|
||||
|
||||
if hasError {
|
||||
return subcommands.ExitFailure
|
||||
}
|
||||
|
||||
return subcommands.ExitSuccess
|
||||
}
|
||||
@@ -91,9 +91,8 @@ func (p *SaaSCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{})
|
||||
var res models.ScanResults
|
||||
hasError := false
|
||||
for _, r := range loaded {
|
||||
if len(r.Errors) == 0 {
|
||||
res = append(res, r)
|
||||
} else {
|
||||
res = append(res, r)
|
||||
if len(r.Errors) != 0 {
|
||||
logging.Log.Errorf("Ignored since errors occurred during scanning: %s, err: %v",
|
||||
r.ServerName, r.Errors)
|
||||
hasError = true
|
||||
@@ -129,11 +128,6 @@ func (p *SaaSCmd) Execute(_ context.Context, f *flag.FlagSet, _ ...interface{})
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
logging.Log.Warnf("Failed to remove %s. err: %+v", dir, err)
|
||||
}
|
||||
symlink := filepath.Join(config.Conf.ResultsDir, "current")
|
||||
err := os.Remove(symlink)
|
||||
if err != nil {
|
||||
logging.Log.Warnf("Failed to remove %s. err: %+v", dir, err)
|
||||
}
|
||||
}
|
||||
|
||||
return subcommands.ExitSuccess
|
||||
|
||||
@@ -614,6 +614,7 @@ func summaryLines(r models.ScanResult) string {
|
||||
pkgNames = append(pkgNames, vinfo.GitHubSecurityAlerts.Names()...)
|
||||
pkgNames = append(pkgNames, vinfo.WpPackageFixStats.Names()...)
|
||||
pkgNames = append(pkgNames, vinfo.LibraryFixedIns.Names()...)
|
||||
pkgNames = append(pkgNames, vinfo.WindowsKBFixedIns...)
|
||||
|
||||
av := vinfo.AttackVector()
|
||||
for _, pname := range vinfo.AffectedPackages.Names() {
|
||||
@@ -745,7 +746,7 @@ func setChangelogLayout(g *gocui.Gui) error {
|
||||
}
|
||||
|
||||
for _, alert := range vinfo.GitHubSecurityAlerts {
|
||||
lines = append(lines, "* "+alert.PackageName)
|
||||
lines = append(lines, "* "+alert.RepoURLPackageName())
|
||||
}
|
||||
|
||||
r := currentScanResult
|
||||
|
||||
Reference in New Issue
Block a user