refactor: Follow revive rules across the repo #1263

Merged
1 commit merged on Sep 20, 2024
46 changes: 46 additions & 0 deletions .golangci.yaml
@@ -55,11 +55,57 @@ linters-settings:
disabled-checks:
- ifElseChain
revive:
# enable-all-rules: true
rules:
# Overrides ----
# This is just here for documentation purposes, as all rules are disabled by default
- name: increment-decrement
disabled: true
# Defaults ----
- name: blank-imports
disabled: false
- name: context-as-argument
disabled: false
- name: context-keys-type
disabled: false
- name: dot-imports
disabled: false
- name: empty-block
disabled: false
- name: error-naming
disabled: false
- name: error-return
disabled: false
- name: error-strings
disabled: false
- name: errorf
disabled: false
- name: exported
disabled: false
- name: indent-error-flow
disabled: false
- name: package-comments
disabled: false
- name: range
disabled: false
- name: receiver-naming
disabled: false
- name: redefines-builtin-id
disabled: false
- name: superfluous-else
disabled: false
- name: time-naming
disabled: false
- name: unexported-return
disabled: false
- name: unreachable-code
disabled: false
- name: unused-parameter
disabled: false
- name: var-declaration
disabled: false
- name: var-naming
disabled: false
nlreturn:
# Size of the block (including return statement that is still "OK")
# so no return split required.
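
The block above lists revive's golint-inherited default rules in .golangci.yaml and enables each of them (increment-decrement is listed only for documentation and stays disabled). As an illustration of what two of the enabled rules catch, here is a hedged Go sketch, hypothetical code rather than anything from this PR, showing error-strings and indent-error-flow violations next to their compliant forms:

package example

import (
    "errors"
    "fmt"
)

// Flagged by error-strings: error strings should not be capitalized
// or end with punctuation.
var errBad = errors.New("Something went wrong.")

// Compliant form.
var errGood = errors.New("something went wrong")

// Flagged by indent-error-flow: the happy path is indented inside an
// else block that follows an error return.
func loadBad(path string) (string, error) {
    data, err := read(path)
    if err != nil {
        return "", fmt.Errorf("reading %s: %w", path, err)
    } else {
        return data, nil
    }
}

// Compliant form: return early on error and keep the happy path at the
// outer indentation level.
func loadGood(path string) (string, error) {
    data, err := read(path)
    if err != nil {
        return "", fmt.Errorf("reading %s: %w", path, err)
    }

    return data, nil
}

// read is a stand-in helper so the sketch compiles on its own.
func read(path string) (string, error) {
    return "contents of " + path, nil
}
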
2 changes: 1 addition & 1 deletion cmd/osv-scanner/scan/main.go
@@ -50,7 +50,7 @@ func Command(stdout, stderr io.Writer, r *reporter.Reporter) *cli.Command {
Aliases: []string{"f"},
Usage: "sets the output format; value can be: " + strings.Join(reporter.Format(), ", "),
Value: "table",
Action: func(context *cli.Context, s string) error {
Action: func(_ *cli.Context, s string) error {
if slices.Contains(reporter.Format(), s) {
return nil
}
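
The change above is revive's unused-parameter rule in action: the cli.Context argument is never read inside the Action callback, so it is renamed to _ while the function keeps the signature the framework expects. A minimal hedged sketch of the same pattern, with hypothetical names:

package example

import "fmt"

// visitor is a hypothetical callback type whose signature is fixed by
// the code that invokes it, even when an implementation only needs
// some of the arguments.
type visitor func(index int, value string) error

// A version with the index parameter named but never read would be
// flagged by unused-parameter; blanking it with _ keeps the function
// compatible with the visitor type while making the intent explicit.
func printValue(_ int, value string) error {
    _, err := fmt.Println(value)
    return err
}

// Compile-time check that the blanked signature still matches.
var _ visitor = printValue
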
24 changes: 12 additions & 12 deletions internal/image/extractor.go
@@ -36,7 +36,7 @@ func findArtifactExtractor(path string) []extractorPair {
return extractors
}

func extractArtifactDeps(path string, layer *imgLayer) (lockfile.Lockfile, error) {
func extractArtifactDeps(path string, layer *Layer) (lockfile.Lockfile, error) {
foundExtractors := findArtifactExtractor(path)
if len(foundExtractors) == 0 {
return lockfile.Lockfile{}, fmt.Errorf("%w for %s", lockfile.ErrExtractorNotFound, path)
@@ -89,15 +89,15 @@ func extractArtifactDeps(path string, layer *imgLayer) (lockfile.Lockfile, error
}, nil
}

// A ImageFile represents a file that exists in an image
type ImageFile struct {
// A File represents a file that exists in an image
type File struct {
*os.File

layer *imgLayer
layer *Layer
path string
}

func (f ImageFile) Open(openPath string) (lockfile.NestedDepFile, error) {
func (f File) Open(openPath string) (lockfile.NestedDepFile, error) {
// use path instead of filepath, because container is always in Unix paths (for now)
if path.IsAbs(openPath) {
return OpenLayerFile(openPath, f.layer)
@@ -108,27 +108,27 @@ func (f ImageFile) Open(openPath string) (lockfile.NestedDepFile, error) {
return OpenLayerFile(absPath, f.layer)
}

func (f ImageFile) Path() string {
func (f File) Path() string {
return f.path
}

func OpenLayerFile(path string, layer *imgLayer) (ImageFile, error) {
func OpenLayerFile(path string, layer *Layer) (File, error) {
fileNode, err := layer.getFileNode(path)
if err != nil {
return ImageFile{}, err
return File{}, err
}

file, err := fileNode.Open()
if err != nil {
return ImageFile{}, err
return File{}, err
}

return ImageFile{
return File{
File: file,
path: path,
layer: layer,
}, nil
}

var _ lockfile.DepFile = ImageFile{}
var _ lockfile.NestedDepFile = ImageFile{}
var _ lockfile.DepFile = File{}
var _ lockfile.NestedDepFile = File{}
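
Renaming ImageFile to File (and, in the files below, imgLayer to Layer) follows revive's exported rule, which warns when an exported identifier repeats its package name: callers already qualify the type as image.ImageFile, which stutters, while image.File reads cleanly. A hedged, self-contained sketch of the same idea in a hypothetical package:

// Package cache is a hypothetical package used only to illustrate the
// stutter check; it is not part of osv-scanner.
package cache

import "time"

// A type named CacheEntry would be flagged because other packages have
// to spell it cache.CacheEntry, which stutters; revive suggests a name
// that reads naturally when qualified, such as cache.Entry.

// Entry is the compliant spelling, mirroring the ImageFile -> File
// rename above.
type Entry struct {
    Key       string
    ExpiresAt time.Time
}

// Expired reports whether the entry is past its expiry time.
func (e Entry) Expired(now time.Time) bool {
    return now.After(e.ExpiresAt)
}
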
14 changes: 7 additions & 7 deletions internal/image/image.go
@@ -33,7 +33,7 @@ type ScanResults struct {

type Image struct {
// Final layer is the last element in the slice
layers []imgLayer
layers []Layer
innerImage *v1.Image
extractDir string
baseImageIndex int
@@ -61,7 +61,7 @@ func (img *Image) layerIDToCommand(id string) (string, error) {
return img.configFile.History[i-1].CreatedBy, nil
}

func (img *Image) LastLayer() *imgLayer {
func (img *Image) LastLayer() *Layer {
return &img.layers[len(img.layers)-1]
}

@@ -97,7 +97,7 @@ func LoadImage(imagePath string) (*Image, error) {
outputImage := Image{
extractDir: tempPath,
innerImage: &image,
layers: make([]imgLayer, len(layers)),
layers: make([]Layer, len(layers)),
layerIDToIndex: make(map[string]int),
configFile: configFile,
baseImageIndex: guessBaseImageIndex(configFile.History),
@@ -111,7 +111,7 @@ func LoadImage(imagePath string) (*Image, error) {
return &outputImage, err
}

outputImage.layers[i] = imgLayer{
outputImage.layers[i] = Layer{
fileNodeTrie: trie.NewPathTrie(),
id: hash.Hex,
rootImage: &outputImage,
@@ -235,7 +235,7 @@ func LoadImage(imagePath string) (*Image, error) {
continue
}

currentMap.fileNodeTrie.Put(virtualPath, fileNode{
currentMap.fileNodeTrie.Put(virtualPath, FileNode{
rootImage: &outputImage,
// Select the original layer of the file
originLayer: &outputImage.layers[i],
@@ -255,7 +255,7 @@ func LoadImage(imagePath string) (*Image, error) {
return &outputImage, nil
}

func inWhiteoutDir(fileMap imgLayer, filePath string) bool {
func inWhiteoutDir(fileMap Layer, filePath string) bool {
for {
if filePath == "" {
break
@@ -265,7 +265,7 @@ func inWhiteoutDir(fileMap imgLayer, filePath string) bool {
break
}
val := fileMap.fileNodeTrie.Get(dirname)
item, ok := val.(fileNode)
item, ok := val.(FileNode)
if ok && item.isWhiteout {
return true
}
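
Exporting Layer (and FileNode in the next file) also satisfies revive's unexported-return rule: an exported method such as Image.LastLayer should not return an unexported type, since callers in other packages cannot name the result. A hedged sketch of the pattern with hypothetical names, not code from osv-scanner:

// Package stack is a hypothetical package illustrating the rule; it is
// not part of osv-scanner.
package stack

// Frame is exported so that Stack.Top can return a type its callers
// can name, mirroring Image.LastLayer returning *Layer rather than the
// previously unexported *imgLayer.
type Frame struct {
    Name string
}

// Stack is a minimal container for the sketch.
type Stack struct {
    frames []Frame
}

// Push adds a frame to the top of the stack.
func (s *Stack) Push(f Frame) {
    s.frames = append(s.frames, f)
}

// Top returns a pointer to the most recently pushed frame, or nil if
// the stack is empty. Had Frame been unexported, this exported method
// would trip unexported-return.
func (s *Stack) Top() *Frame {
    if len(s.frames) == 0 {
        return nil
    }

    return &s.frames[len(s.frames)-1]
}
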
30 changes: 15 additions & 15 deletions internal/image/layer.go
@@ -15,53 +15,53 @@ const (
Dir
)

// fileNode represents a file on a specific layer, mapping the contents to an extracted file on disk
type fileNode struct {
// FileNode represents a file on a specific layer, mapping the contents to an extracted file on disk
type FileNode struct {
// TODO: Determine the performance implications of having a pointer to base image in every fileNode
rootImage *Image
fileType fileType
isWhiteout bool
originLayer *imgLayer
originLayer *Layer
virtualPath string
permission fs.FileMode
}

func (f *fileNode) Open() (*os.File, error) {
func (f *FileNode) Open() (*os.File, error) {
if f.isWhiteout {
return nil, fs.ErrNotExist
}

return os.Open(f.absoluteDiskPath())
}

func (f *fileNode) absoluteDiskPath() string {
func (f *FileNode) absoluteDiskPath() string {
return filepath.Join(f.rootImage.extractDir, f.originLayer.id, f.virtualPath)
}

// imgLayer represents all the files on a layer
type imgLayer struct {
// Layer represents all the files on a layer
type Layer struct {
// id is the sha256 digest of the layer
id string
fileNodeTrie *trie.PathTrie
rootImage *Image
// TODO: Use hashmap to speed up path lookups
}

func (filemap imgLayer) getFileNode(path string) (fileNode, error) {
node, ok := filemap.fileNodeTrie.Get(path).(fileNode)
func (filemap Layer) getFileNode(path string) (FileNode, error) {
node, ok := filemap.fileNodeTrie.Get(path).(FileNode)
if !ok {
return fileNode{}, fs.ErrNotExist
return FileNode{}, fs.ErrNotExist
}

return node, nil
}

// AllFiles return all files that exist on the layer the FileMap is representing
func (filemap imgLayer) AllFiles() []fileNode {
allFiles := []fileNode{}
func (filemap Layer) AllFiles() []FileNode {
allFiles := []FileNode{}
// No need to check error since we are not returning any errors
_ = filemap.fileNodeTrie.Walk(func(key string, value interface{}) error {
node := value.(fileNode)
_ = filemap.fileNodeTrie.Walk(func(_ string, value interface{}) error {
node := value.(FileNode)
if node.fileType != RegularFile { // Only add regular files
return nil
}
@@ -70,7 +70,7 @@ func (filemap imgLayer) AllFiles() []fileNode {
return nil
}

allFiles = append(allFiles, value.(fileNode))
allFiles = append(allFiles, value.(FileNode))

return nil
})
18 changes: 9 additions & 9 deletions internal/local/zip_test.go
@@ -141,7 +141,7 @@ func TestNewZippedDB_Offline_WithoutCache(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(_ http.ResponseWriter, _ *http.Request) {
t.Errorf("a server request was made when running offline")
})

@@ -165,7 +165,7 @@ func TestNewZippedDB_Offline_WithCache(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(_ http.ResponseWriter, _ *http.Request) {
t.Errorf("a server request was made when running offline")
})

@@ -191,7 +191,7 @@ func TestNewZippedDB_BadZip(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(w http.ResponseWriter, _ *http.Request) {
_, _ = w.Write([]byte("this is not a zip"))
})

@@ -227,7 +227,7 @@ func TestNewZippedDB_Online_WithoutCache(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(w http.ResponseWriter, _ *http.Request) {
_, _ = writeOSVsZip(t, w, map[string]models.Vulnerability{
"GHSA-1.json": {ID: "GHSA-1"},
"GHSA-2.json": {ID: "GHSA-2"},
@@ -259,7 +259,7 @@ func TestNewZippedDB_Online_WithoutCacheAndNoHashHeader(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(w http.ResponseWriter, _ *http.Request) {
_, _ = w.Write(zipOSVs(t, map[string]models.Vulnerability{
"GHSA-1.json": {ID: "GHSA-1"},
"GHSA-2.json": {ID: "GHSA-2"},
@@ -329,7 +329,7 @@ func TestNewZippedDB_Online_WithDifferentCache(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(w http.ResponseWriter, _ *http.Request) {
_, _ = writeOSVsZip(t, w, map[string]models.Vulnerability{
"GHSA-1.json": {ID: "GHSA-1"},
"GHSA-2.json": {ID: "GHSA-2"},
@@ -359,7 +359,7 @@ func TestNewZippedDB_Online_WithCacheButNoHashHeader(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(w http.ResponseWriter, _ *http.Request) {
_, _ = w.Write(zipOSVs(t, map[string]models.Vulnerability{
"GHSA-1.json": {ID: "GHSA-1"},
"GHSA-2.json": {ID: "GHSA-2"},
@@ -393,7 +393,7 @@ func TestNewZippedDB_Online_WithBadCache(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(w http.ResponseWriter, _ *http.Request) {
_, _ = writeOSVsZip(t, w, map[string]models.Vulnerability{
"GHSA-1.json": {ID: "GHSA-1"},
"GHSA-2.json": {ID: "GHSA-2"},
@@ -419,7 +419,7 @@ func TestNewZippedDB_FileChecks(t *testing.T) {

testDir := testutility.CreateTestDir(t)

ts := createZipServer(t, func(w http.ResponseWriter, r *http.Request) {
ts := createZipServer(t, func(w http.ResponseWriter, _ *http.Request) {
_, _ = writeOSVsZip(t, w, map[string]models.Vulnerability{
"file.json": {ID: "GHSA-1234"},
// only files with .json suffix should be loaded
4 changes: 4 additions & 0 deletions pkg/config/config.go
@@ -15,6 +15,10 @@ import (

const osvScannerConfigName = "osv-scanner.toml"

// Ignore stuttering as that would be a breaking change
// TODO: V2 rename?
//
//nolint:revive
type ConfigManager struct {
// Override to replace all other configs
OverrideConfig *Config
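
ConfigManager is deliberately left stuttering as config.ConfigManager because renaming it would be a breaking change, so the revive warning is suppressed with a //nolint:revive directive instead. The directive applies to the declaration that immediately follows it, and golangci-lint accepts an explanation after a second //. A hedged sketch with a hypothetical package and field, not the real file:

// Package settings is a hypothetical stand-in; it is not the real
// osv-scanner config package.
package settings

// SettingsManager stutters as settings.SettingsManager, but renaming
// it would break existing callers, so the revive warning is suppressed
// for this single declaration.
//
//nolint:revive // kept for backwards compatibility
type SettingsManager struct {
    // OverridePath is a hypothetical field for the sketch.
    OverridePath string
}
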
2 changes: 1 addition & 1 deletion pkg/lockfile/osv-vuln-results.go
@@ -14,7 +14,7 @@ func ParseOSVScannerResults(pathToLockfile string) ([]PackageDetails, error) {

type OSVScannerResultsExtractor struct{}

func (e OSVScannerResultsExtractor) ShouldExtract(path string) bool {
func (e OSVScannerResultsExtractor) ShouldExtract(_ string) bool {
// The output will always be a custom json file, so don't return a default should extract
return false
}