From cfd61128751cca9adee14162ee4642556b768140 Mon Sep 17 00:00:00 2001 From: Frikky Date: Mon, 19 Aug 2024 22:27:24 +0200 Subject: [PATCH] Pipeline + detection example fixes --- db-connector.go | 12 ++- detection.go | 197 +++++++++++++++++++++++++++++++++++++++++++----- files.go | 98 +++++++++--------------- pipelines.go | 20 +++-- shared.go | 38 +--------- structs.go | 27 ++++--- 6 files changed, 252 insertions(+), 140 deletions(-) diff --git a/db-connector.go b/db-connector.go index 06d079f..2f5afb7 100755 --- a/db-connector.go +++ b/db-connector.go @@ -6824,6 +6824,11 @@ func SetWorkflowQueue(ctx context.Context, executionRequest ExecutionRequest, en env = strings.ReplaceAll(env, " ", "-") nameKey := fmt.Sprintf("workflowqueue-%s", env) + if project.Environment == "cloud" { + //log.Printf("[DEBUG] Adding execution to queue: %s", nameKey) + } + + // New struct, to not add body, author etc if project.DbType == "opensearch" { data, err := json.Marshal(executionRequest) @@ -8823,8 +8828,9 @@ func GetHook(ctx context.Context, hookId string) (*Hook, error) { if err == nil && len(hook.Id) > 0 { return hook, nil } else { - //log.Printf("[ERROR] Failed unmarshalling cache for hook: %s", err) - return hook, errors.New(fmt.Sprintf("No good cache for hook %s", hookId)) + if len(hook.Id) == 0 && len(cacheData) > 0 { + return hook, errors.New(fmt.Sprintf("No good cache for hook %s", hookId)) + } } } else { //log.Printf("[DEBUG] Failed getting cache for hook: %s", err) @@ -8834,10 +8840,8 @@ func GetHook(ctx context.Context, hookId string) (*Hook, error) { var err error if project.DbType == "opensearch" { - //log.Printf("GETTING ES USER %s", res, err := project.Es.Get(strings.ToLower(GetESIndexPrefix(nameKey)), hookId) if err != nil { - log.Printf("[WARNING] Error for %s: %s", cacheKey, err) log.Printf("[WARNING] Error for %s: %s", cacheKey, err) return &Hook{}, err } diff --git a/detection.go b/detection.go index d5daf36..ac874f5 100644 --- a/detection.go +++ b/detection.go @@ -13,7 +13,9 @@ import ( "sort" "strings" "time" + "errors" + uuid "github.com/satori/go.uuid" "gopkg.in/yaml.v2" ) @@ -33,9 +35,9 @@ func HandleTenzirHealthUpdate(resp http.ResponseWriter, request *http.Request) { fmt.Fprintf(resp, "Failed to decode JSON: %v", err) return } + ctx := context.Background() status := healthUpdate.Status - result, err := GetDisabledRules(ctx) if (err != nil && err.Error() == "rules doesn't exist") || err == nil { result.IsTenzirActive = status @@ -52,6 +54,7 @@ func HandleTenzirHealthUpdate(resp http.ResponseWriter, request *http.Request) { resp.Write([]byte(fmt.Sprintf(`{"success": true}`))) return } + resp.WriteHeader(500) resp.Write([]byte(`{"success": false}`)) return @@ -98,6 +101,8 @@ func HandleGetDetectionRules(resp http.ResponseWriter, request *http.Request) { return } + log.Printf("[DEBUG] Loaded %d files for user %s from namespace %s", len(files), user.Username, detectionType) + disabledRules, err := GetDisabledRules(ctx) if err != nil && err.Error() != "rules doesn't exist" { log.Printf("[ERROR] Failed to get disabled rules: %s", err) @@ -110,12 +115,6 @@ func HandleGetDetectionRules(resp http.ResponseWriter, request *http.Request) { return files[i].UpdatedAt > files[j].UpdatedAt }) - type DetectionResponse struct { - DetectionInfo []DetectionFileInfo `json:"detection_info"` - FolderDisabled bool `json:"folder_disabled"` - IsConnectorActive bool `json:"is_connector_active"` - } - var sigmaFileInfo []DetectionFileInfo for _, file := range files { @@ -127,7 +126,7 @@ func 
HandleGetDetectionRules(resp http.ResponseWriter, request *http.Request) { if file.Encrypted { if project.Environment == "cloud" || file.StorageArea == "google_storage" { - log.Printf("[ERROR] No namespace handler for cloud decryption!") + log.Printf("[ERROR] No namespace handler for cloud decryption (detection)!") //continue } else { Openfile, err := os.Open(file.DownloadPath) @@ -214,8 +213,12 @@ func HandleGetDetectionRules(resp http.ResponseWriter, request *http.Request) { } response := DetectionResponse{ - DetectionInfo: sigmaFileInfo, - FolderDisabled: disabledRules.DisabledFolder, + DetectionName: detectionType, + Category: "", + OrgId: user.ActiveOrg.Id, + + DetectionInfo: sigmaFileInfo, + FolderDisabled: disabledRules.DisabledFolder, IsConnectorActive: isTenzirAlive, } @@ -333,7 +336,7 @@ func HandleToggleRule(resp http.ResponseWriter, request *http.Request) { execType = "ENABLE_SIGMA_FILE" } - err = SetExecRequest(ctx, execType, file.Filename) + err = SetDetectionOrborusRequest(ctx, user.ActiveOrg.Id, execType, file.Filename, "SIGMA", "SHUFFLE_DISCOVER") if err != nil { log.Printf("[ERROR] Failed setting workflow queue for env: %s", err) resp.WriteHeader(500) @@ -351,6 +354,21 @@ func HandleFolderToggle(resp http.ResponseWriter, request *http.Request) { return } + user, err := HandleApiAuthentication(resp, request) + if err != nil { + log.Printf("[WARNING] Api authentication failed in toggle folder: %s", err) + resp.WriteHeader(401) + resp.Write([]byte(`{"success": false}`)) + return + } + + if user.Role == "org-reader" { + log.Printf("[WARNING] Org-reader doesn't have access to toggle folder: %s (%s)", user.Username, user.Id) + resp.WriteHeader(403) + resp.Write([]byte(`{"success": false, "reason": "Read only user"}`)) + return + } + location := strings.Split(request.URL.String(), "/") if location[1] != "api" || len(location) < 6 { log.Printf("Path too short or incorrect: %s", request.URL.String()) @@ -396,7 +414,7 @@ func HandleFolderToggle(resp http.ResponseWriter, request *http.Request) { execType = "CATEGORY_UPDATE" } - err = SetExecRequest(ctx, execType, "") + err = SetDetectionOrborusRequest(ctx, user.ActiveOrg.Id, execType, "", "SIGMA", "SHUFFLE_DISCOVER") if err != nil { log.Printf("[ERROR] Failed setting workflow queue for env: %s", err) resp.WriteHeader(500) @@ -456,7 +474,7 @@ func enableRule(file File) error { if innerFile.Id == file.Id { resp.Files = append(resp.Files[:i], resp.Files[i+1:]...) 
 				found = true
-				break
+				break
 			}
 		}
 
@@ -552,7 +570,7 @@ func HandleSaveSelectedRules(resp http.ResponseWriter, request *http.Request) {
 	triggerId := location[4]
 
 	selectedRules := SelectedDetectionRules{}
-	
+
 	decoder := json.NewDecoder(request.Body)
 	err = decoder.Decode(&selectedRules)
 	if err != nil {
@@ -564,10 +582,10 @@
 
 	err = StoreSelectedRules(request.Context(), triggerId, selectedRules)
 	if err != nil {
-	    log.Printf("[ERROR] Error storing selected rules for %s: %s", triggerId, err)
-	    resp.WriteHeader(http.StatusInternalServerError)
-	    resp.Write([]byte(`{"success": false}`))
-	    return
+		log.Printf("[ERROR] Error storing selected rules for %s: %s", triggerId, err)
+		resp.WriteHeader(http.StatusInternalServerError)
+		resp.Write([]byte(`{"success": false}`))
+		return
 	}
 
 	responseData, err := json.Marshal(selectedRules)
@@ -581,3 +599,146 @@
 	resp.WriteHeader(http.StatusOK)
 	resp.Write(responseData)
 }
+
+// FIXME: Should be generic - not just for SIEM/Sigma
+// E.g. try for Email/Sublime
+func HandleDetectionAutoConnect(resp http.ResponseWriter, request *http.Request) {
+	cors := HandleCors(resp, request)
+	if cors {
+		return
+	}
+
+	user, err := HandleApiAuthentication(resp, request)
+	if err != nil {
+		log.Printf("[WARNING] Api authentication failed in connect siem: %s", err)
+		resp.WriteHeader(401)
+		resp.Write([]byte(`{"success": false}`))
+		return
+	}
+
+	if user.Role == "org-reader" {
+		resp.WriteHeader(403)
+		resp.Write([]byte(`{"success": false, "reason": "Org reader does not have permission to connect to SIEM"}`))
+		return
+	}
+
+	// Check if url is /api/v1/detections/siem/
+	location := strings.Split(request.URL.String(), "/")
+	if len(location) < 5 {
+		log.Printf("[WARNING] Path too short: %d", len(location))
+		resp.WriteHeader(401)
+		resp.Write([]byte(`{"success": false}`))
+		return
+	}
+
+	detectionType := strings.ToLower(location[4])
+	if detectionType == "siem" {
+		log.Printf("[AUDIT] User '%s' (%s) is trying to connect to SIEM", user.Username, user.Id)
+
+		ctx := GetContext(request)
+		execType := "START_TENZIR"
+		err = SetDetectionOrborusRequest(ctx, user.ActiveOrg.Id, execType, "", "SIGMA", "SHUFFLE_DISCOVER")
+		if err != nil {
+			if strings.Contains(strings.ToLower(err.Error()), "must be started") {
+				resp.WriteHeader(200)
+				resp.Write([]byte(`{"success": true, "reason": "Please start the environment by running the relevant command.", "action": "environment_start"}`))
+				return
+			}
+
+
+			log.Printf("[ERROR] Failed setting workflow queue for env: %s", err)
+			if strings.Contains(strings.ToLower(err.Error()), "no valid environments") {
+				resp.WriteHeader(400)
+				resp.Write([]byte(`{"success": false, "reason": "No valid environments found. 
Go to /admin?tab=environments to create one.", "action": "environment_create"}`)) + return + } + + resp.WriteHeader(500) + resp.Write([]byte(`{"success": false}`)) + return + } + } else { + resp.WriteHeader(400) + resp.Write([]byte(fmt.Sprintf(`{"success": false, "reason": "Detection Type '%s' not implemented"}`, detectionType))) + } + + resp.WriteHeader(200) + resp.Write([]byte(`{"success": true}`)) +} + +func SetDetectionOrborusRequest(ctx context.Context, orgId, execType, fileName, executionSource, environmentName string) error { + if len(orgId) == 0 { + return fmt.Errorf("No org ID provided") + } + + environments, err := GetEnvironments(ctx, orgId) + if err != nil { + log.Printf("[ERROR] Failed to get environments: %s", err) + return err + } + + selectedEnvironments := []Environment{} + for _, env := range environments { + if env.Archived { + continue + } + + if env.Name == "cloud" || env.Name == "Cloud" { + continue + } + + if env.Name != environmentName && environmentName != "SHUFFLE_DISCOVER" { + continue + } + + selectedEnvironments = append(selectedEnvironments, env) + } + + log.Printf("[DEBUG] Found %d potentially valid environment(s)", len(selectedEnvironments)) + + /* + if len(selectedEnvironments) == 0 || environmentName == "SHUFFLE_DISCOVER" { + // FIXME: Get based on the Organisation. This is only tested onprem so far, so there's a lot to do to make this stable ROFL + log.Printf("[DEBUG] Automatically discovering the right environment from '%s'", environmentName) + } + */ + + if len(selectedEnvironments) == 0 { + log.Printf("[ERROR] No valid environments found") + return fmt.Errorf("No valid environments found") + } + + deployedToActiveEnv := false + for _, env := range selectedEnvironments { + execRequest := ExecutionRequest{ + Type: execType, + ExecutionId: uuid.NewV4().String(), + ExecutionSource: executionSource, + ExecutionArgument: fileName, + Priority: 11, + } + + parsedEnv := fmt.Sprintf("%s_%s", strings.ToLower(strings.ReplaceAll(strings.ReplaceAll(env.Name, " ", "-"), "_", "-")), orgId) + if project.Environment != "cloud" { + parsedEnv = strings.ToLower(strings.ReplaceAll(strings.ReplaceAll(env.Name, " ", "-"), "_", "-")) + } + + err = SetWorkflowQueue(ctx, execRequest, parsedEnv) + if err != nil { + log.Printf("[ERROR] Failed to set workflow queue: %s", err) + return err + } else { + if env.RunningIp != "" { + deployedToActiveEnv = true + } + } + } + + if !deployedToActiveEnv { + return errors.New("This environment must be started first. 
Please start the environment by running it onprem") + } + + go DeleteCache(ctx, fmt.Sprintf("environments_%s", orgId)) + + return nil +} diff --git a/files.go b/files.go index 9f7dd44..f4dfad0 100755 --- a/files.go +++ b/files.go @@ -438,7 +438,7 @@ func LoadStandardFromGithub(client *github.Client, owner, repo, path, filename s ctx := context.Background() files := []*github.RepositoryContent{} - cacheKey := fmt.Sprintf("github_%s_%s_%s", owner, repo, path) + cacheKey := fmt.Sprintf("github_%s_%s_%s_%s", owner, repo, path, filename) if project.CacheDb { cache, err := GetCache(ctx, cacheKey) if err == nil { @@ -458,20 +458,22 @@ func LoadStandardFromGithub(client *github.Client, owner, repo, path, filename s } } + log.Printf("\n\n[DEBUG] Got %d file(s): %s\n\n", len(files), path) + if len(files) == 0 { log.Printf("[ERROR] No files found in namespace '%s' on Github - Used for integration framework", path) return []*github.RepositoryContent{}, nil } - if len(filename) == 0 { - return []*github.RepositoryContent{}, nil - } - - matchingFiles := []*github.RepositoryContent{} - for _, item := range files { - if len(filename) > 0 && strings.HasPrefix(*item.Name, filename) { - matchingFiles = append(matchingFiles, item) + if len(filename) > 0 { + matchingFiles := []*github.RepositoryContent{} + for _, item := range files { + if len(filename) > 0 && strings.HasPrefix(*item.Name, filename) { + matchingFiles = append(matchingFiles, item) + } } + + files = matchingFiles } if project.CacheDb { @@ -487,24 +489,6 @@ func LoadStandardFromGithub(client *github.Client, owner, repo, path, filename s } } - return matchingFiles, nil -} - -func LoadStandardFromGithub2(client *github.Client, owner, repo, path string) ([]*github.RepositoryContent, error) { - ctx := context.Background() - - // Fetch the contents of the specified path from the repository - _, files, _, err := client.Repositories.GetContents(ctx, owner, repo, path, nil) - if err != nil { - log.Printf("[WARNING] Failed getting files from GitHub: %s", err) - return nil, err - } - - if len(files) == 0 { - log.Printf("[ERROR] No files found in path '%s' in the repository '%s/%s'", path, owner, repo) - return nil, nil - } - return files, nil } @@ -762,7 +746,7 @@ func HandleGetFileNamespace(resp http.ResponseWriter, request *http.Request) { var filedata = []byte{} if file.Encrypted { if project.Environment == "cloud" || file.StorageArea == "google_storage" { - log.Printf("[ERROR] No namespace handler for cloud decryption!") + log.Printf("[ERROR] No namespace handler for cloud decryption (files)!") } else { Openfile, err := os.Open(file.DownloadPath) defer Openfile.Close() //Close after function return @@ -848,23 +832,6 @@ func HandleGetFileNamespace(resp http.ResponseWriter, request *http.Request) { io.Copy(resp, buf) } -func SetExecRequest(ctx context.Context, execType string, fileName string) error { - - execRequest := ExecutionRequest{ - Type: execType, - ExecutionId: uuid.NewV4().String(), - ExecutionSource: "SIGMA", - ExecutionArgument: fileName, - Priority: 11, - } - - err := SetWorkflowQueue(ctx, execRequest, "shuffle") - if err != nil { - return err - } - return nil -} - func HandleGetFileContent(resp http.ResponseWriter, request *http.Request) { cors := HandleCors(resp, request) if cors { @@ -1292,13 +1259,15 @@ func HandleEditFile(resp http.ResponseWriter, request *http.Request) { log.Printf("[INFO] Successfully edited file ID %s", file.Id) - execType := "CATEGORY_UPDATE" - err = SetExecRequest(ctx, execType, file.Filename) - if err != nil { - 
log.Printf("[ERROR] Failed setting workflow queue for env: %s", err) - resp.WriteHeader(500) - resp.Write([]byte(`{"success": false}`)) - return + if file.Namespace == "sigma" { + execType := "CATEGORY_UPDATE" + err = SetDetectionOrborusRequest(ctx, user.ActiveOrg.Id, execType, file.Filename, "SIGMA", "SHUFFLE_DISCOVER") + if err != nil { + log.Printf("[ERROR] Failed setting workflow queue for env: %s", err) + resp.WriteHeader(500) + resp.Write([]byte(`{"success": false}`)) + return + } } resp.WriteHeader(200) @@ -1459,14 +1428,15 @@ func HandleUploadFile(resp http.ResponseWriter, request *http.Request) { log.Printf("[INFO] Successfully uploaded file ID %s", file.Id) if file.Namespace == "sigma" { - execType := "CATEGORY_UPDATE" - err = SetExecRequest(ctx, execType, file.Filename) - if err != nil { - log.Printf("[ERROR] Failed setting workflow queue for env: %s", err) - resp.WriteHeader(500) - resp.Write([]byte(`{"success": false}`)) - return - }} + execType := "CATEGORY_UPDATE" + err = SetDetectionOrborusRequest(ctx, user.ActiveOrg.Id, execType, file.Filename, "SIGMA", "SHUFFLE_DISCOVER") + if err != nil { + log.Printf("[ERROR] Failed setting workflow queue for env: %s", err) + resp.WriteHeader(500) + resp.Write([]byte(`{"success": false}`)) + return + } + } resp.WriteHeader(200) resp.Write([]byte(fmt.Sprintf(`{"success": true, "file_id": "%s"}`, fileId))) @@ -1870,13 +1840,12 @@ func HandleDownloadRemoteFiles(resp http.ResponseWriter, request *http.Request) Field2 string `json:"field_2"` // Password Field3 string `json:"field_3"` // Branch Path string `json:"path"` - } var input tmpStruct err = json.Unmarshal(body, &input) if err != nil { - log.Printf("Error with unmarshal tmpBody: %s", err) + log.Printf("[DEBUG] Error with unmarshal tmpBody: %s", err) resp.WriteHeader(401) resp.Write([]byte(`{"success": false}`)) return @@ -1905,8 +1874,7 @@ func HandleDownloadRemoteFiles(resp http.ResponseWriter, request *http.Request) } } - log.Printf("[DEBUG] Loading standard from github: %s/%s/%s", owner, repo, path) - + log.Printf("[DEBUG] Loading standard with git: %s/%s/%s", owner, repo, path) files, err := LoadStandardFromGithub(client, owner, repo, path, "") if err != nil { log.Printf("[DEBUG] Failed to load standard from github: %s", err) @@ -1915,6 +1883,8 @@ func HandleDownloadRemoteFiles(resp http.ResponseWriter, request *http.Request) return } + log.Printf("[DEBUG] Found %d files in %s/%s/%s", len(files), owner, repo, path) + if len(files) > 50 { files = files[:50] } diff --git a/pipelines.go b/pipelines.go index 45610b2..5cca0e6 100644 --- a/pipelines.go +++ b/pipelines.go @@ -138,15 +138,19 @@ func HandleNewPipelineRegister(resp http.ResponseWriter, request *http.Request) } //parsedId := fmt.Sprintf("%s_%s", strings.ToLower(strings.ReplaceAll(strings.ReplaceAll(pipeline.Environment, " ", "-"), "_", "-")), user.ActiveOrg.Id) - parsedId := strings.ToLower(pipeline.Environment) + parsedEnv := fmt.Sprintf("%s_%s", strings.ToLower(strings.ReplaceAll(strings.ReplaceAll(pipeline.Environment, " ", "-"), "_", "-")), user.ActiveOrg.Id) + if project.Environment != "cloud" { + parsedEnv = strings.ToLower(strings.ReplaceAll(strings.ReplaceAll(pipeline.Environment, " ", "-"), "_", "-")) + } + formattedType := fmt.Sprintf("PIPELINE_%s", startCommand) - existingQueue, _ := GetWorkflowQueue(ctx, parsedId, 10) + existingQueue, _ := GetWorkflowQueue(ctx, parsedEnv, 10) for _, queue := range existingQueue.Data { if strings.HasPrefix(queue.Type, "PIPELINE") { - log.Printf("[WARNING] Pipeline type already 
exists: %s", formattedType) - resp.WriteHeader(400) - resp.Write([]byte(`{"success": false, "reason": "Pipeline type already exists. Please wait for existing Pipeline request to be fullfilled by Orborus (could take a few seconds)."}`)) - return + //log.Printf("[WARNING] Pipeline type already exists: %s", formattedType) + //resp.WriteHeader(400) + //resp.Write([]byte(`{"success": false, "reason": "Pipeline type already exists. Please wait for existing Pipeline request to be fullfilled by Orborus (could take a few seconds)."}`)) + //return } } @@ -207,7 +211,7 @@ func HandleNewPipelineRegister(resp http.ResponseWriter, request *http.Request) log.Printf("[INFO] Set up pipeline with trigger ID %s and environment %s", pipeline.TriggerId, pipeline.Environment) } - err = SetWorkflowQueue(ctx, execRequest, parsedId) + err = SetWorkflowQueue(ctx, execRequest, parsedEnv) if err != nil { log.Printf("[ERROR] Failed setting workflow queue for env: %s", err) resp.WriteHeader(500) @@ -236,4 +240,4 @@ func deletePipeline(ctx context.Context, pipeline Pipeline) error { log.Printf("[INFO] Successfully deleted pipeline %s", pipeline.TriggerId) return nil -} \ No newline at end of file +} diff --git a/shared.go b/shared.go index 4d66a54..d8346bb 100755 --- a/shared.go +++ b/shared.go @@ -4963,41 +4963,6 @@ func HandleGetHooks(resp http.ResponseWriter, request *http.Request) { resp.Write(newjson) } -func HandleConnectSiem(resp http.ResponseWriter, request *http.Request) { - cors := HandleCors(resp, request) - if cors { - return - } - - user, err := HandleApiAuthentication(resp, request) - if err != nil { - log.Printf("[WARNING] Api authentication failed in conenct siem: %s", err) - resp.WriteHeader(401) - resp.Write([]byte(`{"success": false}`)) - return - } - - if user.Role == "org-reader" { - resp.WriteHeader(401) - resp.Write([]byte(`{"success": false, "reason": "org reader dont have permission"}`)) - return - } - - ctx := GetContext(request) - execType := "START_TENZIR" - err = SetExecRequest(ctx, execType, "") - if err != nil { - log.Printf("[ERROR] Failed setting workflow queue for env: %s", err) - resp.WriteHeader(500) - resp.Write([]byte(`{"success": false}`)) - return - } - - resp.WriteHeader(200) - resp.Write([]byte(`{"success": true}`)) - -} - func HandleUpdateUser(resp http.ResponseWriter, request *http.Request) { cors := HandleCors(resp, request) if cors { @@ -12911,8 +12876,7 @@ func HandleLogin(resp http.ResponseWriter, request *http.Request) { return } - log.Printf("[AUDIT] Handling SSO login of %s", data.Username) - + log.Printf("[AUDIT] Handling login of username %s", data.Username) data.Username = strings.ToLower(strings.TrimSpace(data.Username)) err = checkUsername(data.Username) if err != nil { diff --git a/structs.go b/structs.go index 4ab15fd..52d4762 100755 --- a/structs.go +++ b/structs.go @@ -836,7 +836,7 @@ type Priority struct { URL string `json:"url" datastore:"url"` Severity int `json:"severity" datastore:"severity"` // 1 = high, 2 = mid, 3 = low - Time int64 `json:"time" datastore:"time"` + Time int64 `json:"time" datastore:"time"` } type LeadInfo struct { @@ -2465,13 +2465,13 @@ type DisabledHookWrapper struct { } type SelectedRulesWrapper struct { - Index string `json:"_index"` - Type string `json:"_type"` - ID string `json:"_id"` - Version int `json:"_version"` - SeqNo int `json:"_seq_no"` - PrimaryTerm int `json:"_primary_term"` - Found bool `json:"found"` + Index string `json:"_index"` + Type string `json:"_type"` + ID string `json:"_id"` + Version int `json:"_version"` + 
SeqNo int `json:"_seq_no"` + PrimaryTerm int `json:"_primary_term"` + Found bool `json:"found"` Source SelectedDetectionRules `json:"_source"` } @@ -3695,7 +3695,7 @@ type CategoryAction struct { // Optional~ AppVersion string `json:"app_version"` - AppId string `json:"app_id"` + AppId string `json:"app_id"` ActionName string `json:"action_name"` Category string `json:"category"` OptionalFields []Valuereplace `json:"optional_fields"` @@ -4066,3 +4066,12 @@ type AppParser struct { OpenAPI []byte `json:"openapi"` App []byte `json:"app"` } + +type DetectionResponse struct { + DetectionName string `json:"detection_name"` + Category string `json:"category"` + OrgId string `json:"org_id"` + DetectionInfo []DetectionFileInfo `json:"detection_info"` + FolderDisabled bool `json:"folder_disabled"` + IsConnectorActive bool `json:"is_connector_active"` +}