From 0a5f2d450ae98d74e8f9f18d5eeb23d10d1433af Mon Sep 17 00:00:00 2001
From: Brendan Playford <34052452+teslashibe@users.noreply.github.com>
Date: Fri, 12 Jul 2024 07:51:50 +0100
Subject: [PATCH 1/8] fix: nil pointer dereference in updateRecords function
 (#409)

* Update workers.go to handle nil pointers

* handle errors from db and update breaking changes

---
 pkg/api/handlers_node.go | 132 +++++++++++++++++++++------------------
 pkg/db/operations.go     |  10 +--
 pkg/workers/workers.go   |  74 ++++++++++++----------
 3 files changed, 119 insertions(+), 97 deletions(-)

diff --git a/pkg/api/handlers_node.go b/pkg/api/handlers_node.go
index 19d6c0b7..8738c468 100644
--- a/pkg/api/handlers_node.go
+++ b/pkg/api/handlers_node.go
@@ -264,26 +264,38 @@ func (api *API) GetFromDHT() gin.HandlerFunc {
 			})
 			return
 		}
-		sharedData := db.SharedData{}
-		nv := db.ReadData(api.Node, keyStr)
-		err := json.Unmarshal(nv, &sharedData)
+
+		nv, err := db.ReadData(api.Node, keyStr)
 		if err != nil {
-			if IsBase64(string(nv)) {
-				decodedString, _ := base64.StdEncoding.DecodeString(string(nv))
-				_ = json.Unmarshal(decodedString, &sharedData)
-				c.JSON(http.StatusOK, gin.H{
-					"success": true,
-					"message": sharedData,
-				})
-				return
-			} else {
-				c.JSON(http.StatusOK, gin.H{
-					"success": true,
-					"message": string(nv),
-				})
-				return
+			logrus.WithFields(logrus.Fields{
+				"key":   keyStr,
+				"error": err,
+			}).Error("Failed to read data from DHT")
+			c.JSON(http.StatusInternalServerError, gin.H{
+				"success": false,
+				"message": "failed to read data",
+			})
+			return
+		}
+
+		var sharedData db.SharedData
+		if err := json.Unmarshal(nv, &sharedData); err != nil {
+			if decodedString, decodeErr := base64.StdEncoding.DecodeString(string(nv)); decodeErr == nil {
+				if json.Unmarshal(decodedString, &sharedData) == nil {
+					c.JSON(http.StatusOK, gin.H{
+						"success": true,
+						"message": sharedData,
+					})
+					return
+				}
 			}
+			c.JSON(http.StatusOK, gin.H{
+				"success": true,
+				"message": string(nv),
+			})
+			return
 		}
+
 		c.JSON(http.StatusOK, gin.H{
 			"success": true,
 			"message": sharedData,
@@ -376,50 +388,50 @@ func (api *API) ChatPageHandler() gin.HandlerFunc {
 // an HTML page displaying the node's status and uptime info.
 func (api *API) NodeStatusPageHandler() gin.HandlerFunc {
 	return func(c *gin.Context) {
-		peers := api.Node.Host.Network().Peers()
-		nodeData := api.Node.NodeTracker.GetNodeData(api.Node.Host.ID().String())
-		if nodeData == nil {
-			c.HTML(http.StatusOK, "index.html", gin.H{
-				"TotalPeers":       0,
-				"Name":             "Masa Status Page",
-				"PeerID":           api.Node.Host.ID().String(),
-				"IsStaked":         false,
-				"IsTwitterScraper": false,
-				"IsDiscordScraper": false,
-				"IsWebScraper":     false,
-				"FirstJoined":      api.Node.FromUnixTime(time.Now().Unix()),
-				"LastJoined":       api.Node.FromUnixTime(time.Now().Unix()),
-				"CurrentUptime":    "0",
-				"Rewards":          "Coming Soon!",
-				"BytesScraped":     0,
-			})
-			return
-		} else {
-			nd := *nodeData
-			nd.CurrentUptime = nodeData.GetCurrentUptime()
-			nd.AccumulatedUptime = nodeData.GetAccumulatedUptime()
-			nd.CurrentUptimeStr = pubsub.PrettyDuration(nd.CurrentUptime)
-			nd.AccumulatedUptimeStr = pubsub.PrettyDuration(nd.AccumulatedUptime)
-
-			sharedData := db.SharedData{}
-			nv := db.ReadData(api.Node, api.Node.Host.ID().String())
-			_ = json.Unmarshal(nv, &sharedData)
-			bytesScraped, _ := strconv.Atoi(fmt.Sprintf("%v", sharedData["bytesScraped"]))
-			c.HTML(http.StatusOK, "index.html", gin.H{
-				"TotalPeers":       len(peers),
-				"Name":             "Masa Status Page",
-				"PeerID":           api.Node.Host.ID().String(),
-				"IsStaked":         nd.IsStaked,
-				"IsTwitterScraper": nd.IsTwitterScraper,
-				"IsDiscordScraper": nd.IsDiscordScraper,
-				"IsWebScraper":     nd.IsWebScraper,
-				"FirstJoined":      api.Node.FromUnixTime(nd.FirstJoinedUnix),
-				"LastJoined":       api.Node.FromUnixTime(nd.LastJoinedUnix),
-				"CurrentUptime":    nd.CurrentUptimeStr,
-				"TotalUptime":      nd.AccumulatedUptimeStr,
-				"BytesScraped":     fmt.Sprintf("%.4f MB", float64(bytesScraped)/(1024*1024)),
-			})
+		// Initialize default values for the template data
+		templateData := gin.H{
+			"TotalPeers":       0,
+			"Name":             "Masa Status Page",
+			"PeerID":           api.Node.Host.ID().String(),
+			"IsStaked":         false,
+			"IsTwitterScraper": false,
+			"IsDiscordScraper": false,
+			"IsWebScraper":     false,
+			"FirstJoined":      api.Node.FromUnixTime(time.Now().Unix()),
+			"LastJoined":       api.Node.FromUnixTime(time.Now().Unix()),
+			"CurrentUptime":    "0",
+			"TotalUptime":      "0",
+			"Rewards":          "Coming Soon!",
+			"BytesScraped":     "0 MB",
 		}
+
+		if api.Node != nil && api.Node.Host != nil {
+			peers := api.Node.Host.Network().Peers()
+			templateData["TotalPeers"] = len(peers)
+
+			if nodeData := api.Node.NodeTracker.GetNodeData(api.Node.Host.ID().String()); nodeData != nil {
+				nd := *nodeData
+				templateData["IsStaked"] = nd.IsStaked
+				templateData["IsTwitterScraper"] = nd.IsTwitterScraper
+				templateData["IsDiscordScraper"] = nd.IsDiscordScraper
+				templateData["IsWebScraper"] = nd.IsWebScraper
+				templateData["FirstJoined"] = api.Node.FromUnixTime(nd.FirstJoinedUnix)
+				templateData["LastJoined"] = api.Node.FromUnixTime(nd.LastJoinedUnix)
+				templateData["CurrentUptime"] = pubsub.PrettyDuration(nd.GetCurrentUptime())
+				templateData["TotalUptime"] = pubsub.PrettyDuration(nd.GetAccumulatedUptime())
+
+				if nv, err := db.ReadData(api.Node, api.Node.Host.ID().String()); err == nil {
+					var sharedData db.SharedData
+					if json.Unmarshal(nv, &sharedData) == nil {
+						if bytesScraped, err := strconv.Atoi(fmt.Sprintf("%v", sharedData["bytesScraped"])); err == nil {
+							templateData["BytesScraped"] = fmt.Sprintf("%.4f MB", float64(bytesScraped)/(1024*1024))
+						}
+					}
+				}
+			}
+		}
+
+		c.HTML(http.StatusOK, "index.html", templateData)
 	}
 }
diff --git a/pkg/db/operations.go b/pkg/db/operations.go
index b354a187..c0fd92c3 100644
--- a/pkg/db/operations.go
+++ b/pkg/db/operations.go
@@ -63,12 +63,12 @@ func WriteData(node *masa.OracleNode, key string, value []byte) error {
 
 // ReadData reads the value for the given key from the database.
 // It requires the host for access control verification before reading.
-func ReadData(node *masa.OracleNode, key string) []byte {
+func ReadData(node *masa.OracleNode, key string) ([]byte, error) {
 	logrus.WithFields(logrus.Fields{
 		"nodeID":       node.Host.ID().String(),
 		"isAuthorized": true,
 		"ReadData":     true,
-	})
+	}).Info("Attempting to read data")
 
 	ctx, cancel := context.WithTimeout(context.Background(), time.Second*60)
 	defer cancel()
@@ -91,9 +91,9 @@ func ReadData(node *masa.OracleNode, key string) []byte {
 	if err != nil {
 		logrus.WithFields(logrus.Fields{
 			"error": err,
-		}).Debug("Failed to read from the database")
-		return val
+		}).Error("Failed to read from the database")
+		return nil, err
 	}
 
-	return val
+	return val, nil
 }
diff --git a/pkg/workers/workers.go b/pkg/workers/workers.go
index 7a18a631..1cb48c9f 100644
--- a/pkg/workers/workers.go
+++ b/pkg/workers/workers.go
@@ -183,65 +183,75 @@ func isBootnode(ipAddr string) bool {
 // - node: A pointer to the OracleNode instance whose records need to be updated.
 // - data: The data to be written for the specified key.
 // - key: The key under which the data should be stored.
+//
+// The function checks if the data already exists in the database. If it does not, it writes the new data.
+// It then retrieves the node data from the cache or the node tracker. If the node data is not found, it logs an error.
+// The function updates the node data with the new CID and bytes scraped, and writes the updated node data back to the database.
 func updateRecords(node *masa.OracleNode, workEvent db.WorkEvent) {
+	if node == nil {
+		logrus.Error("Node is nil")
+		return
+	}
 
-	exists := db.ReadData(node, workEvent.CID)
+	exists, err := db.ReadData(node, workEvent.CID)
+	if err != nil {
+		logrus.Errorf("Failed to read data for CID %s: %v", workEvent.CID, err)
+		return
+	}
 	if exists == nil {
-		_ = db.WriteData(node, workEvent.CID, workEvent.Payload)
+		err := db.WriteData(node, workEvent.CID, workEvent.Payload)
+		if err != nil {
+			logrus.Errorf("Failed to write data for CID %s: %v", workEvent.CID, err)
+			return
+		}
 	}
 
 	var nodeData pubsub.NodeData
 	nodeDataBytes, err := db.GetCache(context.Background(), workEvent.PeerId)
-	if err != nil {
-		nodeData = *node.NodeTracker.GetNodeData(workEvent.PeerId)
+	if err != nil || nodeDataBytes == nil {
+		nodeDataPtr := node.NodeTracker.GetNodeData(workEvent.PeerId)
+		if nodeDataPtr == nil {
+			logrus.Errorf("Node data not found for peer ID: %s", workEvent.PeerId)
+			return
+		}
+		nodeData = *nodeDataPtr
 	} else {
 		err = json.Unmarshal(nodeDataBytes, &nodeData)
 		if err != nil {
-			logrus.Error(err)
+			logrus.Errorf("Failed to unmarshal node data bytes: %v", err)
 			return
 		}
 	}
-	nodeData.BytesScraped = nodeData.BytesScraped + len(workEvent.Payload)
-
-	newCID := CID{
-		RecordId:  workEvent.CID,
-		Duration:  workEvent.Duration,
-		Timestamp: time.Now().Unix(),
+	if nodeData.Records == nil {
+		nodeData.Records = []CID{}
 	}
 
-	records := nodeData.Records
-
-	if records == nil {
-		recordsSlice, ok := records.([]CID)
-		if !ok {
-			recordsSlice = []CID{}
+	if exists == nil {
+		nodeData.BytesScraped += len(workEvent.Payload)
+		newCID := CID{
+			RecordId:  workEvent.CID,
+			Duration:  workEvent.Duration,
+			Timestamp: time.Now().Unix(),
 		}
-		recordsSlice = append(recordsSlice, newCID)
-		nodeData.Records = recordsSlice
+		nodeData.Records = append(nodeData.Records.([]CID), newCID)
 		err = node.NodeTracker.AddOrUpdateNodeData(&nodeData, true)
 		if err != nil {
-			logrus.Error(err)
+			logrus.Errorf("Failed to update node data: %v", err)
 			return
 		}
-	} else {
-		if exists == nil {
-			records = append(nodeData.Records.([]interface{}), newCID)
-			nodeData.Records = records
-			err = node.NodeTracker.AddOrUpdateNodeData(&nodeData, true)
-			if err != nil {
-				logrus.Error(err)
-				return
-			}
-		}
 	}
 
 	jsonData, err := json.Marshal(nodeData)
 	if err != nil {
-		logrus.Error(err)
+		logrus.Errorf("Failed to marshal node data: %v", err)
+		return
+	}
+	err = db.WriteData(node, workEvent.PeerId, jsonData)
+	if err != nil {
+		logrus.Errorf("Failed to write node data for peer ID %s: %v", workEvent.PeerId, err)
 		return
 	}
-	_ = db.WriteData(node, workEvent.PeerId, jsonData)
 
 	logrus.Infof("[+] Updated records key %s for node %s", workEvent.CID, workEvent.PeerId)
 }
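A note on the failure mode fixed above: the old updateRecords dereferenced the result of NodeTracker.GetNodeData unconditionally (nodeData = *node.NodeTracker.GetNodeData(workEvent.PeerId)), which panics whenever the tracker has no entry for that peer. The sketch below reproduces the crash and the guard now used; it is illustrative only — the Tracker and NodeData types are simplified stand-ins, not the real masa-oracle API:

    package main

    import "fmt"

    // NodeData and Tracker are simplified stand-ins for the real types.
    type NodeData struct{ BytesScraped int }

    type Tracker struct{ peers map[string]*NodeData }

    // GetNodeData mirrors the lookup in updateRecords: it returns nil for unknown peers.
    func (t *Tracker) GetNodeData(id string) *NodeData { return t.peers[id] }

    func main() {
        t := &Tracker{peers: map[string]*NodeData{}}

        // Before the fix, the equivalent of this line panicked for unknown peers:
        //   nodeData := *t.GetNodeData("unknown-peer")

        // After the fix, the pointer is checked before dereferencing:
        ptr := t.GetNodeData("unknown-peer")
        if ptr == nil {
            fmt.Println("node data not found for peer; aborting update")
            return
        }
        nodeData := *ptr
        fmt.Println(nodeData.BytesScraped)
    }

The ReadData signature change follows the same principle: returning ([]byte, error) instead of a bare []byte forces each caller to decide what a failed read means, which is why handlers_node.go and workers.go are updated in the same commit.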
From 9c77e377f89dcd8b4bdd85b717a0463228b5a2f3 Mon Sep 17 00:00:00 2001
From: Brendan Playford <34052452+teslashibe@users.noreply.github.com>
Date: Fri, 12 Jul 2024 07:55:35 +0100
Subject: [PATCH 2/8] fix: error handling in `MonitorWorkers` (#411)

* Update workers.go to handle nil pointers

* handle errors from db and update breaking changes

* Update monitorworkers and refactor code

---
 pkg/workers/workers.go | 104 +++++++++++++++++++++++------------------
 1 file changed, 58 insertions(+), 46 deletions(-)

diff --git a/pkg/workers/workers.go b/pkg/workers/workers.go
index 1cb48c9f..5974ea74 100644
--- a/pkg/workers/workers.go
+++ b/pkg/workers/workers.go
@@ -377,6 +377,18 @@ func SubscribeToWorkers(node *masa.OracleNode) {
 // marshals the data to JSON, and writes it to the database using the WriteData function.
 // The monitoring continues until the context is done.
 func MonitorWorkers(ctx context.Context, node *masa.OracleNode) {
+	if node == nil {
+		logrus.Error("MonitorWorkers: node is nil")
+		return
+	}
+	if node.ActorRemote == nil {
+		logrus.Error("MonitorWorkers: node.ActorRemote is nil")
+		return
+	}
+	if node.WorkerTracker == nil || node.WorkerTracker.WorkerStatusCh == nil {
+		logrus.Error("MonitorWorkers: WorkerTracker or WorkerStatusCh is nil")
+		return
+	}
 	// Register self as a remote node for the network
 	node.ActorRemote.Register("peer", actor.PropsFromProducer(NewWorker(node)))
 
@@ -389,7 +401,11 @@ func MonitorWorkers(ctx context.Context, node *masa.OracleNode) {
 		select {
 		case <-ticker.C:
 			logrus.Debug("tick")
-		case work := <-node.WorkerTracker.WorkerStatusCh:
+		case work, ok := <-node.WorkerTracker.WorkerStatusCh:
+			if !ok {
+				logrus.Error("WorkerStatusCh channel was closed")
+				return
+			}
 			logrus.Info("[+] Sending work to network")
 			var workData map[string]string
 			err := json.Unmarshal(work.Data, &workData)
@@ -399,7 +415,11 @@ func MonitorWorkers(ctx context.Context, node *masa.OracleNode) {
 			startTime = time.Now()
 			go SendWork(node, work)
-		case data := <-workerDoneCh:
+		case data, ok := <-workerDoneCh:
+			if !ok {
+				logrus.Error("workerDoneCh channel was closed")
+				return
+			}
 			validatorDataMap, ok := data.ValidatorData.(map[string]interface{})
 			if !ok {
 				logrus.Errorf("Error asserting type: %v", ok)
@@ -418,52 +438,44 @@ func MonitorWorkers(ctx context.Context, node *masa.OracleNode) {
 				logrus.Debugf("Error processing data.ValidatorData: %v", data.ValidatorData)
 			}
 
-			if validatorDataMap, ok := data.ValidatorData.(map[string]interface{}); ok {
-				if response, ok := validatorDataMap["Response"].(map[string]interface{}); ok {
-					if _, ok := response["error"].(string); ok {
-						logrus.Infof("[+] Work failed %s", response["error"])
-					} else if work, ok := response["data"].(string); ok {
-						key, _ := computeCid(work)
-						logrus.Infof("[+] Work done %s", key)
-
-						endTime := time.Now()
-						duration := endTime.Sub(startTime)
-
-						workEvent := db.WorkEvent{
-							CID:       key,
-							PeerId:    data.ID,
-							Payload:   []byte(work),
-							Duration:  duration.Seconds(),
-							Timestamp: time.Now().Unix(),
-						}
-
-						updateRecords(node, workEvent)
-					} else if w, ok := response["data"].(map[string]interface{}); ok {
-						work, err := json.Marshal(w)
-						if err != nil {
-							logrus.Errorf("Error marshalling data.ValidatorData: %v", err)
-							continue
-						}
-						key, _ := computeCid(string(work))
-						logrus.Infof("[+] Work done %s", key)
-
-						endTime := time.Now()
-						duration := endTime.Sub(startTime)
-
-						workEvent := db.WorkEvent{
-							CID:       key,
-							PeerId:    data.ID,
-							Payload:   work,
-							Duration:  duration.Seconds(),
-							Timestamp: time.Now().Unix(),
-						}
-
-						updateRecords(node, workEvent)
-					}
-				}
-			}
+			processValidatorData(data, validatorDataMap, &startTime, node)
 		case <-ctx.Done():
 			return
 		}
 	}
 }
+
+func processValidatorData(data *pubsub2.Message, validatorDataMap map[string]interface{}, startTime *time.Time, node *masa.OracleNode) {
+	if response, ok := validatorDataMap["Response"].(map[string]interface{}); ok {
+		if _, ok := response["error"].(string); ok {
+			logrus.Infof("[+] Work failed %s", response["error"])
+		} else if work, ok := response["data"].(string); ok {
+			processWork(data, work, startTime, node)
+		} else if w, ok := response["data"].(map[string]interface{}); ok {
+			work, err := json.Marshal(w)
+			if err != nil {
+				logrus.Errorf("Error marshalling data.ValidatorData: %v", err)
+				return
+			}
+			processWork(data, string(work), startTime, node)
+		}
+	}
+}
+
+func processWork(data *pubsub2.Message, work string, startTime *time.Time, node *masa.OracleNode) {
+	key, _ := computeCid(work)
+	logrus.Infof("[+] Work done %s", key)
+
+	endTime := time.Now()
+	duration := endTime.Sub(*startTime)
+
+	workEvent := db.WorkEvent{
+		CID:       key,
+		PeerId:    data.ID,
+		Payload:   []byte(work),
+		Duration:  duration.Seconds(),
+		Timestamp: time.Now().Unix(),
+	}
+
+	updateRecords(node, workEvent)
+}
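A note on the comma-ok receives introduced above: receiving with work, ok := <-ch distinguishes a delivered value from a closed channel, since a closed channel yields its zero value with ok == false. Without the check, the select loop would spin on empty receives (and push zero-valued work downstream) instead of shutting down. A minimal, self-contained illustration — the channel name and payloads are invented for the example:

    package main

    import "fmt"

    func main() {
        statusCh := make(chan string, 2)
        statusCh <- "job-1"
        statusCh <- "job-2"
        close(statusCh)

        for {
            select {
            case work, ok := <-statusCh:
                if !ok {
                    // Closed channel: zero value with ok == false, so exit the loop.
                    fmt.Println("status channel closed; monitor exiting")
                    return
                }
                fmt.Println("received:", work)
            }
        }
    }

Note that buffered values are still drained before ok turns false, so returning on the closed channel loses no queued work.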
From 56805231d65781b712cd97c726e172dff124044d Mon Sep 17 00:00:00 2001
From: Brendan Playford <34052452+teslashibe@users.noreply.github.com>
Date: Fri, 12 Jul 2024 08:34:41 +0100
Subject: [PATCH 3/8] fix: better error handling in `SendWork` (#412)

* Update workers.go to handle nil pointers

* handle errors from db and update breaking changes

* Update monitorworkers and refactor code

* Fix sendWork nil pointer

---
 pkg/workers/workers.go | 39 ++++++++++++++++++++++-----------------
 1 file changed, 22 insertions(+), 17 deletions(-)

diff --git a/pkg/workers/workers.go b/pkg/workers/workers.go
index 5974ea74..df7deacf 100644
--- a/pkg/workers/workers.go
+++ b/pkg/workers/workers.go
@@ -319,29 +319,34 @@ func SendWork(node *masa.OracleNode, m *pubsub2.Message) {
 		for _, addr := range p.Multiaddrs {
 			ipAddr, _ := addr.ValueForProtocol(multiaddr.P_IP4)
 			logrus.Infof("[+] Worker Address: %s", ipAddr)
-			if !isBootnode(ipAddr) && p.IsTwitterScraper || p.IsWebScraper || p.IsDiscordScraper {
+			if !isBootnode(ipAddr) && (p.IsTwitterScraper || p.IsWebScraper || p.IsDiscordScraper) {
 				wg.Add(1)
-				go func() {
+				go func(p pubsub.NodeData) {
 					defer wg.Done()
 					spawned, err := node.ActorRemote.SpawnNamed(fmt.Sprintf("%s:4001", ipAddr), "worker", "peer", -1)
 					if err != nil {
 						logrus.Debugf("Spawned error %v", err)
-					} else {
-						spawnedPID := spawned.Pid
-						client := node.ActorEngine.Spawn(props)
-						node.ActorEngine.Send(spawnedPID, &messages.Connect{
-							Sender: client,
-						})
-						future := node.ActorEngine.RequestFuture(spawnedPID, message, 30*time.Second)
-						result, err := future.Result()
-						if err != nil {
-							logrus.Debugf("Error receiving response: %v", err)
-							return
-						}
-						response := result.(*messages.Response)
-						node.ActorEngine.Send(spawnedPID, response)
+						return
 					}
-				}()
+					spawnedPID := spawned.Pid
+					// Check if spawnedPID is nil
+					if spawnedPID == nil {
+						logrus.Errorf("spawnedPID is nil for IP: %s", ipAddr)
+						return
+					}
+					client := node.ActorEngine.Spawn(props)
+					node.ActorEngine.Send(spawnedPID, &messages.Connect{
+						Sender: client,
+					})
+					future := node.ActorEngine.RequestFuture(spawnedPID, message, 30*time.Second)
+					result, err := future.Result()
+					if err != nil {
+						logrus.Debugf("Error receiving response: %v", err)
+						return
+					}
+					response := result.(*messages.Response)
+					node.ActorEngine.Send(spawnedPID, response)
+				}(p)
 			}
 		}
 	}
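Two distinct bugs are fixed in this patch. First, && binds tighter than || in Go, so the old condition !isBootnode(ipAddr) && p.IsTwitterScraper || p.IsWebScraper || p.IsDiscordScraper parsed as (!isBootnode(ipAddr) && p.IsTwitterScraper) || p.IsWebScraper || p.IsDiscordScraper — a bootnode that was also a web or Discord scraper would still be dialed; the added parentheses restore the intended guard. Second, go func(p pubsub.NodeData) { ... }(p) passes the loop variable as an argument, copying it per iteration: before Go 1.22, all closures in a range loop shared a single loop variable, so the spawned goroutines could all observe the last element. A standalone sketch of that capture pattern (the peer names are made up):

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        peers := []string{"peer-a", "peer-b", "peer-c"}
        var wg sync.WaitGroup

        for _, p := range peers {
            wg.Add(1)
            // p is passed by value, so each goroutine gets its own copy,
            // as in the patched SendWork.
            go func(p string) {
                defer wg.Done()
                fmt.Println("spawning worker for", p)
            }(p)
        }
        wg.Wait()
    }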
From 9ded4d00d3daf9e79ba776ef6c085d0375b1408a Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Fri, 12 Jul 2024 09:45:59 +0200
Subject: [PATCH 4/8] Update and rename commitlint.yaml to prlint.yaml

---
 .github/workflows/commitlint.yaml | 13 -------------
 .github/workflows/prlint.yaml     | 19 +++++++++++++++++++
 2 files changed, 19 insertions(+), 13 deletions(-)
 delete mode 100644 .github/workflows/commitlint.yaml
 create mode 100644 .github/workflows/prlint.yaml

diff --git a/.github/workflows/commitlint.yaml b/.github/workflows/commitlint.yaml
deleted file mode 100644
index ba2e8a68..00000000
--- a/.github/workflows/commitlint.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: Commitlint
-
-on:
-  push:
-  pull_request:
-
-jobs:
-  commitlint:
-    runs-on: ubuntu-latest
-    name: Commitlint
-    steps:
-      - name: Run commitlint
-        uses: opensource-nepal/commitlint@v1
diff --git a/.github/workflows/prlint.yaml b/.github/workflows/prlint.yaml
new file mode 100644
index 00000000..bedaa8e4
--- /dev/null
+++ b/.github/workflows/prlint.yaml
@@ -0,0 +1,19 @@
+name: Check PR title
+
+on:
+  pull_request_target:
+    types:
+      - opened
+      - reopened
+      - edited
+      - synchronize
+
+jobs:
+  lint:
+    runs-on: ubuntu-latest
+    permissions:
+      statuses: write
+    steps:
+      - uses: aslafy-z/conventional-pr-title-action@v3
+        env:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
From b4fad00ecfb54800f3e0cb9339c999fd612a87fa Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Fri, 12 Jul 2024 10:00:55 +0200
Subject: [PATCH 5/8] ci: check PR description in prlint

---
 .github/workflows/prlint.yaml | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/prlint.yaml b/.github/workflows/prlint.yaml
index bedaa8e4..471985f6 100644
--- a/.github/workflows/prlint.yaml
+++ b/.github/workflows/prlint.yaml
@@ -1,4 +1,4 @@
-name: Check PR title
+name: Check PR style
 
 on:
   pull_request_target:
@@ -9,7 +9,7 @@ on:
       - synchronize
 
 jobs:
-  lint:
+  title-lint:
     runs-on: ubuntu-latest
     permissions:
       statuses: write
@@ -17,3 +17,12 @@ jobs:
       - uses: aslafy-z/conventional-pr-title-action@v3
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+  check-pr-description:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: jadrol/pr-description-checker-action@v1.0.0
+        id: description-checker
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          exempt-labels: no qa
From 2639b8c54b1a76a990686e423fe2eab0c048da8d Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Fri, 12 Jul 2024 10:13:57 +0200
Subject: [PATCH 6/8] ci: add secscan (#415)

---
 .github/workflows/secscan.yml | 30 ++++++++++++++++++++++++++++++
 1 file changed, 30 insertions(+)
 create mode 100644 .github/workflows/secscan.yml

diff --git a/.github/workflows/secscan.yml b/.github/workflows/secscan.yml
new file mode 100644
index 00000000..d9743d9e
--- /dev/null
+++ b/.github/workflows/secscan.yml
@@ -0,0 +1,30 @@
+name: "Security Scan"
+
+# Run workflow each time code is pushed to your repository and on a schedule.
+# The scheduled workflow runs at 00:00 UTC every Sunday.
+on:
+  push:
+  schedule:
+    - cron: '0 0 * * 0'
+
+jobs:
+  tests:
+    runs-on: ubuntu-latest
+    env:
+      GO111MODULE: on
+    steps:
+      - name: Checkout Source
+        uses: actions/checkout@v4
+        if: ${{ github.actor != 'dependabot[bot]' }}
+      - name: Run Gosec Security Scanner
+        if: ${{ github.actor != 'dependabot[bot]' }}
+        uses: securego/gosec@master
+        with:
+          # we let the report content trigger a failure using the GitHub Security features.
+          args: '-no-fail -fmt sarif -out results.sarif ./...'
+      - name: Upload SARIF file
+        if: ${{ github.actor != 'dependabot[bot]' }}
+        uses: github/codeql-action/upload-sarif@v3
+        with:
+          # Path to SARIF file relative to the root of the repository
+          sarif_file: results.sarif
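For context on what this scan reports: gosec's rules include unchecked error returns (G104), the same class of defect patches 1-3 in this series fix by hand, and with -no-fail the findings are recorded as SARIF for the repository's Security tab rather than failing the build. A made-up snippet of the flagged-versus-clean pattern — the file name and messages are invented for illustration:

    package main

    import (
        "fmt"
        "os"
    )

    func main() {
        // f, _ := os.Open("config.json") // a discarded error like this is what G104-style checks flag
        f, err := os.Open("config.json")
        if err != nil {
            fmt.Println("open failed:", err)
            return
        }
        defer f.Close()
        fmt.Println("opened", f.Name())
    }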
From 6510f9066cf0f39ad1b236075a8c9a8fa38310e3 Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Fri, 12 Jul 2024 10:20:40 +0200
Subject: [PATCH 7/8] ci: run secscan on PRs

---
 .github/workflows/secscan.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.github/workflows/secscan.yml b/.github/workflows/secscan.yml
index d9743d9e..a3415d2c 100644
--- a/.github/workflows/secscan.yml
+++ b/.github/workflows/secscan.yml
@@ -4,6 +4,7 @@ name: "Security Scan"
 # The scheduled workflow runs at 00:00 UTC every Sunday.
 on:
   push:
+  pull_request:
   schedule:
     - cron: '0 0 * * 0'
 

From 4744de318e7c76f28307e75e99e5f902c9c993dc Mon Sep 17 00:00:00 2001
From: Ettore Di Giacinto
Date: Fri, 12 Jul 2024 10:36:48 +0200
Subject: [PATCH 8/8] ci: add PR template (#418)

This adds a simple template for PRs.

---
 .github/PULL_REQUEST_TEMPLATE.md | 29 +++++++++++++++++++++++++++++
 1 file changed, 29 insertions(+)
 create mode 100644 .github/PULL_REQUEST_TEMPLATE.md

diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
new file mode 100644
index 00000000..eb4cf1ba
--- /dev/null
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -0,0 +1,29 @@
+**Description**
+
+This PR fixes #
+
+**Notes for Reviewers**
+
+
+**[Signed commits](../CONTRIBUTING.md#signing-off-on-commits-developer-certificate-of-origin)**
+- [ ] Yes, I signed my commits.
+