Skip to content

Commit

Permalink
Merge branch 'main' into katie.hockman/128-bit-bug
Browse files Browse the repository at this point in the history
  • Loading branch information
ahmed-mez authored Aug 17, 2023
2 parents 4643c3d + 472db90 commit 5d659d3
Show file tree
Hide file tree
Showing 7 changed files with 119 additions and 15 deletions.
17 changes: 17 additions & 0 deletions .github/workflows/stale.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,17 @@
# Workflow: automatically mark long-inactive pull requests as stale, then close them.
# (Restored proper YAML nesting — the flattened indentation was not valid for GitHub Actions.)
name: 'Close stale PRs'
# Docs: https://github.com/marketplace/actions/close-stale-issues
on:
  schedule:
    - cron: '30 1 * * *' # runs once per day at 01:30 UTC

jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/stale@v8
        with:
          stale-pr-message: 'This PR is stale because it has been open 20 days with no activity. Remove stale label or comment or this will be closed in 10 days.'
          close-pr-message: 'This PR was closed because it has been open for 30 days with no activity.'
          start-date: '2023-08-20T00:00:00Z' # PRs opened before the stale bot was introduced should be ignored
          days-before-pr-close: 30
          days-before-pr-stale: 20
13 changes: 3 additions & 10 deletions contrib/database/sql/exec_trace_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -71,9 +71,9 @@ func TestExecutionTraceAnnotations(t *testing.T) {
rows.Close()
stmt, err := conn.PrepareContext(ctx, "prepared")
require.NoError(t, err, "preparing mock statement")
_, err = stmt.Exec()
_, err = stmt.ExecContext(ctx)
require.NoError(t, err, "executing mock perpared statement")
rows, err = stmt.Query()
rows, err = stmt.QueryContext(ctx)
require.NoError(t, err, "executing mock perpared query")
rows.Close()
tx, err := conn.BeginTx(ctx, nil)
Expand All @@ -90,9 +90,7 @@ func TestExecutionTraceAnnotations(t *testing.T) {
tasks, err := tasksFromTrace(buf)
require.NoError(t, err, "getting tasks from trace")

expectedParentChildTasks := []string{"Connect", "Exec", "Query", "Prepare", "Begin", "Exec"}
expectedPreparedStatementTasks := []string{"Exec", "Query"}

expectedParentChildTasks := []string{"Connect", "Exec", "Query", "Prepare", "Exec", "Query", "Begin", "Exec"}
var foundParent, foundPrepared bool
for id, task := range tasks {
t.Logf("task %d: %+v", id, task)
Expand All @@ -106,11 +104,6 @@ func TestExecutionTraceAnnotations(t *testing.T) {
assert.ElementsMatch(t, expectedParentChildTasks, gotParentChildTasks)
case "Prepare":
foundPrepared = true
var gotPerparedStatementTasks []string
for _, id := range tasks[id].children {
gotPerparedStatementTasks = append(gotPerparedStatementTasks, tasks[id].name)
}
assert.ElementsMatch(t, expectedPreparedStatementTasks, gotPerparedStatementTasks)
assert.GreaterOrEqual(t, task.duration, sleepDuration, "task %s", task.name)
case "Connect", "Exec", "Begin", "Commit", "Query":
assert.GreaterOrEqual(t, task.duration, sleepDuration, "task %s", task.name)
Expand Down
8 changes: 4 additions & 4 deletions contrib/database/sql/stmt.go
Original file line number Diff line number Diff line change
Expand Up @@ -36,7 +36,7 @@ func (s *tracedStmt) Close() (err error) {
func (s *tracedStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (res driver.Result, err error) {
start := time.Now()
if stmtExecContext, ok := s.Stmt.(driver.StmtExecContext); ok {
ctx, end := startTraceTask(s.ctx, QueryTypeExec)
ctx, end := startTraceTask(ctx, QueryTypeExec)
defer end()
res, err := stmtExecContext.ExecContext(ctx, args)
s.tryTrace(ctx, QueryTypeExec, s.query, start, err)
Expand All @@ -51,7 +51,7 @@ func (s *tracedStmt) ExecContext(ctx context.Context, args []driver.NamedValue)
return nil, ctx.Err()
default:
}
ctx, end := startTraceTask(s.ctx, QueryTypeExec)
ctx, end := startTraceTask(ctx, QueryTypeExec)
defer end()
res, err = s.Exec(dargs)
s.tryTrace(ctx, QueryTypeExec, s.query, start, err)
Expand All @@ -62,7 +62,7 @@ func (s *tracedStmt) ExecContext(ctx context.Context, args []driver.NamedValue)
func (s *tracedStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (rows driver.Rows, err error) {
start := time.Now()
if stmtQueryContext, ok := s.Stmt.(driver.StmtQueryContext); ok {
ctx, end := startTraceTask(s.ctx, QueryTypeQuery)
ctx, end := startTraceTask(ctx, QueryTypeQuery)
defer end()
rows, err := stmtQueryContext.QueryContext(ctx, args)
s.tryTrace(ctx, QueryTypeQuery, s.query, start, err)
Expand All @@ -77,7 +77,7 @@ func (s *tracedStmt) QueryContext(ctx context.Context, args []driver.NamedValue)
return nil, ctx.Err()
default:
}
ctx, end := startTraceTask(s.ctx, QueryTypeQuery)
ctx, end := startTraceTask(ctx, QueryTypeQuery)
defer end()
rows, err = s.Query(dargs)
s.tryTrace(ctx, QueryTypeQuery, s.query, start, err)
Expand Down
33 changes: 33 additions & 0 deletions contrib/database/sql/stmt_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
// Unless explicitly stated otherwise all files in this repository are licensed
// under the Apache License Version 2.0.
// This product includes software developed at Datadog (https://www.datadoghq.com/).
// Copyright 2016 Datadog, Inc.

package sql

import (
"context"
"testing"

"github.com/mattn/go-sqlite3"
"github.com/stretchr/testify/require"
)

// TestUseStatementContext verifies that a traced prepared statement uses the
// context supplied at query time, not the (by then canceled) context it was
// prepared with.
func TestUseStatementContext(t *testing.T) {
	Register("sqlite3", &sqlite3.SQLiteDriver{})
	db, err := Open("sqlite3", "file::memory:?cache=shared")
	require.NoError(t, err)
	defer db.Close()

	// Prepare the statement with a context that is canceled immediately
	// after preparation succeeds.
	prepareCtx, cancelPrepare := context.WithCancel(context.Background())
	stmt, err := db.PrepareContext(prepareCtx, "SELECT 1")
	require.NoError(t, err)
	defer stmt.Close()
	cancelPrepare()

	// Querying with a fresh, live context must still succeed.
	var got int
	require.NoError(t, stmt.QueryRowContext(context.Background()).Scan(&got))
}
2 changes: 1 addition & 1 deletion ddtrace/tracer/telemetry.go
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ func startTelemetry(c *config) {
telemetryConfigs := []telemetry.Configuration{
{Name: "trace_debug_enabled", Value: c.debug},
{Name: "agent_feature_drop_p0s", Value: c.agent.DropP0s},
{Name: "stats_computation_enabled", Value: c.agent.Stats},
{Name: "stats_computation_enabled", Value: c.canComputeStats()},
{Name: "dogstatsd_port", Value: c.agent.StatsdPort},
{Name: "lambda_mode", Value: c.logToStdout},
{Name: "send_retries", Value: c.sendRetries},
Expand Down
1 change: 1 addition & 0 deletions ddtrace/tracer/telemetry_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ func TestTelemetryEnabled(t *testing.T) {
telemetry.Check(t, telemetryClient.Configuration, "service", "test-serv")
telemetry.Check(t, telemetryClient.Configuration, "env", "test-env")
telemetry.Check(t, telemetryClient.Configuration, "runtime_metrics_enabled", true)
telemetry.Check(t, telemetryClient.Configuration, "stats_computation_enabled", false)
if metrics, ok := telemetryClient.Metrics[telemetry.NamespaceGeneral]; ok {
if initTime, ok := metrics["init_time"]; ok {
assert.True(t, initTime > 0)
Expand Down
60 changes: 60 additions & 0 deletions profiler/doc.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,3 +6,63 @@
// Package profiler periodically collects and sends profiles to the Datadog API.
// Use Start to start the profiler.
package profiler // import "gopkg.in/DataDog/dd-trace-go.v1/profiler"

/*
Developer documentation:
The Go profiler client library works as follows:
* The user configures and initiates profile collection by calling the
profiler.Start function, typically in main(). Collection
starts asynchronously, so profiler.Start will return quickly and the
actual data collection is done in the background.
* The user calls profiler.Stop at the end of the program to end profile
collection and uploading. This is typically done via a defer statement
in main() after calling profiler.Start.
* The profiler initiates two goroutines: one which will collect the
configured profiles every minute, and another one which receives profile
batches from the first goroutine and sends them to the agent. The
collection and uploading processes are separate so that delays in
uploading do not prevent profile collection.
* The collection goroutine is a for loop, which collects each profile
type (see profile.go for the implementations), batches them together,
and passes the batch to a channel which will be read by the upload
goroutine, and then waits until the next minute to collect again. The
loop also checks whether the profiler is stopped by checking if the
p.exit channel is closed.
* The upload goroutine loops, each iteration waiting to receive a batch
of profiles or for p.exit to be closed. Upon receiving a batch, the
upload loop calls an upload function. This function constructs the
message containing the profiles and metadata, and attempts to upload it
to the agent's profile proxy. This will be retried several times.
The code is laid out in the following files:
* profiler.go: contains the implementation of the primary profiler
object, which holds the configuration and manages profile collection.
* profile.go: contains the implementation of profile collection for
specific profile types. All profile types are collected via Go runtime
APIs, and some require post-processing for our use. The primary kind of
post-processing is computing "deltas", which are the difference between
two profiles to show the change in resource usage (memory allocations,
blocking time, etc) over the last minute. Delta computation is in
"internal/fastdelta".
* upload.go: implements uploading a batch of profiles to our agent's
backend proxy, including bundling them together in the required
multi-part form layout and adding required metadata such as tags.
* options.go: implements configuration logic, including default values
and functional options which are passed to profiler.Start.
* telemetry.go: sends an instrumentation telemetry message containing
profiler configuration when profiling is started.
* metrics.go: collects some runtime metrics (GC-related) which are
included in the metrics.json attachment for each profile upload.
The code is tested in the "*_test.go" files. The profiler implementations
themselves are in the Go standard library, and are tested for correctness there.
The tests for this library are concerned with ensuring the expected behavior of
the client: Do we collect the configured profiles? Do we add the expected
metadata? Do we respect collection intervals? Are our manipulations of the
profiles (such as delta computation) correct? The tests also include regression
tests for previously-identified bugs in the implementation.
*/

0 comments on commit 5d659d3

Please sign in to comment.