Skip to content

Commit

Permalink
caching for sourcemaps (#8354)
Browse files Browse the repository at this point in the history
## Summary

Adds granular trace spans for private graph requests.
Sourcemap processing has been slow due to repeated failing queries.

<img width="864" alt="Screenshot 2024-04-26 at 10 46 23"
src="https://github.com/highlight/highlight/assets/1351531/48f3bc44-f5ee-405f-aea5-49fec11f6288">

Caching for 1 minute will help with performance while still updating frequently
when sourcemaps / JS files are uploaded.

## How did you test this change?

manual testing locally, unit tests
(Note: a screenshot upload here did not complete; the original image link was broken.)


## Are there any deployment considerations?

no

## Does this work require review from our design team?

no
  • Loading branch information
Vadman97 committed Apr 27, 2024
1 parent 520d2f1 commit f6774a9
Show file tree
Hide file tree
Showing 8 changed files with 191 additions and 65 deletions.
9 changes: 9 additions & 0 deletions backend/clickhouse/query.go
Original file line number Diff line number Diff line change
Expand Up @@ -549,6 +549,11 @@ func getFnStr(aggregator modelInputs.MetricAggregator, column string, useSamplin
}

func readMetrics[T ~string](ctx context.Context, client *Client, sampleableConfig sampleableTableConfig[T], projectID int, params modelInputs.QueryInput, column string, metricTypes []modelInputs.MetricAggregator, groupBy []string, bucketCount *int, bucketBy string, limit *int, limitAggregator *modelInputs.MetricAggregator, limitColumn *string) (*modelInputs.MetricsBuckets, error) {
span, ctx := util.StartSpanFromContext(ctx, "clickhouse.readMetrics")
span.SetAttribute("project_id", projectID)
span.SetAttribute("table", sampleableConfig.tableConfig.TableName)
defer span.Finish()

if len(metricTypes) == 0 {
return nil, errors.New("no metric types provided")
}
Expand Down Expand Up @@ -758,11 +763,15 @@ func readMetrics[T ~string](ctx context.Context, client *Client, sampleableConfi
Buckets: []*modelInputs.MetricBucket{},
}

span, ctx = util.StartSpanFromContext(ctx, "readMetrics.query")
span.SetAttribute("sql", sql)
span.SetAttribute("args", args)
rows, err := client.conn.Query(
ctx,
sql,
args...,
)
span.Finish(err)

if err != nil {
return nil, err
Expand Down
2 changes: 1 addition & 1 deletion backend/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -475,7 +475,7 @@ func main() {
})
privateServer.Use(private.NewGraphqlOAuthValidator(privateResolver.Store))
privateServer.Use(util.NewTracer(util.PrivateGraph))
privateServer.Use(htrace.NewGraphqlTracer(string(util.PrivateGraph)).WithRequestFieldLogging())
privateServer.Use(htrace.NewGraphqlTracer(string(util.PrivateGraph), trace.WithSpanKind(trace.SpanKindServer)).WithRequestFieldLogging())
privateServer.SetErrorPresenter(htrace.GraphQLErrorPresenter(string(util.PrivateGraph)))
privateServer.SetRecoverFunc(htrace.GraphQLRecoverFunc())
r.Handle("/",
Expand Down
22 changes: 18 additions & 4 deletions backend/redis/cache.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,8 @@ import (

// Config controls how CachedEval reads and populates the cache.
// Adjust it via the Option helpers (WithBypassCache, WithIgnoreError, WithStoreNil).
type Config struct {
	// BypassCache skips the cache lookup and always re-evaluates fn —
	// presumably still writing the fresh result back; TODO confirm in CachedEval.
	BypassCache bool
	// IgnoreError caches fn's result even when fn returned a non-nil error
	// (by default a non-nil error prevents the result from being stored).
	IgnoreError bool
	// StoreNil caches a nil result instead of treating it as a cache miss
	// (by default a nil value prevents the result from being stored).
	StoreNil bool
}

type Option func(cfg *Config)
Expand All @@ -20,6 +22,18 @@ func WithBypassCache(bypass bool) Option {
}
}

// WithIgnoreError makes CachedEval cache fn's result even when fn
// returned a non-nil error.
func WithIgnoreError(ignoreError bool) Option {
	return func(c *Config) {
		c.IgnoreError = ignoreError
	}
}

// WithStoreNil makes CachedEval cache a nil result instead of treating
// it as a miss that must be re-evaluated.
func WithStoreNil(storeNil bool) Option {
	return func(c *Config) {
		c.StoreNil = storeNil
	}
}

// CachedEval will return the value at cacheKey if it exists.
// If it does not exist or is nil, CachedEval calls `fn()` to evaluate the result, and stores it at the cache key.
func CachedEval[T any](ctx context.Context, redis *Client, cacheKey string, lockTimeout, cacheExpiration time.Duration, fn func() (*T, error), opts ...Option) (value *T, err error) {
Expand All @@ -43,16 +57,16 @@ func CachedEval[T any](ctx context.Context, redis *Client, cacheKey string, lock
}
}()
}
if value, err = fn(); value == nil || err != nil {
if value, err = fn(); (!cfg.StoreNil && value == nil) || (!cfg.IgnoreError && err != nil) {
return
}
if err = redis.Cache.Set(&cache.Item{
if setError := redis.Cache.Set(&cache.Item{
Ctx: ctx,
Key: cacheKey,
Value: &value,
TTL: cacheExpiration,
}); err != nil {
return
}); setError != nil {
return nil, setError
}
}

Expand Down
43 changes: 43 additions & 0 deletions backend/redis/utils_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -3,11 +3,54 @@ package redis
import (
"context"
"github.com/go-redsync/redsync/v4"
"github.com/openlyinc/pointy"
e "github.com/pkg/errors"
"github.com/stretchr/testify/assert"
"math/rand"
"testing"
"time"
)

// randomString returns a pointer to a fresh 32-character string of
// uppercase ASCII letters ('A'–'Z'). The error is always nil; the
// (*string, error) signature exists so it can be passed to CachedEval.
func randomString() (*string, error) {
	letters := make([]byte, 32)
	for i := 0; i < len(letters); i++ {
		letters[i] = 'A' + byte(rand.Intn(26))
	}
	s := string(letters)
	return &s, nil
}
// randomError always returns a nil value together with an error whose
// message is a fresh random string, so successive calls produce
// distinct (non-equal) errors.
func randomError() (*string, error) {
	msg, _ := randomString()
	return nil, e.New(*msg)
}

// Test_CachedEval exercises CachedEval's caching semantics in three scenarios:
// success caching, default error handling (errors are not cached), and the
// WithStoreNil/WithIgnoreError options (a nil-value, errored result is cached).
func Test_CachedEval(t *testing.T) {
	ctx := context.TODO()
	r := NewClient()
	// Scenario 1: successful eval. The second call must be served from the
	// cache, so both calls return the same value and neither returns an error.
	key, _ := randomString()
	v1, e1 := CachedEval(ctx, r, *key, time.Minute, time.Minute, randomString)
	v2, e2 := CachedEval(ctx, r, *key, time.Minute, time.Minute, randomString)
	assert.NoError(t, e1)
	assert.NoError(t, e2)
	assert.EqualValues(t, v1, v2)
	assert.EqualValues(t, e1, e2)

	// Scenario 2: fn errors and defaults apply, so nothing is cached — both
	// calls re-run randomError and get distinct random error messages
	// (hence NotEqualValues), with equal (nil) values.
	key, _ = randomString()
	v1, e1 = CachedEval(ctx, r, *key, time.Minute, time.Minute, randomError)
	v2, e2 = CachedEval(ctx, r, *key, time.Minute, time.Minute, randomError)
	assert.Error(t, e1)
	assert.Error(t, e2)
	assert.EqualValues(t, v1, v2)
	assert.NotEqualValues(t, e1, e2)

	// Scenario 3: with StoreNil and IgnoreError, the first (errored, nil)
	// result is cached, so the second call hits the cache and returns no
	// error at all — the errors therefore differ while the values match.
	key, _ = randomString()
	v1, e1 = CachedEval(ctx, r, *key, time.Minute, time.Minute, randomError, WithStoreNil(true), WithIgnoreError(true))
	v2, e2 = CachedEval(ctx, r, *key, time.Minute, time.Minute, randomError, WithStoreNil(true), WithIgnoreError(true))
	assert.Error(t, e1)
	assert.NoError(t, e2)
	assert.EqualValues(t, v1, v2)
	assert.NotEqualValues(t, e1, e2)
}

func TestLock(t *testing.T) {
r := NewClient()

Expand Down
99 changes: 55 additions & 44 deletions backend/stacktraces/enhancer.go
Original file line number Diff line number Diff line change
Expand Up @@ -6,13 +6,15 @@ import (
"crypto/tls"
"encoding/base64"
"fmt"
"github.com/highlight-run/highlight/backend/redis"
"io"
"net/http"
"net/url"
"os"
"path"
"regexp"
"strings"
"time"

"github.com/andybalholm/brotli"
"github.com/go-sourcemap/sourcemap"
Expand Down Expand Up @@ -43,9 +45,9 @@ func init() {
customTransport := http.DefaultTransport.(*http.Transport).Clone()
customTransport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
client := &http.Client{Transport: customTransport}
fetch = NetworkFetcher{client: client}
fetch = NetworkFetcher{client: client, redis: redis.NewClient()}
} else {
fetch = NetworkFetcher{}
fetch = NetworkFetcher{redis: redis.NewClient()}
}
}

Expand All @@ -54,7 +56,7 @@ var fetch fetcher
type DiskFetcher struct{}

func (n DiskFetcher) fetchFile(ctx context.Context, href string) ([]byte, error) {
span, _ := util.StartSpanFromContext(ctx, "disk.fetchFile")
span, _ := util.StartSpanFromContext(ctx, "disk.fetchFile", util.Tag("href", href))
defer span.Finish()

inputBytes, err := os.ReadFile(href)
Expand All @@ -66,50 +68,59 @@ func (n DiskFetcher) fetchFile(ctx context.Context, href string) ([]byte, error)

// NetworkFetcher fetches source/sourcemap files over HTTP, caching the
// fetched bytes in Redis (fetchFile wraps the request in redis.CachedEval).
type NetworkFetcher struct {
	// client is the HTTP client used for fetches; when nil, fetchFile
	// falls back to http.DefaultClient.
	client *http.Client
	// redis backs the short-lived response cache used by fetchFile.
	redis *redis.Client
}

func (n NetworkFetcher) fetchFile(ctx context.Context, href string) ([]byte, error) {
span, ctx := util.StartSpanFromContext(ctx, "network.fetchFile")
span, ctx := util.StartSpanFromContext(ctx, "network.fetchFileCached", util.Tag("href", href))
defer span.Finish()

// check if source is a URL
_, err := url.ParseRequestURI(href)
if err != nil {
return nil, err
}
// get minified file
if n.client == nil {
n.client = http.DefaultClient
}
res, err := n.client.Get(href)
if err != nil {
return nil, e.Wrap(err, "error getting source file")
}
defer func(Body io.ReadCloser) {
err := Body.Close()
b, err := redis.CachedEval(ctx, n.redis, href, time.Second, time.Minute, func() (*[]byte, error) {
// check if source is a URL
_, err := url.ParseRequestURI(href)
if err != nil {
log.WithContext(ctx).Errorf("failed to close network reader %+v", err)
return nil, err
}
// get minified file
if n.client == nil {
n.client = http.DefaultClient
}
res, err := n.client.Get(href)
if err != nil {
return nil, e.Wrap(err, "error getting source file")
}
defer func(Body io.ReadCloser) {
err := Body.Close()
if err != nil {
log.WithContext(ctx).Errorf("failed to close network reader %+v", err)
}
}(res.Body)
if res.StatusCode != http.StatusOK {
return nil, e.New("status code not OK")
}
}(res.Body)
if res.StatusCode != http.StatusOK {
return nil, e.New("status code not OK")
}

if res.Header.Get("Content-Encoding") == "br" {
out := &bytes.Buffer{}
if _, err := io.Copy(out, brotli.NewReader(res.Body)); err != nil {
return nil, e.New("failed to read brotli content")
if res.Header.Get("Content-Encoding") == "br" {
out := &bytes.Buffer{}
if _, err := io.Copy(out, brotli.NewReader(res.Body)); err != nil {
return nil, e.New("failed to read brotli content")
}
bt := out.Bytes()
return &bt, nil
}
return out.Bytes(), nil
}

// unpack file into slice
bodyBytes, err := io.ReadAll(res.Body)
if err != nil {
return nil, e.Wrap(err, "error reading response body")
}
// unpack file into slice
bodyBytes, err := io.ReadAll(res.Body)
if err != nil {
return nil, e.Wrap(err, "error reading response body")
}

return &bodyBytes, nil
}, redis.WithStoreNil(true), redis.WithIgnoreError(true))

return bodyBytes, nil
if b == nil || err != nil {
return nil, err
}
return *b, nil
}

func limitMaxSize(value *string) *string {
Expand Down Expand Up @@ -170,8 +181,8 @@ func getFileSourcemap(ctx context.Context, projectId int, version *string, stack
sourcemapFetchStrategy := "S3"
stackTraceError.SourcemapFetchStrategy = &sourcemapFetchStrategy
for sourceMapFileBytes == nil {
sourceMapFileBytes, err = storageClient.ReadSourceMapFile(ctx, projectId, version, pathSubpath)
if err != nil {
sourceMapFileBytes, err = storageClient.ReadSourceMapFileCached(ctx, projectId, version, pathSubpath)
if sourceMapFileBytes == nil || err != nil {
if pathSubpath == "" {
// SOURCEMAP_ERROR: could not find source map file in s3
// (user-facing error message can include all the paths searched)
Expand All @@ -190,18 +201,18 @@ func getFileSourcemap(ctx context.Context, projectId int, version *string, stack

func getURLSourcemap(ctx context.Context, projectId int, version *string, stackTraceFileURL string, stackTraceFilePath string, stackFileNameIndex int, storageClient storage.Client, stackTraceError *privateModel.SourceMappingError) (string, []byte, error) {
// try to get file from s3
minifiedFileBytes, err := storageClient.ReadSourceMapFile(ctx, projectId, version, stackTraceFilePath)
minifiedFileBytes, err := storageClient.ReadSourceMapFileCached(ctx, projectId, version, stackTraceFilePath)
minifiedFetchStrategy := "S3"
var stackTraceErrorCode privateModel.SourceMappingErrorCode
stackTraceError.MinifiedFetchStrategy = &minifiedFetchStrategy
stackTraceError.ActualMinifiedFetchedPath = &stackTraceFilePath

if err != nil {
if minifiedFileBytes == nil || err != nil {
// if not in s3, get from url and put in s3
minifiedFileBytes, err = fetch.fetchFile(ctx, stackTraceFileURL)
minifiedFetchStrategy = "URL"
stackTraceError.MinifiedFetchStrategy = &minifiedFetchStrategy
if err != nil {
if minifiedFileBytes == nil || err != nil {
// fallback if we can't get the source file at all
// SOURCEMAP_ERROR: minified file does not exist in S3 and could not be found at the URL
// (user-facing error message can include the S3 path and URL that was searched)
Expand Down Expand Up @@ -290,15 +301,15 @@ func getURLSourcemap(ctx context.Context, projectId int, version *string, stackT

// fetch source map file
// try to get file from s3
sourceMapFileBytes, err = storageClient.ReadSourceMapFile(ctx, projectId, version, sourceMapFilePath)
sourceMapFileBytes, err = storageClient.ReadSourceMapFileCached(ctx, projectId, version, sourceMapFilePath)
sourcemapFetchStrategy := "S3"
stackTraceError.SourcemapFetchStrategy = &sourcemapFetchStrategy
if err != nil {
if sourceMapFileBytes == nil || err != nil {
// if not in s3, get from url and put in s3
sourceMapFileBytes, err = fetch.fetchFile(ctx, sourceMapURL)
sourcemapFetchStrategy = "URL"
stackTraceError.SourcemapFetchStrategy = &sourcemapFetchStrategy
if err != nil {
if sourceMapFileBytes == nil || err != nil {
// fallback if we can't get the source file at all
// SOURCEMAP_ERROR: source map file does not exist in S3 and could not be found at the URL
// (user-facing error message can include the S3 path and URL that was searched)
Expand Down
15 changes: 9 additions & 6 deletions backend/stacktraces/enhancer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
"github.com/highlight-run/highlight/backend/redis"
"github.com/openlyinc/pointy"
"github.com/stretchr/testify/assert"
"testing"
Expand Down Expand Up @@ -107,7 +108,7 @@ func TestEnhanceStackTrace(t *testing.T) {
FunctionName: ptr.String("arrayIncludesWith"),
},
},
fetcher: NetworkFetcher{},
fetcher: NetworkFetcher{redis: redis.NewClient()},
err: e.New(""),
},
"test source mapping invalid trace:no related source map": {
Expand Down Expand Up @@ -157,7 +158,7 @@ func TestEnhanceStackTrace(t *testing.T) {
},
},
},
fetcher: NetworkFetcher{},
fetcher: NetworkFetcher{redis: redis.NewClient()},
err: e.New(""),
},
"test source mapping invalid trace:filename is not a url": {
Expand All @@ -182,7 +183,7 @@ func TestEnhanceStackTrace(t *testing.T) {
},
},
},
fetcher: NetworkFetcher{},
fetcher: NetworkFetcher{redis: redis.NewClient()},
err: e.New(""),
},
"test source mapping invalid trace:trace is nil": {
Expand Down Expand Up @@ -258,7 +259,7 @@ func TestEnhanceStackTrace(t *testing.T) {
FunctionName: ptr.String("Error"),
},
},
fetcher: NetworkFetcher{},
fetcher: NetworkFetcher{redis: redis.NewClient()},
err: e.New(""),
},
}
Expand All @@ -274,9 +275,11 @@ func TestEnhanceStackTrace(t *testing.T) {
}

// run tests
redisClient := redis.NewClient()
for _, client := range []storage.Client{s3Client, fsClient} {
for name, tc := range tests {
t.Run(fmt.Sprintf("%s/%v", name, client), func(t *testing.T) {
_ = redisClient.FlushDB(ctx)
if tc.projectID == 0 {
tc.projectID = 1
} else if client == s3Client {
Expand Down Expand Up @@ -313,7 +316,7 @@ func TestEnhanceBackendNextServerlessTrace(t *testing.T) {
t.Fatalf("error creating storage client: %v", err)
}

fetch = NetworkFetcher{}
fetch = NetworkFetcher{redis: redis.NewClient()}

var stackFrameInput []*publicModelInput.StackFrameInput
err = json.Unmarshal([]byte(`[{"fileName":"/var/task/apps/magicsky/.next/server/app/(route-group-test)/[slug]/page-router-edge-test.js","lineNumber":1,"functionName":"s","columnNumber":3344,"error":"Error: 🎉 SSR Error with use-server: src/app-router/ssr/page.tsx"}]`), &stackFrameInput)
Expand Down Expand Up @@ -351,7 +354,7 @@ func TestEnhanceStackTraceProd(t *testing.T) {
t.Fatalf("error creating storage client: %v", err)
}

fetch = NetworkFetcher{}
fetch = NetworkFetcher{redis: redis.NewClient()}
mappedStackTrace, err := EnhanceStackTrace(ctx, []*publicModelInput.StackFrameInput{
{
FunctionName: nil,
Expand Down
Loading

0 comments on commit f6774a9

Please sign in to comment.