Global cache for documents + top level jsonnet objects (#153)
* Global cache for documents + top level jsonnet objects
Closes #133

There are two caches currently:
- One for protocol documents. This one is instantiated by the server and kept up to date as documents are opened, changed, and closed.
- One for jsonnet objects. This one is a global variable that is only ever added to; entries are never removed or updated, even when the objects they were parsed from change.

By merging the two caches, the document cache's update path can also invalidate the corresponding top-level-object entries whenever a document is changed.
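
A rough sketch of that invalidation path, with simplified types standing in for the real LSP protocol and AST values; only the key-matching idea mirrors `Cache.Put` in the diff below:

```go
package cache

import (
	"path/filepath"
	"strings"
	"sync"
)

// Cache is a simplified stand-in for the merged cache: documents and the
// top-level objects derived from them live behind the same mutex.
type Cache struct {
	mu              sync.RWMutex
	docs            map[string]string   // document URI -> text
	topLevelObjects map[string][]string // "importedFrom:filename" -> cached objects
}

// PutDocument stores a document and evicts any cached top-level objects whose
// key references the same file, so they are recomputed on the next lookup.
func (c *Cache) PutDocument(uri, text string) {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.docs[uri] = text

	base := filepath.Base(uri)
	for key := range c.topLevelObjects {
		if strings.HasSuffix(key, base) {
			delete(c.topLevelObjects, key)
		}
	}
}
```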

* Simplify processing args (#154)

Instead of passing a `cache` and a `vm` into every function, create a `Processor` struct that holds both.
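
A small illustration of the change in call shape; `NewProcessor` and the new `FindRangesFromIndexList` signature come from this commit, while the surrounding helper is hypothetical:

```go
package example

import (
	"github.com/google/go-jsonnet"
	"github.com/grafana/jsonnet-language-server/pkg/ast/processing"
	"github.com/grafana/jsonnet-language-server/pkg/cache"
	"github.com/grafana/jsonnet-language-server/pkg/nodestack"
)

// definitionRanges is a hypothetical caller: it builds the Processor once and
// calls a method, instead of threading vm (and now cache) into every helper
// the way the old free functions required.
func definitionRanges(c *cache.Cache, vm *jsonnet.VM, stack *nodestack.NodeStack, indexList []string) ([]processing.ObjectRange, error) {
	p := processing.NewProcessor(c, vm)
	// Before this commit: processing.FindRangesFromIndexList(stack, indexList, vm, false)
	return p.FindRangesFromIndexList(stack, indexList, false)
}
```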

* Fix linting
julienduchesne authored Aug 26, 2024
1 parent 4c756e3 commit 6f0feae
Showing 15 changed files with 185 additions and 139 deletions.
3 changes: 1 addition & 2 deletions .golangci.toml
@@ -1,14 +1,13 @@
[linters]
enable = [
"copyloopvar",
"dogsled",
"exportloopref",
"forcetypeassert",
"goconst",
"gocritic",
"gocyclo",
"goimports",
"goprintffuncname",
"gosec",
"gosimple",
"govet",
"ineffassign",
38 changes: 18 additions & 20 deletions pkg/ast/processing/find_field.go
@@ -5,13 +5,12 @@ import (
"reflect"
"strings"

"github.com/google/go-jsonnet"
"github.com/google/go-jsonnet/ast"
"github.com/grafana/jsonnet-language-server/pkg/nodestack"
log "github.com/sirupsen/logrus"
)

func FindRangesFromIndexList(stack *nodestack.NodeStack, indexList []string, vm *jsonnet.VM, partialMatchFields bool) ([]ObjectRange, error) {
func (p *Processor) FindRangesFromIndexList(stack *nodestack.NodeStack, indexList []string, partialMatchFields bool) ([]ObjectRange, error) {
var foundDesugaredObjects []*ast.DesugaredObject
// First element will be super, self, or var name
start, indexList := indexList[0], indexList[1:]
@@ -31,13 +30,13 @@ func FindRangesFromIndexList(stack *nodestack.NodeStack, indexList []string, vm
if _, ok := tmpStack.Peek().(*ast.Binary); ok {
tmpStack.Pop()
}
foundDesugaredObjects = filterSelfScope(FindTopLevelObjects(tmpStack, vm))
foundDesugaredObjects = filterSelfScope(p.FindTopLevelObjects(tmpStack))
case start == "std":
return nil, fmt.Errorf("cannot get definition of std lib")
case start == "$":
foundDesugaredObjects = FindTopLevelObjects(nodestack.NewNodeStack(stack.From), vm)
foundDesugaredObjects = p.FindTopLevelObjects(nodestack.NewNodeStack(stack.From))
case strings.Contains(start, "."):
foundDesugaredObjects = FindTopLevelObjectsInFile(vm, start, "")
foundDesugaredObjects = p.FindTopLevelObjectsInFile(start, "")

default:
if strings.Count(start, "(") == 1 && strings.Count(start, ")") == 1 {
@@ -65,15 +64,15 @@ func FindRangesFromIndexList(stack *nodestack.NodeStack, indexList []string, vm
foundDesugaredObjects = append(foundDesugaredObjects, bodyNode)
case *ast.Self:
tmpStack := nodestack.NewNodeStack(stack.From)
foundDesugaredObjects = FindTopLevelObjects(tmpStack, vm)
foundDesugaredObjects = p.FindTopLevelObjects(tmpStack)
case *ast.Import:
filename := bodyNode.File.Value
foundDesugaredObjects = FindTopLevelObjectsInFile(vm, filename, "")
foundDesugaredObjects = p.FindTopLevelObjectsInFile(filename, "")

case *ast.Index, *ast.Apply:
tempStack := nodestack.NewNodeStack(bodyNode)
indexList = append(tempStack.BuildIndexList(), indexList...)
return FindRangesFromIndexList(stack, indexList, vm, partialMatchFields)
return p.FindRangesFromIndexList(stack, indexList, partialMatchFields)
case *ast.Function:
// If the function's body is an object, it means we can look for indexes within the function
if funcBody := findChildDesugaredObject(bodyNode.Body); funcBody != nil {
@@ -84,10 +83,10 @@ func FindRangesFromIndexList(stack *nodestack.NodeStack, indexList []string, vm
}
}

return extractObjectRangesFromDesugaredObjs(vm, foundDesugaredObjects, indexList, partialMatchFields)
return p.extractObjectRangesFromDesugaredObjs(foundDesugaredObjects, indexList, partialMatchFields)
}

func extractObjectRangesFromDesugaredObjs(vm *jsonnet.VM, desugaredObjs []*ast.DesugaredObject, indexList []string, partialMatchFields bool) ([]ObjectRange, error) {
func (p *Processor) extractObjectRangesFromDesugaredObjs(desugaredObjs []*ast.DesugaredObject, indexList []string, partialMatchFields bool) ([]ObjectRange, error) {
var ranges []ObjectRange
for len(indexList) > 0 {
index := indexList[0]
@@ -111,7 +110,7 @@ func extractObjectRangesFromDesugaredObjs(vm *jsonnet.VM, desugaredObjs []*ast.D
return ranges, nil
}

fieldNodes, err := unpackFieldNodes(vm, foundFields)
fieldNodes, err := p.unpackFieldNodes(foundFields)
if err != nil {
return nil, err
}
@@ -125,7 +124,7 @@ func extractObjectRangesFromDesugaredObjs(vm *jsonnet.VM, desugaredObjs []*ast.D
// The target is a function and will be found by FindVarReference on the next loop
fieldNodes = append(fieldNodes, fieldNode.Target)
case *ast.Var:
varReference, err := FindVarReference(fieldNode, vm)
varReference, err := p.FindVarReference(fieldNode)
if err != nil {
return nil, err
}
@@ -142,11 +141,11 @@ func extractObjectRangesFromDesugaredObjs(vm *jsonnet.VM, desugaredObjs []*ast.D
// if we're trying to find a definition which is an index,
// we need to find it from itself, meaning that we need to create a stack
// from the index's target and search from there
rootNode, _, _ := vm.ImportAST("", fieldNode.LocRange.FileName)
rootNode, _, _ := p.vm.ImportAST("", fieldNode.LocRange.FileName)
stack, _ := FindNodeByPosition(rootNode, fieldNode.Target.Loc().Begin)
if stack != nil {
additionalIndexList := append(nodestack.NewNodeStack(fieldNode).BuildIndexList(), indexList...)
result, _ := FindRangesFromIndexList(stack, additionalIndexList, vm, partialMatchFields)
result, _ := p.FindRangesFromIndexList(stack, additionalIndexList, partialMatchFields)
if len(result) > 0 {
return result, err
}
@@ -157,7 +156,7 @@ func extractObjectRangesFromDesugaredObjs(vm *jsonnet.VM, desugaredObjs []*ast.D
desugaredObjs = append(desugaredObjs, findChildDesugaredObject(fieldNode.Body))
case *ast.Import:
filename := fieldNode.File.Value
newObjs := FindTopLevelObjectsInFile(vm, filename, string(fieldNode.Loc().File.DiagnosticFileName))
newObjs := p.FindTopLevelObjectsInFile(filename, string(fieldNode.Loc().File.DiagnosticFileName))
desugaredObjs = append(desugaredObjs, newObjs...)
}
i++
@@ -177,13 +176,13 @@ func flattenBinary(node ast.Node) []ast.Node {
// unpackFieldNodes extracts nodes from fields
// - Binary nodes. A field could be either in the left or right side of the binary
// - Self nodes. We want the object self refers to, not the self node itself
func unpackFieldNodes(vm *jsonnet.VM, fields []*ast.DesugaredObjectField) ([]ast.Node, error) {
func (p *Processor) unpackFieldNodes(fields []*ast.DesugaredObjectField) ([]ast.Node, error) {
var fieldNodes []ast.Node
for _, foundField := range fields {
switch fieldNode := foundField.Body.(type) {
case *ast.Self:
filename := fieldNode.LocRange.FileName
rootNode, _, _ := vm.ImportAST("", filename)
rootNode, _, _ := p.vm.ImportAST("", filename)
tmpStack, err := FindNodeByPosition(rootNode, fieldNode.LocRange.Begin)
if err != nil {
return nil, err
@@ -220,7 +219,6 @@ func findObjectFieldsInObject(objectNode *ast.DesugaredObject, index string, par

var matchingFields []*ast.DesugaredObjectField
for _, field := range objectNode.Fields {
field := field
literalString, isString := field.Name.(*ast.LiteralString)
if !isString {
continue
@@ -253,8 +251,8 @@ func findChildDesugaredObject(node ast.Node) *ast.DesugaredObject {

// FindVarReference finds the object that the variable is referencing
// To do so, we get the stack where the var is used and search that stack for the var's definition
func FindVarReference(varNode *ast.Var, vm *jsonnet.VM) (ast.Node, error) {
varFileNode, _, _ := vm.ImportAST("", varNode.LocRange.FileName)
func (p *Processor) FindVarReference(varNode *ast.Var) (ast.Node, error) {
varFileNode, _, _ := p.vm.ImportAST("", varNode.LocRange.FileName)
varStack, err := FindNodeByPosition(varFileNode, varNode.Loc().Begin)
if err != nil {
return nil, fmt.Errorf("got the following error when finding the bind for %s: %w", varNode.Id, err)
18 changes: 18 additions & 0 deletions pkg/ast/processing/processor.go
@@ -0,0 +1,18 @@
package processing

import (
"github.com/google/go-jsonnet"
"github.com/grafana/jsonnet-language-server/pkg/cache"
)

type Processor struct {
cache *cache.Cache
vm *jsonnet.VM
}

func NewProcessor(cache *cache.Cache, vm *jsonnet.VM) *Processor {
return &Processor{
cache: cache,
vm: vm,
}
}
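
For context, a hypothetical construction of the shared pieces might look like the sketch below; `cache.New` and `processing.NewProcessor` come from this diff, `jsonnet.MakeVM` from go-jsonnet, and the surrounding server type is illustrative only:

```go
package example

import (
	"github.com/google/go-jsonnet"
	"github.com/grafana/jsonnet-language-server/pkg/ast/processing"
	"github.com/grafana/jsonnet-language-server/pkg/cache"
)

// server is a hypothetical holder showing the intended ownership: one shared
// Cache and VM feed a single Processor, so document updates and AST processing
// see the same cached state.
type server struct {
	cache     *cache.Cache
	vm        *jsonnet.VM
	processor *processing.Processor
}

func newServer() *server {
	c := cache.New()
	vm := jsonnet.MakeVM()
	return &server{
		cache:     c,
		vm:        vm,
		processor: processing.NewProcessor(c, vm),
	}
}
```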
27 changes: 12 additions & 15 deletions pkg/ast/processing/top_level_objects.go
@@ -1,26 +1,23 @@
package processing

import (
"github.com/google/go-jsonnet"
"github.com/google/go-jsonnet/ast"
"github.com/grafana/jsonnet-language-server/pkg/nodestack"
log "github.com/sirupsen/logrus"
)

var fileTopLevelObjectsCache = make(map[string][]*ast.DesugaredObject)

func FindTopLevelObjectsInFile(vm *jsonnet.VM, filename, importedFrom string) []*ast.DesugaredObject {
cacheKey := importedFrom + ":" + filename
if _, ok := fileTopLevelObjectsCache[cacheKey]; !ok {
rootNode, _, _ := vm.ImportAST(importedFrom, filename)
fileTopLevelObjectsCache[cacheKey] = FindTopLevelObjects(nodestack.NewNodeStack(rootNode), vm)
func (p *Processor) FindTopLevelObjectsInFile(filename, importedFrom string) []*ast.DesugaredObject {
v, ok := p.cache.GetTopLevelObject(filename, importedFrom)
if !ok {
rootNode, _, _ := p.vm.ImportAST(importedFrom, filename)
v = p.FindTopLevelObjects(nodestack.NewNodeStack(rootNode))
p.cache.PutTopLevelObject(filename, importedFrom, v)
}

return fileTopLevelObjectsCache[cacheKey]
return v
}

// Find all ast.DesugaredObject's from NodeStack
func FindTopLevelObjects(stack *nodestack.NodeStack, vm *jsonnet.VM) []*ast.DesugaredObject {
func (p *Processor) FindTopLevelObjects(stack *nodestack.NodeStack) []*ast.DesugaredObject {
var objects []*ast.DesugaredObject
for !stack.IsEmpty() {
curr := stack.Pop()
@@ -34,7 +31,7 @@ func FindTopLevelObjects(stack *nodestack.NodeStack, vm *jsonnet.VM) []*ast.Desu
stack.Push(curr.Body)
case *ast.Import:
filename := curr.File.Value
rootNode, _, _ := vm.ImportAST(string(curr.Loc().File.DiagnosticFileName), filename)
rootNode, _, _ := p.vm.ImportAST(string(curr.Loc().File.DiagnosticFileName), filename)
stack.Push(rootNode)
case *ast.Index:
indexValue, indexIsString := curr.Index.(*ast.LiteralString)
@@ -45,7 +42,7 @@ func FindTopLevelObjects(stack *nodestack.NodeStack, vm *jsonnet.VM) []*ast.Desu
var container ast.Node
// If our target is a var, the container for the index is the var ref
if varTarget, targetIsVar := curr.Target.(*ast.Var); targetIsVar {
ref, err := FindVarReference(varTarget, vm)
ref, err := p.FindVarReference(varTarget)
if err != nil {
log.WithError(err).Errorf("Error finding var reference, ignoring this node")
continue
@@ -62,7 +59,7 @@ func FindTopLevelObjects(stack *nodestack.NodeStack, vm *jsonnet.VM) []*ast.Desu
if containerObj, containerIsObj := container.(*ast.DesugaredObject); containerIsObj {
possibleObjects = []*ast.DesugaredObject{containerObj}
} else if containerImport, containerIsImport := container.(*ast.Import); containerIsImport {
possibleObjects = FindTopLevelObjectsInFile(vm, containerImport.File.Value, string(containerImport.Loc().File.DiagnosticFileName))
possibleObjects = p.FindTopLevelObjectsInFile(containerImport.File.Value, string(containerImport.Loc().File.DiagnosticFileName))
}

for _, obj := range possibleObjects {
@@ -71,7 +68,7 @@ func FindTopLevelObjects(stack *nodestack.NodeStack, vm *jsonnet.VM) []*ast.Desu
}
}
case *ast.Var:
varReference, err := FindVarReference(curr, vm)
varReference, err := p.FindVarReference(curr)
if err != nil {
log.WithError(err).Errorf("Error finding var reference, ignoring this node")
continue
90 changes: 56 additions & 34 deletions pkg/server/cache.go → pkg/cache/cache.go
@@ -1,69 +1,74 @@
package server
package cache

import (
"errors"
"fmt"
"os"
"path/filepath"
"strings"
"sync"

"github.com/google/go-jsonnet/ast"
"github.com/jdbaldry/go-language-server-protocol/lsp/protocol"
)

type document struct {
type Document struct {
// From DidOpen and DidChange
item protocol.TextDocumentItem
Item protocol.TextDocumentItem

// Contains the last successfully parsed AST. If doc.err is not nil, it's out of date.
ast ast.Node
linesChangedSinceAST map[int]bool
AST ast.Node
LinesChangedSinceAST map[int]bool

// From diagnostics
val string
err error
diagnostics []protocol.Diagnostic
Val string
Err error
Diagnostics []protocol.Diagnostic
}

// newCache returns a document cache.
func newCache() *cache {
return &cache{
mu: sync.RWMutex{},
docs: make(map[protocol.DocumentURI]*document),
diagQueue: make(map[protocol.DocumentURI]struct{}),
}
// Cache caches documents.
type Cache struct {
mu sync.RWMutex
docs map[protocol.DocumentURI]*Document
topLevelObjects map[string][]*ast.DesugaredObject
}

// cache caches documents.
type cache struct {
mu sync.RWMutex
docs map[protocol.DocumentURI]*document

diagMutex sync.RWMutex
diagQueue map[protocol.DocumentURI]struct{}
diagRunning sync.Map
// New returns a document cache.
func New() *Cache {
return &Cache{
mu: sync.RWMutex{},
docs: make(map[protocol.DocumentURI]*Document),
topLevelObjects: make(map[string][]*ast.DesugaredObject),
}
}

// put adds or replaces a document in the cache.
func (c *cache) put(new *document) error {
// Put adds or replaces a document in the cache.
func (c *Cache) Put(new *Document) error {
c.mu.Lock()
defer c.mu.Unlock()

uri := new.item.URI
uri := new.Item.URI
if old, ok := c.docs[uri]; ok {
if old.item.Version > new.item.Version {
if old.Item.Version > new.Item.Version {
return errors.New("newer version of the document is already in the cache")
}
}
c.docs[uri] = new

// Invalidate the TopLevelObject cache
for k := range c.topLevelObjects {
if strings.HasSuffix(k, filepath.Base(uri.SpanURI().Filename())) {
delete(c.topLevelObjects, k)
}
}

return nil
}

// get retrieves a document from the cache.
func (c *cache) get(uri protocol.DocumentURI) (*document, error) {
c.mu.Lock()
defer c.mu.Unlock()
// Get retrieves a document from the cache.
func (c *Cache) Get(uri protocol.DocumentURI) (*Document, error) {
c.mu.RLock()
defer c.mu.RUnlock()

doc, ok := c.docs[uri]
if !ok {
@@ -73,11 +78,11 @@ func (c *cache) get(uri protocol.DocumentURI) (*document, error) {
return doc, nil
}

func (c *cache) getContents(uri protocol.DocumentURI, position protocol.Range) (string, error) {
func (c *Cache) GetContents(uri protocol.DocumentURI, position protocol.Range) (string, error) {
text := ""
doc, err := c.get(uri)
doc, err := c.Get(uri)
if err == nil {
text = doc.item.Text
text = doc.Item.Text
} else {
// Read the file from disk (TODO: cache this)
bytes, err := os.ReadFile(uri.SpanURI().Filename())
@@ -118,3 +123,20 @@ func (c *cache) getContents(uri protocol.DocumentURI, position protocol.Range) (

return contentBuilder.String(), nil
}

func (c *Cache) GetTopLevelObject(filename, importedFrom string) ([]*ast.DesugaredObject, bool) {
c.mu.RLock()
defer c.mu.RUnlock()

cacheKey := importedFrom + ":" + filename
v, ok := c.topLevelObjects[cacheKey]
return v, ok
}

func (c *Cache) PutTopLevelObject(filename, importedFrom string, objects []*ast.DesugaredObject) {
c.mu.Lock()
defer c.mu.Unlock()

cacheKey := importedFrom + ":" + filename
c.topLevelObjects[cacheKey] = objects
}
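
The two new methods are meant to be used as a read-through pair. A hedged sketch of a caller follows; the helper and its `compute` callback are hypothetical, while the method signatures come from this file:

```go
package example

import (
	"github.com/google/go-jsonnet/ast"
	"github.com/grafana/jsonnet-language-server/pkg/cache"
)

// cachedTopLevelObjects is a hypothetical helper showing the intended pattern:
// look up first, compute and store only on a miss. Entries are later evicted
// by Cache.Put when the corresponding document changes.
func cachedTopLevelObjects(c *cache.Cache, filename, importedFrom string, compute func() []*ast.DesugaredObject) []*ast.DesugaredObject {
	if objs, ok := c.GetTopLevelObject(filename, importedFrom); ok {
		return objs
	}
	objs := compute()
	c.PutTopLevelObject(filename, importedFrom, objs)
	return objs
}
```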