diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..6cff153 --- /dev/null +++ b/.gitignore @@ -0,0 +1,5 @@ +*.html +*.txt +*.out +.vscode/ +cmd/dumpspm/dumpspm \ No newline at end of file
diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..043d25a --- /dev/null +++ b/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Vikesh Raj C + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..d771514 --- /dev/null +++ b/Makefile @@ -0,0 +1,14 @@ +.PHONY: test clean + +sentencepiece/sentencepiece_model.pb.go: sentencepiece/sentencepiece_model.proto + protoc --go_out=. $< + + +cmd/dumpspm/dumpspm: cmd/dumpspm/main.go + cd cmd/dumpspm && go build + +test: + go test -cover -coverprofile=c.out ./sentencepiece && go tool cover -html=c.out -o coverage.html + +clean: + rm -f *.out coverage.html cmd/dumpspm/dumpspm \ No newline at end of file
diff --git a/README.md b/README.md new file mode 100644 index 0000000..7f1bc77 --- /dev/null +++ b/README.md @@ -0,0 +1,20 @@ +Sentence Piece Encoder +====================== + +This is a pure Go implementation of the SentencePiece encoder. +It loads a SentencePiece model file and uses that model to tokenize text. + +Example: + +```go + +import "github.com/vikesh-raj/go-sentencepiece-encoder/sentencepiece" + +text := "This is a sample text" +spm, err := sentencepiece.NewSentencepieceFromFile("spm.model", false) +if err != nil { + // handle the error +} +tokens := spm.Tokenize(text) + +```
diff --git a/cmd/dumpspm/go.mod b/cmd/dumpspm/go.mod new file mode 100644 index 0000000..2b7cf14 --- /dev/null +++ b/cmd/dumpspm/go.mod @@ -0,0 +1,11 @@ +module github.com/vikesh-raj/go-sentencepiece-encoder/cmd/dumpspm + +go 1.13 + +replace github.com/vikesh-raj/go-sentencepiece-encoder => ../..
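+// The replace directive above points dumpspm at the local encoder sources in this repository instead of a published module version.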
+ +require ( + github.com/vikesh-raj/go-sentencepiece-encoder v0.0.0-00010101000000-000000000000 + google.golang.org/protobuf v1.25.0 +) diff --git a/cmd/dumpspm/go.sum b/cmd/dumpspm/go.sum new file mode 100644 index 0000000..1759e37 --- /dev/null +++ b/cmd/dumpspm/go.sum @@ -0,0 +1,64 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
diff --git a/cmd/dumpspm/main.go b/cmd/dumpspm/main.go new file mode 100644 index 0000000..0c10c38 --- /dev/null +++ b/cmd/dumpspm/main.go @@ -0,0 +1,41 @@ +package main + +import ( + "fmt" + "io/ioutil" + "os" + + "github.com/vikesh-raj/go-sentencepiece-encoder/sentencepiece" + "google.golang.org/protobuf/proto" +) + +func dumpWords(filename string) error { + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return fmt.Errorf("unable to read file %s: %v", filename, err) + } + var model sentencepiece.ModelProto + err = proto.Unmarshal(bytes, &model) + if err != nil { + return fmt.Errorf("unable to parse model file %s: %v", filename, err) + } + + for i, piece := range model.GetPieces() { + fmt.Println(piece.GetPiece(), piece.GetScore(), i) + } + + fmt.Println("Total words:", len(model.GetPieces())) + return nil +} + +func main() { + if len(os.Args) != 2 { + fmt.Fprintln(os.Stderr, "usage: dumpspm <model file>") + os.Exit(1) + } + modelFile := os.Args[1] + if err := dumpWords(modelFile); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } +}
diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..5457af9 --- /dev/null +++ b/go.mod @@ -0,0 +1,9 @@ +module github.com/vikesh-raj/go-sentencepiece-encoder + +go 1.15 + +require ( + github.com/golang/protobuf v1.4.1 + golang.org/x/text v0.3.0 + google.golang.org/protobuf v1.25.0 +)
diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..0e55cb5 --- /dev/null +++ b/go.sum @@ -0,0 +1,72 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1 h1:ZFgWrT+bLgsYPirOnRfKLYJLvssAegOj/hgyMFdJZe0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
+github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/grpc 
v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/sentencepiece/normalize.go b/sentencepiece/normalize.go new file mode 100644 index 0000000..d3c1875 --- /dev/null +++ b/sentencepiece/normalize.go @@ -0,0 +1,62 @@ +package sentencepiece + +import ( + "strings" + "unicode" + + "golang.org/x/text/unicode/norm" +) + +func sanitize(s string) string { + return norm.NFKC.String(s) +} + +func normalize(s string) string { + replacer := func(r rune) rune { + if isControl(r) || r == 0 { + return -1 + } + if unicode.IsSpace(r) { + return ' ' + } + return r + } + return sanitize(strings.Map(replacer, s)) +} + +var controlChars []rune = []rune{ + 0x007F, 0x00AD, 0x0600, 0x0601, 0x0602, 0x0603, 0x0604, 0x0605, 0x061C, 0x06DD, 0x070F, + 0x08E2, 0x180E, 0x200B, 0x200C, 0x200D, 0x200E, 0x200F, 0x202A, 0x202B, 0x202C, 0x202D, + 0x202E, 0x2060, 0x2061, 0x2062, 0x2063, 0x2064, 0x2066, 0x2067, 0x2068, 0x2069, 0x206A, + 0x206B, 0x206C, 0x206D, 0x206E, 0x206F, 0xFEFF, 0xFFF9, 0xFFFA, 0xFFFB, 0x110BD, + 0x110CD, 0x13430, 0x13431, 0x13432, 0x13433, 0x13434, 0x13435, 0x13436, 0x13437, + 0x13438, 0x1BCA0, 0x1BCA1, 0x1BCA2, 0x1BCA3, 0x1D173, 0x1D174, 0x1D175, 0x1D176, + 0x1D177, 0x1D178, 0x1D179, 0x1D17A, 0xE0001, +} + +func isControl(c rune) bool { + if c == ' ' || c == '\n' || c == '\r' || c == '\t' { + return false + } + if c <= 0x001F || (c >= 0x0080 && c <= 0x009F) || + (c >= 0xE0020 && c <= 0xE007F) || + (c >= 0xE000 && c <= 0xF8FF) || + (c >= 0xF0000 && c <= 0xFFFFD) || + (c >= 0x100000 && c <= 0x10FFFD) || + (c >= 0xD800 && c <= 0xDB7F) || + (c >= 0xDB80 && c <= 0xDBFF) || + (c >= 0xDC00 && c <= 0xDFFF) || + isControlChar(c) { + return true + } + return false +} + +func isControlChar(c rune) bool { + for _, ch := range controlChars { + if ch == c { + return true + } + } + return false +} diff --git a/sentencepiece/sentencepiece.go b/sentencepiece/sentencepiece.go new file mode 100644 index 0000000..f6f4cb4 --- /dev/null +++ b/sentencepiece/sentencepiece.go @@ -0,0 +1,254 @@ +package sentencepiece + +import ( + "fmt" + "math" + "strings" + "unicode" + "unicode/utf8" +) + 
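+// The tokenizer stores the vocabulary in a rune trie and segments text in two
+// passes: decodeForwardToken finds the best-scoring piece ending at each rune
+// position, and decodeBackwards walks those choices back to recover the final
+// segmentation (see Tokenize below).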
+const minScore float32 = -math.MaxFloat32 +const sep rune = 0x2581 +const unknown string = "<unk>" + +type slice struct { + score float32 + index int64 + start int + end int +} +
+func findOffset(position int, q string) int { + count := 0 + for i := range q { + if count == position { + return i + } + count++ + } + return -1 +} +
+func text(s slice, q string) string { + startOffset := findOffset(s.start, q) + endOffset := findOffset(s.end, q) + if startOffset == -1 || endOffset == -1 { + return "" + } + return q[startOffset:endOffset] +} +
+type trieNode struct { + text string + level int + score float32 + index int64 + end bool + children map[rune]trieNode +} +
+func newTrieNode(text string, level int) trieNode { + return trieNode{ + text: text, + level: level, + score: 0.0, + index: 0, + end: false, + children: make(map[rune]trieNode), + } +} +
+// Sentencepiece holds the model +type Sentencepiece struct { + root trieNode + lowercase bool + unknown int64 +} +
+// NewEmptySentencepiece creates an empty sentencepiece model +func NewEmptySentencepiece(lowercase bool) Sentencepiece { + return Sentencepiece{root: newTrieNode("", 0), lowercase: lowercase} +} +
+// SetUnknownIndex sets the index for the unknown id +func (s *Sentencepiece) SetUnknownIndex(index int64) { + s.unknown = index +} +
+// Tokenize tokenizes text into pieces +func (s *Sentencepiece) Tokenize(text string) []Token { + text = normalize(text) + if s.lowercase { + text = strings.ToLower(text) + } + runes := torunes(text) + replaceWhiteSpace(runes) + slices := s.decodeForwardToken(runes) + slices = s.decodeBackwards(slices) + offsets := s.sliceToTokens(slices) + tokens := makeTokens(offsets, runes) + return tokens +} +
+// TokenizeToIDs tokenizes text into ids from the vocab +func (s *Sentencepiece) TokenizeToIDs(text string) []int64 { + tokens := s.Tokenize(text) + ids := make([]int64, len(tokens)) + for i, token := range tokens { + ids[i] = token.ID + } + return ids +} +
+func (s *Sentencepiece) insert(word string, score float32, index int64) { + _, size := utf8.DecodeLastRuneInString(word) + charCount := len(word) + node := &s.root + for i, r := range word { + text := node.text + cnode, ok := node.children[r] + if !ok { + newText := addChar(text, r) + cnode = newTrieNode(newText, node.level+1) + } + if i == charCount-size { + cnode.end = true + cnode.score = score + cnode.index = index + } + node.children[r] = cnode + node = &cnode + } +} +
+func (s *Sentencepiece) commonPrefixSearch(runes []rune) []trieNode { + output := make([]trieNode, 0, len(runes)) + node := &s.root + for _, r := range runes { + cnode, ok := node.children[r] + if !ok { + break + } + if cnode.end { + output = append(output, cnode) + } + node = &cnode + } + return output +} +
+func (s *Sentencepiece) decodeBackwards(slices []slice) []slice { + best := make([]slice, len(slices)) + last := len(slices) - 1 + i := last + index := last + for ; i >= 0; i-- { + sl := slices[index] + if sl.start == -1 { + i++ + break + } + best[i] = sl + index = sl.start + } + return best[i : last+1] +}
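+// decodeForwardToken is the forward (Viterbi-style) pass: scores[j] holds the
+// best score over all segmentations of runes[:j], slices[j] records the piece
+// that achieved it, and positions reached by no vocabulary piece fall back to
+// a single-rune unknown token.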
+func (s *Sentencepiece) decodeForwardToken(runes []rune) []slice { + scores := initScores(len(runes) + 1) + slices := s.initSlices(len(runes) + 1) + scores[0] = 0.0 + for i := range runes { + matches := s.commonPrefixSearch(runes[i:]) + for _, node := range matches { + localScore := scores[i] + node.score + charEnd := i + node.level + if localScore > scores[charEnd] { + slices[charEnd] = slice{score: localScore, index: node.index, start: i, end: charEnd} + scores[charEnd] = localScore + } + } + if scores[i+1] <= minScore { + slices[i+1] = slice{score: minScore, index: s.unknown, start: i, end: i + 1} + scores[i+1] = 0.0 + } + } + return slices +} +
+func (s *Sentencepiece) sliceToTokens(slices []slice) []tokenOffset { + tokens := make([]tokenOffset, 0, len(slices)+1) + isPrevUnknown := false + for _, slice := range slices { + if isPrevUnknown && slice.index == s.unknown { + prevToken := &tokens[len(tokens)-1] + prevToken.end = slice.end + } else { + tokens = append(tokens, tokenOffset{id: slice.index, start: slice.start, end: slice.end}) + } + isPrevUnknown = slice.index == s.unknown + } + return tokens +} +
+func initScores(len int) []float32 { + scores := make([]float32, len) + for i := range scores { + scores[i] = minScore + } + return scores +} +
+func (s *Sentencepiece) initSlices(len int) []slice { + slices := make([]slice, len) + for i := range slices { + slices[i].start = -1 + slices[i].index = s.unknown + } + return slices +} +
+func replaceWhiteSpace(runes []rune) { + for i, r := range runes { + if unicode.IsSpace(r) { + runes[i] = sep + } + } +} +
+func replaceSeperator(s string) string { + replacer := func(r rune) rune { + if r == sep { + return ' ' + } + return r + } + return strings.Map(replacer, s) +} +
+func torunes(text string) []rune { + runes := make([]rune, 0, len(text)+1) + + first, _ := utf8.DecodeRuneInString(text) + if first != sep { + runes = append(runes, sep) + } + + for _, r := range text { + runes = append(runes, r) + } + + return runes +} +
+func makeTokens(offsets []tokenOffset, runes []rune) []Token { + tokens := make([]Token, len(offsets)) + for i, offset := range offsets { + tokens[i] = Token{ID: offset.id, Text: string(runes[offset.start:offset.end])} + } + return tokens +} +
+func addChar(s string, r rune) string { + return fmt.Sprintf("%s%c", s, r) +}
diff --git a/sentencepiece/sentencepiece_model.pb.go b/sentencepiece/sentencepiece_model.pb.go new file mode 100644 index 0000000..577b203 --- /dev/null +++ b/sentencepiece/sentencepiece_model.pb.go @@ -0,0 +1,1453 @@ +// Copyright 2016 Google Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +
+// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.25.0 +// protoc v3.12.4 +// source: sentencepiece/sentencepiece_model.proto +
+package sentencepiece +
+import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoiface "google.golang.org/protobuf/runtime/protoiface" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) +
+const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) +
+// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 +
+// Model type. only have UNIGRAM now. +type TrainerSpec_ModelType int32 +
+const ( + TrainerSpec_UNIGRAM TrainerSpec_ModelType = 1 // Unigram language model with dynamic algorithm + TrainerSpec_BPE TrainerSpec_ModelType = 2 // Byte Pair Encoding + TrainerSpec_WORD TrainerSpec_ModelType = 3 // Delimited by whitespace. + TrainerSpec_CHAR TrainerSpec_ModelType = 4 // tokenizes into character sequence +) +
+// Enum value maps for TrainerSpec_ModelType. +var ( + TrainerSpec_ModelType_name = map[int32]string{ + 1: "UNIGRAM", + 2: "BPE", + 3: "WORD", + 4: "CHAR", + } + TrainerSpec_ModelType_value = map[string]int32{ + "UNIGRAM": 1, + "BPE": 2, + "WORD": 3, + "CHAR": 4, + } +) +
+func (x TrainerSpec_ModelType) Enum() *TrainerSpec_ModelType { + p := new(TrainerSpec_ModelType) + *p = x + return p +} +
+func (x TrainerSpec_ModelType) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} +
+func (TrainerSpec_ModelType) Descriptor() protoreflect.EnumDescriptor { + return file_sentencepiece_sentencepiece_model_proto_enumTypes[0].Descriptor() +} +
+func (TrainerSpec_ModelType) Type() protoreflect.EnumType { + return &file_sentencepiece_sentencepiece_model_proto_enumTypes[0] +} +
+func (x TrainerSpec_ModelType) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} +
+// Deprecated: Do not use. +func (x *TrainerSpec_ModelType) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = TrainerSpec_ModelType(num) + return nil +} +
+// Deprecated: Use TrainerSpec_ModelType.Descriptor instead. +func (TrainerSpec_ModelType) EnumDescriptor() ([]byte, []int) { + return file_sentencepiece_sentencepiece_model_proto_rawDescGZIP(), []int{0, 0} +} +
+type ModelProto_SentencePiece_Type int32 +
+const ( + ModelProto_SentencePiece_NORMAL ModelProto_SentencePiece_Type = 1 // normal symbol + ModelProto_SentencePiece_UNKNOWN ModelProto_SentencePiece_Type = 2 // unknown symbol. only <unk> for now. + ModelProto_SentencePiece_CONTROL ModelProto_SentencePiece_Type = 3 // control symbols. </s>, <s>, <2ja> etc. + ModelProto_SentencePiece_USER_DEFINED ModelProto_SentencePiece_Type = 4 // user defined symbols. + // Typical usage of USER_DEFINED symbol + // is placeholder. + ModelProto_SentencePiece_BYTE ModelProto_SentencePiece_Type = 6 // byte symbols. Used when `byte_fallback` is true. + ModelProto_SentencePiece_UNUSED ModelProto_SentencePiece_Type = 5 // this piece is not used. +) +
+// Enum value maps for ModelProto_SentencePiece_Type.
+var ( + ModelProto_SentencePiece_Type_name = map[int32]string{ + 1: "NORMAL", + 2: "UNKNOWN", + 3: "CONTROL", + 4: "USER_DEFINED", + 6: "BYTE", + 5: "UNUSED", + } + ModelProto_SentencePiece_Type_value = map[string]int32{ + "NORMAL": 1, + "UNKNOWN": 2, + "CONTROL": 3, + "USER_DEFINED": 4, + "BYTE": 6, + "UNUSED": 5, + } +) +
+func (x ModelProto_SentencePiece_Type) Enum() *ModelProto_SentencePiece_Type { + p := new(ModelProto_SentencePiece_Type) + *p = x + return p +} +
+func (x ModelProto_SentencePiece_Type) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} +
+func (ModelProto_SentencePiece_Type) Descriptor() protoreflect.EnumDescriptor { + return file_sentencepiece_sentencepiece_model_proto_enumTypes[1].Descriptor() +} +
+func (ModelProto_SentencePiece_Type) Type() protoreflect.EnumType { + return &file_sentencepiece_sentencepiece_model_proto_enumTypes[1] +} +
+func (x ModelProto_SentencePiece_Type) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} +
+// Deprecated: Do not use. +func (x *ModelProto_SentencePiece_Type) UnmarshalJSON(b []byte) error { + num, err := protoimpl.X.UnmarshalJSONEnum(x.Descriptor(), b) + if err != nil { + return err + } + *x = ModelProto_SentencePiece_Type(num) + return nil +} +
+// Deprecated: Use ModelProto_SentencePiece_Type.Descriptor instead. +func (ModelProto_SentencePiece_Type) EnumDescriptor() ([]byte, []int) { + return file_sentencepiece_sentencepiece_model_proto_rawDescGZIP(), []int{3, 0, 0} +} +
+// BEGIN GOOGLE-INTERNAL +// LINT.IfChange +// END GOOGLE-INTERNAL +// TrainerSpec encodes the various parameters for SentencePiece training. +type TrainerSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + /////////////////////////////////////////////////////////////////// + // General parameters + // + // Input corpus files. + // Trainer accepts the following two formats: + // A) Monolingual: plain text, one sentence per line. + // B) Bilingual: TSV, source sentence <tab> target sentence + // When bilingual data is passed, a shared vocabulary model is built. + // Note that the input file must be raw corpus, not a preprocessed corpus. + // Trainer only loads the first `input_sentence_size` sentences specified + // with this parameter. + Input []string `protobuf:"bytes,1,rep,name=input" json:"input,omitempty"` + // Input corpus format: + // "text": one-sentence-per-line text format (default) + // "tsv": sentence <tab> freq + InputFormat *string `protobuf:"bytes,7,opt,name=input_format,json=inputFormat" json:"input_format,omitempty"` + // Output model file prefix. + // .model and .vocab are generated. + ModelPrefix *string `protobuf:"bytes,2,opt,name=model_prefix,json=modelPrefix" json:"model_prefix,omitempty"` + ModelType *TrainerSpec_ModelType `protobuf:"varint,3,opt,name=model_type,json=modelType,enum=sentencepiece.TrainerSpec_ModelType,def=1" json:"model_type,omitempty"` + // Vocabulary size. 8k is the default size. + VocabSize *int32 `protobuf:"varint,4,opt,name=vocab_size,json=vocabSize,def=8000" json:"vocab_size,omitempty"` + // List of the languages this model can accept. + // Since the model is language-agnostic, this field is used as a reference. + AcceptLanguage []string `protobuf:"bytes,5,rep,name=accept_language,json=acceptLanguage" json:"accept_language,omitempty"` + // Size of self-test samples, which are encoded in the model file.
+ SelfTestSampleSize *int32 `protobuf:"varint,6,opt,name=self_test_sample_size,json=selfTestSampleSize,def=0" json:"self_test_sample_size,omitempty"` + /////////////////////////////////////////////////////////////////// + // Training parameters. + // + // Uses characters which cover the corpus with the ratio of `chars_coverage`. + // This parameter determines the set of basic Alphabet of sentence piece. + // 1.0 - `chars_coverage` characters are treated as UNK. + // See also required_chars field. + CharacterCoverage *float32 `protobuf:"fixed32,10,opt,name=character_coverage,json=characterCoverage,def=0.9995" json:"character_coverage,omitempty"` + // Maximum size of sentences the trainer loads from `input` parameter. + // Trainer simply loads the `input` files in sequence. + // It is better to shuffle the input corpus randomly. + InputSentenceSize *int32 `protobuf:"varint,11,opt,name=input_sentence_size,json=inputSentenceSize,def=0" json:"input_sentence_size,omitempty"` + ShuffleInputSentence *bool `protobuf:"varint,19,opt,name=shuffle_input_sentence,json=shuffleInputSentence,def=1" json:"shuffle_input_sentence,omitempty"` + // Maximum size of sentences to make seed sentence pieces. + // Extended suffix array is constructed to extract frequent + // sub-strings from the corpus. This uses 20N working space, + // where N is the size of corpus. + // + // Deprecated: Do not use. + MiningSentenceSize *int32 `protobuf:"varint,12,opt,name=mining_sentence_size,json=miningSentenceSize" json:"mining_sentence_size,omitempty"` + // Maximum size of sentences to train sentence pieces. + // + // Deprecated: Do not use. + TrainingSentenceSize *int32 `protobuf:"varint,13,opt,name=training_sentence_size,json=trainingSentenceSize" json:"training_sentence_size,omitempty"` + // The size of seed sentencepieces. + // `seed_sentencepiece_size` must be larger than `vocab_size`. + SeedSentencepieceSize *int32 `protobuf:"varint,14,opt,name=seed_sentencepiece_size,json=seedSentencepieceSize,def=1000000" json:"seed_sentencepiece_size,omitempty"` + // In every EM sub-iteration, keeps the top + // `shrinking_factor` * `current sentencepieces size` with respect to + // the loss of the sentence piece. This value should be smaller than 1.0. + ShrinkingFactor *float32 `protobuf:"fixed32,15,opt,name=shrinking_factor,json=shrinkingFactor,def=0.75" json:"shrinking_factor,omitempty"` + // The maximum sentence length in bytes. Sentences with a length + // larger than `max_sentence_length` are simply ignored. + // Longer input tends to bring the following risks: + // * Overflow during EM training (unigram language model only) + // * Performance drop because of O(n log n) cost in BPE. + MaxSentenceLength *int32 `protobuf:"varint,18,opt,name=max_sentence_length,json=maxSentenceLength,def=4192" json:"max_sentence_length,omitempty"` + // Number of threads in the training. + NumThreads *int32 `protobuf:"varint,16,opt,name=num_threads,json=numThreads,def=16" json:"num_threads,omitempty"` + // Number of EM sub iterations. + NumSubIterations *int32 `protobuf:"varint,17,opt,name=num_sub_iterations,json=numSubIterations,def=2" json:"num_sub_iterations,omitempty"` + /////////////////////////////////////////////////////////////////// + // SentencePiece parameters which control the shapes of sentence piece. + // + // Maximum length of sentencepiece.
+ MaxSentencepieceLength *int32 `protobuf:"varint,20,opt,name=max_sentencepiece_length,json=maxSentencepieceLength,def=16" json:"max_sentencepiece_length,omitempty"` + // Uses Unicode script to split sentence pieces. + // When `split_by_unicode_script` is true, we do not allow sentence piece to + // include multiple Unicode scripts, e.g. "F1" is not a valid piece. + // Exception: CJ characters (Hiragana/Katakana/Han) are all handled + // as one script type, since Japanese word can consist of multiple scripts. + // This exception is always applied regardless of the accept-language + // parameter. + SplitByUnicodeScript *bool `protobuf:"varint,21,opt,name=split_by_unicode_script,json=splitByUnicodeScript,def=1" json:"split_by_unicode_script,omitempty"` + // When `split_by_number` is true, put a boundary between number and + // non-number transition. If we want to treat "F1" as one token, set this flag + // to be false. + SplitByNumber *bool `protobuf:"varint,23,opt,name=split_by_number,json=splitByNumber,def=1" json:"split_by_number,omitempty"` + // Use a white space to split sentence pieces. + // When `split_by_whitespace` is false, we may have the piece containing + // a white space in the middle. e.g., "in_the". + SplitByWhitespace *bool `protobuf:"varint,22,opt,name=split_by_whitespace,json=splitByWhitespace,def=1" json:"split_by_whitespace,omitempty"` + // Adds whitespace symbol (_) as a suffix instead of prefix. e.g., _hello => + // hello_. When `treat_whitespace_as_suffix` is true, + // NormalizerSpec::add_dummy_prefix will add the dummy whitespace to the end + // of sentence. + TreatWhitespaceAsSuffix *bool `protobuf:"varint,24,opt,name=treat_whitespace_as_suffix,json=treatWhitespaceAsSuffix,def=0" json:"treat_whitespace_as_suffix,omitempty"` + // Split all digits (0-9) into separate pieces. + SplitDigits *bool `protobuf:"varint,25,opt,name=split_digits,json=splitDigits,def=0" json:"split_digits,omitempty"` + /////////////////////////////////////////////////////////////////// + // Vocabulary management + // + // Defines control symbols used as an indicator to + // change the behavior of the decoder. <s> and </s> are pre-defined. + // We can use this field to encode various meta information, + // including language indicator in multilingual model. + // These symbols are not visible to users, but visible to + // the decoder. Note that when the input sentence contains control symbols, + // they are not treated as one token, but segmented into normal pieces. + // Control symbols must be inserted independently from the segmentation. + ControlSymbols []string `protobuf:"bytes,30,rep,name=control_symbols,json=controlSymbols" json:"control_symbols,omitempty"` + // Defines user defined symbols. + // These symbols are added with extremely high score + // so they are always treated as one unique symbol in any context. + // Typical usage of user_defined_symbols is placeholder for named entities. + UserDefinedSymbols []string `protobuf:"bytes,31,rep,name=user_defined_symbols,json=userDefinedSymbols" json:"user_defined_symbols,omitempty"` + // Defines required characters. Each UTF8 character in this string is included + // in the character set regardless of character_coverage value. Unlike + // user_defined_symbols, these characters have scores based on the frequency + // on input sentences, and the model can form subwords using characters + // in this field.
+ RequiredChars *string `protobuf:"bytes,36,opt,name=required_chars,json=requiredChars" json:"required_chars,omitempty"` + // Decomposes unknown pieces into UTF-8 bytes. + ByteFallback *bool `protobuf:"varint,35,opt,name=byte_fallback,json=byteFallback,def=0" json:"byte_fallback,omitempty"` + // When creating the vocabulary file, defines whether or not to additionally + // output the score for each piece. + VocabularyOutputPieceScore *bool `protobuf:"varint,32,opt,name=vocabulary_output_piece_score,json=vocabularyOutputPieceScore,def=1" json:"vocabulary_output_piece_score,omitempty"` + // `vocab_size` is treated as a hard limit. Crash if + // the model can not produce the vocab of size `vocab_size`. + // When `hard_vocab_limit` is false, vocab_size is treated + // as a soft limit. Note that when model_type=char, + // always assumes hard_vocab_limit = false. + HardVocabLimit *bool `protobuf:"varint,33,opt,name=hard_vocab_limit,json=hardVocabLimit,def=1" json:"hard_vocab_limit,omitempty"` + // Use all symbols for vocab extraction. This flag is valid + // if the model type is either CHAR or WORD + UseAllVocab *bool `protobuf:"varint,34,opt,name=use_all_vocab,json=useAllVocab,def=0" json:"use_all_vocab,omitempty"` + /////////////////////////////////////////////////////////////////// + // Reserved special meta tokens. + // * -1 is not used. + // * unk_id must not be -1. + // Ids must start with 0 and be contiguous. + UnkId *int32 `protobuf:"varint,40,opt,name=unk_id,json=unkId,def=0" json:"unk_id,omitempty"` // <unk> + BosId *int32 `protobuf:"varint,41,opt,name=bos_id,json=bosId,def=1" json:"bos_id,omitempty"` // <s> + EosId *int32 `protobuf:"varint,42,opt,name=eos_id,json=eosId,def=2" json:"eos_id,omitempty"` // </s> + PadId *int32 `protobuf:"varint,43,opt,name=pad_id,json=padId,def=-1" json:"pad_id,omitempty"` // <pad> (padding) + UnkPiece *string `protobuf:"bytes,45,opt,name=unk_piece,json=unkPiece,def=<unk>" json:"unk_piece,omitempty"` + BosPiece *string `protobuf:"bytes,46,opt,name=bos_piece,json=bosPiece,def=<s>" json:"bos_piece,omitempty"` + EosPiece *string `protobuf:"bytes,47,opt,name=eos_piece,json=eosPiece,def=</s>" json:"eos_piece,omitempty"` + PadPiece *string `protobuf:"bytes,48,opt,name=pad_piece,json=padPiece,def=<pad>" json:"pad_piece,omitempty"` + // Encodes <unk> into U+2047 (DOUBLE QUESTION MARK), + // since this character can be useful both for user and + // developer. We can easily figure out that <unk> is emitted. + UnkSurface *string `protobuf:"bytes,44,opt,name=unk_surface,json=unkSurface,def= ⁇ " json:"unk_surface,omitempty"` + // Increase bit depth to allow unigram model training on large + // (>10M sentences) corpora. A side-effect of enabling this flag + // is increased memory usage. + TrainExtremelyLargeCorpus *bool `protobuf:"varint,49,opt,name=train_extremely_large_corpus,json=trainExtremelyLargeCorpus,def=0" json:"train_extremely_large_corpus,omitempty"` +} +
+// Default values for TrainerSpec fields.
+const ( + Default_TrainerSpec_ModelType = TrainerSpec_UNIGRAM + Default_TrainerSpec_VocabSize = int32(8000) + Default_TrainerSpec_SelfTestSampleSize = int32(0) + Default_TrainerSpec_CharacterCoverage = float32(0.9994999766349792) + Default_TrainerSpec_InputSentenceSize = int32(0) + Default_TrainerSpec_ShuffleInputSentence = bool(true) + Default_TrainerSpec_SeedSentencepieceSize = int32(1000000) + Default_TrainerSpec_ShrinkingFactor = float32(0.75) + Default_TrainerSpec_MaxSentenceLength = int32(4192) + Default_TrainerSpec_NumThreads = int32(16) + Default_TrainerSpec_NumSubIterations = int32(2) + Default_TrainerSpec_MaxSentencepieceLength = int32(16) + Default_TrainerSpec_SplitByUnicodeScript = bool(true) + Default_TrainerSpec_SplitByNumber = bool(true) + Default_TrainerSpec_SplitByWhitespace = bool(true) + Default_TrainerSpec_TreatWhitespaceAsSuffix = bool(false) + Default_TrainerSpec_SplitDigits = bool(false) + Default_TrainerSpec_ByteFallback = bool(false) + Default_TrainerSpec_VocabularyOutputPieceScore = bool(true) + Default_TrainerSpec_HardVocabLimit = bool(true) + Default_TrainerSpec_UseAllVocab = bool(false) + Default_TrainerSpec_UnkId = int32(0) + Default_TrainerSpec_BosId = int32(1) + Default_TrainerSpec_EosId = int32(2) + Default_TrainerSpec_PadId = int32(-1) + Default_TrainerSpec_UnkPiece = string("<unk>") + Default_TrainerSpec_BosPiece = string("<s>") + Default_TrainerSpec_EosPiece = string("</s>") + Default_TrainerSpec_PadPiece = string("<pad>") + Default_TrainerSpec_UnkSurface = string(" ⁇ ") + Default_TrainerSpec_TrainExtremelyLargeCorpus = bool(false) +) +
+func (x *TrainerSpec) Reset() { + *x = TrainerSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} +
+func (x *TrainerSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} +
+func (*TrainerSpec) ProtoMessage() {} +
+func (x *TrainerSpec) ProtoReflect() protoreflect.Message { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} +
+// Deprecated: Use TrainerSpec.ProtoReflect.Descriptor instead. +func (*TrainerSpec) Descriptor() ([]byte, []int) { + return file_sentencepiece_sentencepiece_model_proto_rawDescGZIP(), []int{0} +} +
+var extRange_TrainerSpec = []protoiface.ExtensionRangeV1{ + {Start: 200, End: 536870911}, +} +
+// Deprecated: Use TrainerSpec.ProtoReflect.Descriptor.ExtensionRanges instead.
+func (*TrainerSpec) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_TrainerSpec +} + +func (x *TrainerSpec) GetInput() []string { + if x != nil { + return x.Input + } + return nil +} + +func (x *TrainerSpec) GetInputFormat() string { + if x != nil && x.InputFormat != nil { + return *x.InputFormat + } + return "" +} + +func (x *TrainerSpec) GetModelPrefix() string { + if x != nil && x.ModelPrefix != nil { + return *x.ModelPrefix + } + return "" +} + +func (x *TrainerSpec) GetModelType() TrainerSpec_ModelType { + if x != nil && x.ModelType != nil { + return *x.ModelType + } + return Default_TrainerSpec_ModelType +} + +func (x *TrainerSpec) GetVocabSize() int32 { + if x != nil && x.VocabSize != nil { + return *x.VocabSize + } + return Default_TrainerSpec_VocabSize +} + +func (x *TrainerSpec) GetAcceptLanguage() []string { + if x != nil { + return x.AcceptLanguage + } + return nil +} + +func (x *TrainerSpec) GetSelfTestSampleSize() int32 { + if x != nil && x.SelfTestSampleSize != nil { + return *x.SelfTestSampleSize + } + return Default_TrainerSpec_SelfTestSampleSize +} + +func (x *TrainerSpec) GetCharacterCoverage() float32 { + if x != nil && x.CharacterCoverage != nil { + return *x.CharacterCoverage + } + return Default_TrainerSpec_CharacterCoverage +} + +func (x *TrainerSpec) GetInputSentenceSize() int32 { + if x != nil && x.InputSentenceSize != nil { + return *x.InputSentenceSize + } + return Default_TrainerSpec_InputSentenceSize +} + +func (x *TrainerSpec) GetShuffleInputSentence() bool { + if x != nil && x.ShuffleInputSentence != nil { + return *x.ShuffleInputSentence + } + return Default_TrainerSpec_ShuffleInputSentence +} + +// Deprecated: Do not use. +func (x *TrainerSpec) GetMiningSentenceSize() int32 { + if x != nil && x.MiningSentenceSize != nil { + return *x.MiningSentenceSize + } + return 0 +} + +// Deprecated: Do not use. 
+func (x *TrainerSpec) GetTrainingSentenceSize() int32 { + if x != nil && x.TrainingSentenceSize != nil { + return *x.TrainingSentenceSize + } + return 0 +} + +func (x *TrainerSpec) GetSeedSentencepieceSize() int32 { + if x != nil && x.SeedSentencepieceSize != nil { + return *x.SeedSentencepieceSize + } + return Default_TrainerSpec_SeedSentencepieceSize +} + +func (x *TrainerSpec) GetShrinkingFactor() float32 { + if x != nil && x.ShrinkingFactor != nil { + return *x.ShrinkingFactor + } + return Default_TrainerSpec_ShrinkingFactor +} + +func (x *TrainerSpec) GetMaxSentenceLength() int32 { + if x != nil && x.MaxSentenceLength != nil { + return *x.MaxSentenceLength + } + return Default_TrainerSpec_MaxSentenceLength +} + +func (x *TrainerSpec) GetNumThreads() int32 { + if x != nil && x.NumThreads != nil { + return *x.NumThreads + } + return Default_TrainerSpec_NumThreads +} + +func (x *TrainerSpec) GetNumSubIterations() int32 { + if x != nil && x.NumSubIterations != nil { + return *x.NumSubIterations + } + return Default_TrainerSpec_NumSubIterations +} + +func (x *TrainerSpec) GetMaxSentencepieceLength() int32 { + if x != nil && x.MaxSentencepieceLength != nil { + return *x.MaxSentencepieceLength + } + return Default_TrainerSpec_MaxSentencepieceLength +} + +func (x *TrainerSpec) GetSplitByUnicodeScript() bool { + if x != nil && x.SplitByUnicodeScript != nil { + return *x.SplitByUnicodeScript + } + return Default_TrainerSpec_SplitByUnicodeScript +} + +func (x *TrainerSpec) GetSplitByNumber() bool { + if x != nil && x.SplitByNumber != nil { + return *x.SplitByNumber + } + return Default_TrainerSpec_SplitByNumber +} + +func (x *TrainerSpec) GetSplitByWhitespace() bool { + if x != nil && x.SplitByWhitespace != nil { + return *x.SplitByWhitespace + } + return Default_TrainerSpec_SplitByWhitespace +} + +func (x *TrainerSpec) GetTreatWhitespaceAsSuffix() bool { + if x != nil && x.TreatWhitespaceAsSuffix != nil { + return *x.TreatWhitespaceAsSuffix + } + return Default_TrainerSpec_TreatWhitespaceAsSuffix +} + +func (x *TrainerSpec) GetSplitDigits() bool { + if x != nil && x.SplitDigits != nil { + return *x.SplitDigits + } + return Default_TrainerSpec_SplitDigits +} + +func (x *TrainerSpec) GetControlSymbols() []string { + if x != nil { + return x.ControlSymbols + } + return nil +} + +func (x *TrainerSpec) GetUserDefinedSymbols() []string { + if x != nil { + return x.UserDefinedSymbols + } + return nil +} + +func (x *TrainerSpec) GetRequiredChars() string { + if x != nil && x.RequiredChars != nil { + return *x.RequiredChars + } + return "" +} + +func (x *TrainerSpec) GetByteFallback() bool { + if x != nil && x.ByteFallback != nil { + return *x.ByteFallback + } + return Default_TrainerSpec_ByteFallback +} + +func (x *TrainerSpec) GetVocabularyOutputPieceScore() bool { + if x != nil && x.VocabularyOutputPieceScore != nil { + return *x.VocabularyOutputPieceScore + } + return Default_TrainerSpec_VocabularyOutputPieceScore +} + +func (x *TrainerSpec) GetHardVocabLimit() bool { + if x != nil && x.HardVocabLimit != nil { + return *x.HardVocabLimit + } + return Default_TrainerSpec_HardVocabLimit +} + +func (x *TrainerSpec) GetUseAllVocab() bool { + if x != nil && x.UseAllVocab != nil { + return *x.UseAllVocab + } + return Default_TrainerSpec_UseAllVocab +} + +func (x *TrainerSpec) GetUnkId() int32 { + if x != nil && x.UnkId != nil { + return *x.UnkId + } + return Default_TrainerSpec_UnkId +} + +func (x *TrainerSpec) GetBosId() int32 { + if x != nil && x.BosId != nil { + return *x.BosId + } + return 
Default_TrainerSpec_BosId +} +
+func (x *TrainerSpec) GetEosId() int32 { + if x != nil && x.EosId != nil { + return *x.EosId + } + return Default_TrainerSpec_EosId +} +
+func (x *TrainerSpec) GetPadId() int32 { + if x != nil && x.PadId != nil { + return *x.PadId + } + return Default_TrainerSpec_PadId +} +
+func (x *TrainerSpec) GetUnkPiece() string { + if x != nil && x.UnkPiece != nil { + return *x.UnkPiece + } + return Default_TrainerSpec_UnkPiece +} +
+func (x *TrainerSpec) GetBosPiece() string { + if x != nil && x.BosPiece != nil { + return *x.BosPiece + } + return Default_TrainerSpec_BosPiece +} +
+func (x *TrainerSpec) GetEosPiece() string { + if x != nil && x.EosPiece != nil { + return *x.EosPiece + } + return Default_TrainerSpec_EosPiece +} +
+func (x *TrainerSpec) GetPadPiece() string { + if x != nil && x.PadPiece != nil { + return *x.PadPiece + } + return Default_TrainerSpec_PadPiece +} +
+func (x *TrainerSpec) GetUnkSurface() string { + if x != nil && x.UnkSurface != nil { + return *x.UnkSurface + } + return Default_TrainerSpec_UnkSurface +} +
+func (x *TrainerSpec) GetTrainExtremelyLargeCorpus() bool { + if x != nil && x.TrainExtremelyLargeCorpus != nil { + return *x.TrainExtremelyLargeCorpus + } + return Default_TrainerSpec_TrainExtremelyLargeCorpus +} +
+// NormalizerSpec encodes the various parameters for string normalization +type NormalizerSpec struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // name of normalization rule. + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Pre-compiled normalization rule created by + // Builder::GetPrecompiledCharsMap() or Builder::CompileCharsMap() method. + // Usually this field is set by Builder::GetNormalizerSpec() method. + PrecompiledCharsmap []byte `protobuf:"bytes,2,opt,name=precompiled_charsmap,json=precompiledCharsmap" json:"precompiled_charsmap,omitempty"` + // Adds dummy whitespace at the beginning of text in order to + // treat "world" in "world" and "hello world" in the same way. + AddDummyPrefix *bool `protobuf:"varint,3,opt,name=add_dummy_prefix,json=addDummyPrefix,def=1" json:"add_dummy_prefix,omitempty"` + // Removes leading, trailing, and duplicate internal whitespace. + RemoveExtraWhitespaces *bool `protobuf:"varint,4,opt,name=remove_extra_whitespaces,json=removeExtraWhitespaces,def=1" json:"remove_extra_whitespaces,omitempty"` + // Replaces whitespace with meta symbol. + // This field must be true to train a sentence piece model. + EscapeWhitespaces *bool `protobuf:"varint,5,opt,name=escape_whitespaces,json=escapeWhitespaces,def=1" json:"escape_whitespaces,omitempty"` + // Custom normalization rule file in TSV format. + // https://github.com/google/sentencepiece/blob/master/doc/normalization.md + // This field is only used in SentencePieceTrainer::Train() method, which + // compiles the rule into the binary rule stored in `precompiled_charsmap`. + NormalizationRuleTsv *string `protobuf:"bytes,6,opt,name=normalization_rule_tsv,json=normalizationRuleTsv" json:"normalization_rule_tsv,omitempty"` +} +
+// Default values for NormalizerSpec fields.
+const ( + Default_NormalizerSpec_AddDummyPrefix = bool(true) + Default_NormalizerSpec_RemoveExtraWhitespaces = bool(true) + Default_NormalizerSpec_EscapeWhitespaces = bool(true) +) + +func (x *NormalizerSpec) Reset() { + *x = NormalizerSpec{} + if protoimpl.UnsafeEnabled { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *NormalizerSpec) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*NormalizerSpec) ProtoMessage() {} + +func (x *NormalizerSpec) ProtoReflect() protoreflect.Message { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use NormalizerSpec.ProtoReflect.Descriptor instead. +func (*NormalizerSpec) Descriptor() ([]byte, []int) { + return file_sentencepiece_sentencepiece_model_proto_rawDescGZIP(), []int{1} +} + +var extRange_NormalizerSpec = []protoiface.ExtensionRangeV1{ + {Start: 200, End: 536870911}, +} + +// Deprecated: Use NormalizerSpec.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*NormalizerSpec) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_NormalizerSpec +} + +func (x *NormalizerSpec) GetName() string { + if x != nil && x.Name != nil { + return *x.Name + } + return "" +} + +func (x *NormalizerSpec) GetPrecompiledCharsmap() []byte { + if x != nil { + return x.PrecompiledCharsmap + } + return nil +} + +func (x *NormalizerSpec) GetAddDummyPrefix() bool { + if x != nil && x.AddDummyPrefix != nil { + return *x.AddDummyPrefix + } + return Default_NormalizerSpec_AddDummyPrefix +} + +func (x *NormalizerSpec) GetRemoveExtraWhitespaces() bool { + if x != nil && x.RemoveExtraWhitespaces != nil { + return *x.RemoveExtraWhitespaces + } + return Default_NormalizerSpec_RemoveExtraWhitespaces +} + +func (x *NormalizerSpec) GetEscapeWhitespaces() bool { + if x != nil && x.EscapeWhitespaces != nil { + return *x.EscapeWhitespaces + } + return Default_NormalizerSpec_EscapeWhitespaces +} + +func (x *NormalizerSpec) GetNormalizationRuleTsv() string { + if x != nil && x.NormalizationRuleTsv != nil { + return *x.NormalizationRuleTsv + } + return "" +} + +// Proto to store samples for self-testing. +type SelfTestData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + Samples []*SelfTestData_Sample `protobuf:"bytes,1,rep,name=samples" json:"samples,omitempty"` +} + +func (x *SelfTestData) Reset() { + *x = SelfTestData{} + if protoimpl.UnsafeEnabled { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelfTestData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelfTestData) ProtoMessage() {} + +func (x *SelfTestData) ProtoReflect() protoreflect.Message { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelfTestData.ProtoReflect.Descriptor instead. 
+func (*SelfTestData) Descriptor() ([]byte, []int) { + return file_sentencepiece_sentencepiece_model_proto_rawDescGZIP(), []int{2} +} + +var extRange_SelfTestData = []protoiface.ExtensionRangeV1{ + {Start: 200, End: 536870911}, +} + +// Deprecated: Use SelfTestData.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*SelfTestData) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_SelfTestData +} + +func (x *SelfTestData) GetSamples() []*SelfTestData_Sample { + if x != nil { + return x.Samples + } + return nil +} + +// ModelProto stores model parameters. +// SentencePieceProcessor is supposed to be self-contained. +// All settings/parameters which may change the behavior must be encoded +// in ModelProto. +type ModelProto struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + // Sentence pieces with scores. + Pieces []*ModelProto_SentencePiece `protobuf:"bytes,1,rep,name=pieces" json:"pieces,omitempty"` + // Spec used to generate this model file. + TrainerSpec *TrainerSpec `protobuf:"bytes,2,opt,name=trainer_spec,json=trainerSpec" json:"trainer_spec,omitempty"` + // Spec for text normalization. + NormalizerSpec *NormalizerSpec `protobuf:"bytes,3,opt,name=normalizer_spec,json=normalizerSpec" json:"normalizer_spec,omitempty"` + // Stores sample input and its expected segmentation to verify the model. + SelfTestData *SelfTestData `protobuf:"bytes,4,opt,name=self_test_data,json=selfTestData" json:"self_test_data,omitempty"` + // Spec for text de-normalization. + DenormalizerSpec *NormalizerSpec `protobuf:"bytes,5,opt,name=denormalizer_spec,json=denormalizerSpec" json:"denormalizer_spec,omitempty"` +} + +func (x *ModelProto) Reset() { + *x = ModelProto{} + if protoimpl.UnsafeEnabled { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ModelProto) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ModelProto) ProtoMessage() {} + +func (x *ModelProto) ProtoReflect() protoreflect.Message { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ModelProto.ProtoReflect.Descriptor instead. +func (*ModelProto) Descriptor() ([]byte, []int) { + return file_sentencepiece_sentencepiece_model_proto_rawDescGZIP(), []int{3} +} + +var extRange_ModelProto = []protoiface.ExtensionRangeV1{ + {Start: 200, End: 536870911}, +} + +// Deprecated: Use ModelProto.ProtoReflect.Descriptor.ExtensionRanges instead. 
+func (*ModelProto) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_ModelProto +} + +func (x *ModelProto) GetPieces() []*ModelProto_SentencePiece { + if x != nil { + return x.Pieces + } + return nil +} + +func (x *ModelProto) GetTrainerSpec() *TrainerSpec { + if x != nil { + return x.TrainerSpec + } + return nil +} + +func (x *ModelProto) GetNormalizerSpec() *NormalizerSpec { + if x != nil { + return x.NormalizerSpec + } + return nil +} + +func (x *ModelProto) GetSelfTestData() *SelfTestData { + if x != nil { + return x.SelfTestData + } + return nil +} + +func (x *ModelProto) GetDenormalizerSpec() *NormalizerSpec { + if x != nil { + return x.DenormalizerSpec + } + return nil +} + +type SelfTestData_Sample struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Input *string `protobuf:"bytes,1,opt,name=input" json:"input,omitempty"` + Expected *string `protobuf:"bytes,2,opt,name=expected" json:"expected,omitempty"` +} + +func (x *SelfTestData_Sample) Reset() { + *x = SelfTestData_Sample{} + if protoimpl.UnsafeEnabled { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *SelfTestData_Sample) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*SelfTestData_Sample) ProtoMessage() {} + +func (x *SelfTestData_Sample) ProtoReflect() protoreflect.Message { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use SelfTestData_Sample.ProtoReflect.Descriptor instead. +func (*SelfTestData_Sample) Descriptor() ([]byte, []int) { + return file_sentencepiece_sentencepiece_model_proto_rawDescGZIP(), []int{2, 0} +} + +func (x *SelfTestData_Sample) GetInput() string { + if x != nil && x.Input != nil { + return *x.Input + } + return "" +} + +func (x *SelfTestData_Sample) GetExpected() string { + if x != nil && x.Expected != nil { + return *x.Expected + } + return "" +} + +type ModelProto_SentencePiece struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + extensionFields protoimpl.ExtensionFields + + Piece *string `protobuf:"bytes,1,opt,name=piece" json:"piece,omitempty"` // piece must not be empty. + Score *float32 `protobuf:"fixed32,2,opt,name=score" json:"score,omitempty"` + Type *ModelProto_SentencePiece_Type `protobuf:"varint,3,opt,name=type,enum=sentencepiece.ModelProto_SentencePiece_Type,def=1" json:"type,omitempty"` +} + +// Default values for ModelProto_SentencePiece fields. 
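+// A piece's vocabulary id is simply its index within ModelProto.pieces;
+// NewSentencepieceFromFile (sentencepiece_proto.go) relies on this when it
+// inserts each piece with its slice index as the id. A short sketch of
+// walking a loaded vocabulary, where model is an assumed, already
+// unmarshalled *ModelProto:
+//
+//	for id, p := range model.GetPieces() {
+//		fmt.Printf("%d\t%s\t%f\n", id, p.GetPiece(), p.GetScore())
+//	}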
+const ( + Default_ModelProto_SentencePiece_Type = ModelProto_SentencePiece_NORMAL +) + +func (x *ModelProto_SentencePiece) Reset() { + *x = ModelProto_SentencePiece{} + if protoimpl.UnsafeEnabled { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ModelProto_SentencePiece) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ModelProto_SentencePiece) ProtoMessage() {} + +func (x *ModelProto_SentencePiece) ProtoReflect() protoreflect.Message { + mi := &file_sentencepiece_sentencepiece_model_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ModelProto_SentencePiece.ProtoReflect.Descriptor instead. +func (*ModelProto_SentencePiece) Descriptor() ([]byte, []int) { + return file_sentencepiece_sentencepiece_model_proto_rawDescGZIP(), []int{3, 0} +} + +var extRange_ModelProto_SentencePiece = []protoiface.ExtensionRangeV1{ + {Start: 200, End: 536870911}, +} + +// Deprecated: Use ModelProto_SentencePiece.ProtoReflect.Descriptor.ExtensionRanges instead. +func (*ModelProto_SentencePiece) ExtensionRangeArray() []protoiface.ExtensionRangeV1 { + return extRange_ModelProto_SentencePiece +} + +func (x *ModelProto_SentencePiece) GetPiece() string { + if x != nil && x.Piece != nil { + return *x.Piece + } + return "" +} + +func (x *ModelProto_SentencePiece) GetScore() float32 { + if x != nil && x.Score != nil { + return *x.Score + } + return 0 +} + +func (x *ModelProto_SentencePiece) GetType() ModelProto_SentencePiece_Type { + if x != nil && x.Type != nil { + return *x.Type + } + return Default_ModelProto_SentencePiece_Type +} + +var File_sentencepiece_sentencepiece_model_proto protoreflect.FileDescriptor + +var file_sentencepiece_sentencepiece_model_proto_rawDesc = []byte{ + 0x0a, 0x27, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x2f, + 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, 0x6d, 0x6f, + 0x64, 0x65, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0d, 0x73, 0x65, 0x6e, 0x74, 0x65, + 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x22, 0x96, 0x0f, 0x0a, 0x0b, 0x54, 0x72, 0x61, + 0x69, 0x6e, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x12, 0x21, + 0x0a, 0x0c, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x74, 0x18, 0x07, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x46, 0x6f, 0x72, 0x6d, 0x61, + 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, + 0x78, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x50, 0x72, + 0x65, 0x66, 0x69, 0x78, 0x12, 0x4c, 0x0a, 0x0a, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x24, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x65, + 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x2e, 0x54, 0x72, 0x61, 0x69, 0x6e, 0x65, 0x72, + 0x53, 0x70, 0x65, 0x63, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x07, + 0x55, 0x4e, 0x49, 0x47, 0x52, 0x41, 0x4d, 0x52, 0x09, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x54, 0x79, + 0x70, 0x65, 0x12, 0x23, 0x0a, 0x0a, 0x76, 0x6f, 0x63, 0x61, 
0x62, 0x5f, 0x73, 0x69, 0x7a, 0x65, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x04, 0x38, 0x30, 0x30, 0x30, 0x52, 0x09, 0x76, 0x6f, + 0x63, 0x61, 0x62, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x27, 0x0a, 0x0f, 0x61, 0x63, 0x63, 0x65, 0x70, + 0x74, 0x5f, 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x4c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, + 0x12, 0x34, 0x0a, 0x15, 0x73, 0x65, 0x6c, 0x66, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x73, 0x61, + 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x05, 0x3a, + 0x01, 0x30, 0x52, 0x12, 0x73, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x53, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x35, 0x0a, 0x12, 0x63, 0x68, 0x61, 0x72, 0x61, 0x63, + 0x74, 0x65, 0x72, 0x5f, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x18, 0x0a, 0x20, 0x01, + 0x28, 0x02, 0x3a, 0x06, 0x30, 0x2e, 0x39, 0x39, 0x39, 0x35, 0x52, 0x11, 0x63, 0x68, 0x61, 0x72, + 0x61, 0x63, 0x74, 0x65, 0x72, 0x43, 0x6f, 0x76, 0x65, 0x72, 0x61, 0x67, 0x65, 0x12, 0x31, 0x0a, + 0x13, 0x69, 0x6e, 0x70, 0x75, 0x74, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x01, 0x30, 0x52, 0x11, 0x69, + 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x53, 0x69, 0x7a, 0x65, + 0x12, 0x3a, 0x0a, 0x16, 0x73, 0x68, 0x75, 0x66, 0x66, 0x6c, 0x65, 0x5f, 0x69, 0x6e, 0x70, 0x75, + 0x74, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x13, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x14, 0x73, 0x68, 0x75, 0x66, 0x66, 0x6c, 0x65, 0x49, + 0x6e, 0x70, 0x75, 0x74, 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x34, 0x0a, 0x14, + 0x6d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x5f, + 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x05, 0x42, 0x02, 0x18, 0x01, 0x52, 0x12, + 0x6d, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x53, 0x69, + 0x7a, 0x65, 0x12, 0x38, 0x0a, 0x16, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, 0x5f, 0x73, + 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0d, 0x20, 0x01, + 0x28, 0x05, 0x42, 0x02, 0x18, 0x01, 0x52, 0x14, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x69, 0x6e, 0x67, + 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x3f, 0x0a, 0x17, + 0x73, 0x65, 0x65, 0x64, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, + 0x63, 0x65, 0x5f, 0x73, 0x69, 0x7a, 0x65, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x07, 0x31, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x52, 0x15, 0x73, 0x65, 0x65, 0x64, 0x53, 0x65, 0x6e, 0x74, + 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x53, 0x69, 0x7a, 0x65, 0x12, 0x2f, 0x0a, + 0x10, 0x73, 0x68, 0x72, 0x69, 0x6e, 0x6b, 0x69, 0x6e, 0x67, 0x5f, 0x66, 0x61, 0x63, 0x74, 0x6f, + 0x72, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x02, 0x3a, 0x04, 0x30, 0x2e, 0x37, 0x35, 0x52, 0x0f, 0x73, + 0x68, 0x72, 0x69, 0x6e, 0x6b, 0x69, 0x6e, 0x67, 0x46, 0x61, 0x63, 0x74, 0x6f, 0x72, 0x12, 0x34, + 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x5f, 0x6c, + 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x12, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x04, 0x34, 0x31, 0x39, + 0x32, 0x52, 0x11, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x4c, 0x65, + 0x6e, 0x67, 0x74, 0x68, 0x12, 0x23, 0x0a, 0x0b, 0x6e, 0x75, 0x6d, 0x5f, 0x74, 0x68, 
0x72, 0x65, + 0x61, 0x64, 0x73, 0x18, 0x10, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x02, 0x31, 0x36, 0x52, 0x0a, 0x6e, + 0x75, 0x6d, 0x54, 0x68, 0x72, 0x65, 0x61, 0x64, 0x73, 0x12, 0x2f, 0x0a, 0x12, 0x6e, 0x75, 0x6d, + 0x5f, 0x73, 0x75, 0x62, 0x5f, 0x69, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, + 0x11, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x01, 0x32, 0x52, 0x10, 0x6e, 0x75, 0x6d, 0x53, 0x75, 0x62, + 0x49, 0x74, 0x65, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x3c, 0x0a, 0x18, 0x6d, 0x61, + 0x78, 0x5f, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x5f, + 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x14, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x02, 0x31, 0x36, + 0x52, 0x16, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, + 0x63, 0x65, 0x4c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x12, 0x3b, 0x0a, 0x17, 0x73, 0x70, 0x6c, 0x69, + 0x74, 0x5f, 0x62, 0x79, 0x5f, 0x75, 0x6e, 0x69, 0x63, 0x6f, 0x64, 0x65, 0x5f, 0x73, 0x63, 0x72, + 0x69, 0x70, 0x74, 0x18, 0x15, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, + 0x14, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x42, 0x79, 0x55, 0x6e, 0x69, 0x63, 0x6f, 0x64, 0x65, 0x53, + 0x63, 0x72, 0x69, 0x70, 0x74, 0x12, 0x2c, 0x0a, 0x0f, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x62, + 0x79, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x17, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, + 0x74, 0x72, 0x75, 0x65, 0x52, 0x0d, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x42, 0x79, 0x4e, 0x75, 0x6d, + 0x62, 0x65, 0x72, 0x12, 0x34, 0x0a, 0x13, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x62, 0x79, 0x5f, + 0x77, 0x68, 0x69, 0x74, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x16, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x11, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x42, 0x79, 0x57, + 0x68, 0x69, 0x74, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x42, 0x0a, 0x1a, 0x74, 0x72, 0x65, + 0x61, 0x74, 0x5f, 0x77, 0x68, 0x69, 0x74, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x5f, 0x61, 0x73, + 0x5f, 0x73, 0x75, 0x66, 0x66, 0x69, 0x78, 0x18, 0x18, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x17, 0x74, 0x72, 0x65, 0x61, 0x74, 0x57, 0x68, 0x69, 0x74, 0x65, + 0x73, 0x70, 0x61, 0x63, 0x65, 0x41, 0x73, 0x53, 0x75, 0x66, 0x66, 0x69, 0x78, 0x12, 0x28, 0x0a, + 0x0c, 0x73, 0x70, 0x6c, 0x69, 0x74, 0x5f, 0x64, 0x69, 0x67, 0x69, 0x74, 0x73, 0x18, 0x19, 0x20, + 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x73, 0x70, 0x6c, 0x69, + 0x74, 0x44, 0x69, 0x67, 0x69, 0x74, 0x73, 0x12, 0x27, 0x0a, 0x0f, 0x63, 0x6f, 0x6e, 0x74, 0x72, + 0x6f, 0x6c, 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x18, 0x1e, 0x20, 0x03, 0x28, 0x09, + 0x52, 0x0e, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x6f, 0x6c, 0x53, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, + 0x12, 0x30, 0x0a, 0x14, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x64, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, + 0x5f, 0x73, 0x79, 0x6d, 0x62, 0x6f, 0x6c, 0x73, 0x18, 0x1f, 0x20, 0x03, 0x28, 0x09, 0x52, 0x12, + 0x75, 0x73, 0x65, 0x72, 0x44, 0x65, 0x66, 0x69, 0x6e, 0x65, 0x64, 0x53, 0x79, 0x6d, 0x62, 0x6f, + 0x6c, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x64, 0x5f, 0x63, + 0x68, 0x61, 0x72, 0x73, 0x18, 0x24, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x72, 0x65, 0x71, 0x75, + 0x69, 0x72, 0x65, 0x64, 0x43, 0x68, 0x61, 0x72, 0x73, 0x12, 0x2a, 0x0a, 0x0d, 0x62, 0x79, 0x74, + 0x65, 0x5f, 0x66, 0x61, 0x6c, 0x6c, 0x62, 0x61, 0x63, 0x6b, 0x18, 0x23, 0x20, 0x01, 0x28, 0x08, + 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x46, 0x61, 0x6c, + 0x6c, 0x62, 
0x61, 0x63, 0x6b, 0x12, 0x47, 0x0a, 0x1d, 0x76, 0x6f, 0x63, 0x61, 0x62, 0x75, 0x6c, + 0x61, 0x72, 0x79, 0x5f, 0x6f, 0x75, 0x74, 0x70, 0x75, 0x74, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, + 0x5f, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x20, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, + 0x75, 0x65, 0x52, 0x1a, 0x76, 0x6f, 0x63, 0x61, 0x62, 0x75, 0x6c, 0x61, 0x72, 0x79, 0x4f, 0x75, + 0x74, 0x70, 0x75, 0x74, 0x50, 0x69, 0x65, 0x63, 0x65, 0x53, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x2e, + 0x0a, 0x10, 0x68, 0x61, 0x72, 0x64, 0x5f, 0x76, 0x6f, 0x63, 0x61, 0x62, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x21, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, + 0x68, 0x61, 0x72, 0x64, 0x56, 0x6f, 0x63, 0x61, 0x62, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x29, + 0x0a, 0x0d, 0x75, 0x73, 0x65, 0x5f, 0x61, 0x6c, 0x6c, 0x5f, 0x76, 0x6f, 0x63, 0x61, 0x62, 0x18, + 0x22, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, 0x61, 0x6c, 0x73, 0x65, 0x52, 0x0b, 0x75, 0x73, + 0x65, 0x41, 0x6c, 0x6c, 0x56, 0x6f, 0x63, 0x61, 0x62, 0x12, 0x18, 0x0a, 0x06, 0x75, 0x6e, 0x6b, + 0x5f, 0x69, 0x64, 0x18, 0x28, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x01, 0x30, 0x52, 0x05, 0x75, 0x6e, + 0x6b, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x06, 0x62, 0x6f, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x29, 0x20, + 0x01, 0x28, 0x05, 0x3a, 0x01, 0x31, 0x52, 0x05, 0x62, 0x6f, 0x73, 0x49, 0x64, 0x12, 0x18, 0x0a, + 0x06, 0x65, 0x6f, 0x73, 0x5f, 0x69, 0x64, 0x18, 0x2a, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x01, 0x32, + 0x52, 0x05, 0x65, 0x6f, 0x73, 0x49, 0x64, 0x12, 0x19, 0x0a, 0x06, 0x70, 0x61, 0x64, 0x5f, 0x69, + 0x64, 0x18, 0x2b, 0x20, 0x01, 0x28, 0x05, 0x3a, 0x02, 0x2d, 0x31, 0x52, 0x05, 0x70, 0x61, 0x64, + 0x49, 0x64, 0x12, 0x22, 0x0a, 0x09, 0x75, 0x6e, 0x6b, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x18, + 0x2d, 0x20, 0x01, 0x28, 0x09, 0x3a, 0x05, 0x3c, 0x75, 0x6e, 0x6b, 0x3e, 0x52, 0x08, 0x75, 0x6e, + 0x6b, 0x50, 0x69, 0x65, 0x63, 0x65, 0x12, 0x20, 0x0a, 0x09, 0x62, 0x6f, 0x73, 0x5f, 0x70, 0x69, + 0x65, 0x63, 0x65, 0x18, 0x2e, 0x20, 0x01, 0x28, 0x09, 0x3a, 0x03, 0x3c, 0x73, 0x3e, 0x52, 0x08, + 0x62, 0x6f, 0x73, 0x50, 0x69, 0x65, 0x63, 0x65, 0x12, 0x21, 0x0a, 0x09, 0x65, 0x6f, 0x73, 0x5f, + 0x70, 0x69, 0x65, 0x63, 0x65, 0x18, 0x2f, 0x20, 0x01, 0x28, 0x09, 0x3a, 0x04, 0x3c, 0x2f, 0x73, + 0x3e, 0x52, 0x08, 0x65, 0x6f, 0x73, 0x50, 0x69, 0x65, 0x63, 0x65, 0x12, 0x22, 0x0a, 0x09, 0x70, + 0x61, 0x64, 0x5f, 0x70, 0x69, 0x65, 0x63, 0x65, 0x18, 0x30, 0x20, 0x01, 0x28, 0x09, 0x3a, 0x05, + 0x3c, 0x70, 0x61, 0x64, 0x3e, 0x52, 0x08, 0x70, 0x61, 0x64, 0x50, 0x69, 0x65, 0x63, 0x65, 0x12, + 0x26, 0x0a, 0x0b, 0x75, 0x6e, 0x6b, 0x5f, 0x73, 0x75, 0x72, 0x66, 0x61, 0x63, 0x65, 0x18, 0x2c, + 0x20, 0x01, 0x28, 0x09, 0x3a, 0x05, 0x20, 0xe2, 0x81, 0x87, 0x20, 0x52, 0x0a, 0x75, 0x6e, 0x6b, + 0x53, 0x75, 0x72, 0x66, 0x61, 0x63, 0x65, 0x12, 0x46, 0x0a, 0x1c, 0x74, 0x72, 0x61, 0x69, 0x6e, + 0x5f, 0x65, 0x78, 0x74, 0x72, 0x65, 0x6d, 0x65, 0x6c, 0x79, 0x5f, 0x6c, 0x61, 0x72, 0x67, 0x65, + 0x5f, 0x63, 0x6f, 0x72, 0x70, 0x75, 0x73, 0x18, 0x31, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x05, 0x66, + 0x61, 0x6c, 0x73, 0x65, 0x52, 0x19, 0x74, 0x72, 0x61, 0x69, 0x6e, 0x45, 0x78, 0x74, 0x72, 0x65, + 0x6d, 0x65, 0x6c, 0x79, 0x4c, 0x61, 0x72, 0x67, 0x65, 0x43, 0x6f, 0x72, 0x70, 0x75, 0x73, 0x22, + 0x35, 0x0a, 0x09, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, + 0x55, 0x4e, 0x49, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x01, 0x12, 0x07, 0x0a, 0x03, 0x42, 0x50, 0x45, + 0x10, 0x02, 0x12, 0x08, 0x0a, 0x04, 0x57, 0x4f, 0x52, 0x44, 0x10, 0x03, 0x12, 0x08, 0x0a, 0x04, + 0x43, 0x48, 0x41, 0x52, 0x10, 0x04, 
0x2a, 0x09, 0x08, 0xc8, 0x01, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0xbd, 0x02, 0x0a, 0x0e, 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, + 0x53, 0x70, 0x65, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x31, 0x0a, 0x14, 0x70, 0x72, 0x65, 0x63, + 0x6f, 0x6d, 0x70, 0x69, 0x6c, 0x65, 0x64, 0x5f, 0x63, 0x68, 0x61, 0x72, 0x73, 0x6d, 0x61, 0x70, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x13, 0x70, 0x72, 0x65, 0x63, 0x6f, 0x6d, 0x70, 0x69, + 0x6c, 0x65, 0x64, 0x43, 0x68, 0x61, 0x72, 0x73, 0x6d, 0x61, 0x70, 0x12, 0x2e, 0x0a, 0x10, 0x61, + 0x64, 0x64, 0x5f, 0x64, 0x75, 0x6d, 0x6d, 0x79, 0x5f, 0x70, 0x72, 0x65, 0x66, 0x69, 0x78, 0x18, + 0x03, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x0e, 0x61, 0x64, 0x64, + 0x44, 0x75, 0x6d, 0x6d, 0x79, 0x50, 0x72, 0x65, 0x66, 0x69, 0x78, 0x12, 0x3e, 0x0a, 0x18, 0x72, + 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x5f, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x77, 0x68, 0x69, 0x74, + 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, + 0x72, 0x75, 0x65, 0x52, 0x16, 0x72, 0x65, 0x6d, 0x6f, 0x76, 0x65, 0x45, 0x78, 0x74, 0x72, 0x61, + 0x57, 0x68, 0x69, 0x74, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, 0x12, 0x33, 0x0a, 0x12, 0x65, + 0x73, 0x63, 0x61, 0x70, 0x65, 0x5f, 0x77, 0x68, 0x69, 0x74, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x3a, 0x04, 0x74, 0x72, 0x75, 0x65, 0x52, 0x11, 0x65, + 0x73, 0x63, 0x61, 0x70, 0x65, 0x57, 0x68, 0x69, 0x74, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x73, + 0x12, 0x34, 0x0a, 0x16, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x5f, 0x72, 0x75, 0x6c, 0x65, 0x5f, 0x74, 0x73, 0x76, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x14, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x75, 0x6c, 0x65, 0x54, 0x73, 0x76, 0x2a, 0x09, 0x08, 0xc8, 0x01, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x22, 0x93, 0x01, 0x0a, 0x0c, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x3c, 0x0a, 0x07, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, + 0x65, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, 0x61, + 0x2e, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x52, 0x07, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x73, + 0x1a, 0x3a, 0x0a, 0x06, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x69, 0x6e, + 0x70, 0x75, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x69, 0x6e, 0x70, 0x75, 0x74, + 0x12, 0x1a, 0x0a, 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x08, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, 0x65, 0x64, 0x2a, 0x09, 0x08, 0xc8, + 0x01, 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x22, 0xd7, 0x04, 0x0a, 0x0a, 0x4d, 0x6f, 0x64, 0x65, + 0x6c, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x3f, 0x0a, 0x06, 0x70, 0x69, 0x65, 0x63, 0x65, 0x73, + 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, + 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x50, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x50, 0x69, 0x65, 0x63, 0x65, 0x52, + 0x06, 0x70, 0x69, 0x65, 0x63, 0x65, 0x73, 0x12, 0x3d, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x02, 0x20, 
0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, + 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x2e, 0x54, 0x72, + 0x61, 0x69, 0x6e, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0b, 0x74, 0x72, 0x61, 0x69, 0x6e, + 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x12, 0x46, 0x0a, 0x0f, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, + 0x69, 0x7a, 0x65, 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, + 0x1d, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x2e, + 0x4e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x52, 0x0e, + 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x12, 0x41, + 0x0a, 0x0e, 0x73, 0x65, 0x6c, 0x66, 0x5f, 0x74, 0x65, 0x73, 0x74, 0x5f, 0x64, 0x61, 0x74, 0x61, + 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x73, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, + 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x44, + 0x61, 0x74, 0x61, 0x52, 0x0c, 0x73, 0x65, 0x6c, 0x66, 0x54, 0x65, 0x73, 0x74, 0x44, 0x61, 0x74, + 0x61, 0x12, 0x4a, 0x0a, 0x11, 0x64, 0x65, 0x6e, 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x72, 0x5f, 0x73, 0x70, 0x65, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x73, + 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x2e, 0x4e, 0x6f, 0x72, + 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x52, 0x10, 0x64, 0x65, 0x6e, + 0x6f, 0x72, 0x6d, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x72, 0x53, 0x70, 0x65, 0x63, 0x1a, 0xe6, 0x01, + 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x50, 0x69, 0x65, 0x63, 0x65, 0x12, + 0x14, 0x0a, 0x05, 0x70, 0x69, 0x65, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x70, 0x69, 0x65, 0x63, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x02, 0x52, 0x05, 0x73, 0x63, 0x6f, 0x72, 0x65, 0x12, 0x48, 0x0a, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2c, 0x2e, 0x73, 0x65, 0x6e, 0x74, + 0x65, 0x6e, 0x63, 0x65, 0x70, 0x69, 0x65, 0x63, 0x65, 0x2e, 0x4d, 0x6f, 0x64, 0x65, 0x6c, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x53, 0x65, 0x6e, 0x74, 0x65, 0x6e, 0x63, 0x65, 0x50, 0x69, 0x65, + 0x63, 0x65, 0x2e, 0x54, 0x79, 0x70, 0x65, 0x3a, 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x52, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x22, 0x54, 0x0a, 0x04, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0a, 0x0a, + 0x06, 0x4e, 0x4f, 0x52, 0x4d, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x4b, + 0x4e, 0x4f, 0x57, 0x4e, 0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x4e, 0x54, 0x52, 0x4f, + 0x4c, 0x10, 0x03, 0x12, 0x10, 0x0a, 0x0c, 0x55, 0x53, 0x45, 0x52, 0x5f, 0x44, 0x45, 0x46, 0x49, + 0x4e, 0x45, 0x44, 0x10, 0x04, 0x12, 0x08, 0x0a, 0x04, 0x42, 0x59, 0x54, 0x45, 0x10, 0x06, 0x12, + 0x0a, 0x0a, 0x06, 0x55, 0x4e, 0x55, 0x53, 0x45, 0x44, 0x10, 0x05, 0x2a, 0x09, 0x08, 0xc8, 0x01, + 0x10, 0x80, 0x80, 0x80, 0x80, 0x02, 0x2a, 0x09, 0x08, 0xc8, 0x01, 0x10, 0x80, 0x80, 0x80, 0x80, + 0x02, 0x42, 0x02, 0x48, 0x03, +} + +var ( + file_sentencepiece_sentencepiece_model_proto_rawDescOnce sync.Once + file_sentencepiece_sentencepiece_model_proto_rawDescData = file_sentencepiece_sentencepiece_model_proto_rawDesc +) + +func file_sentencepiece_sentencepiece_model_proto_rawDescGZIP() []byte { + file_sentencepiece_sentencepiece_model_proto_rawDescOnce.Do(func() { + file_sentencepiece_sentencepiece_model_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_sentencepiece_sentencepiece_model_proto_rawDescData) + }) + return file_sentencepiece_sentencepiece_model_proto_rawDescData +} + +var file_sentencepiece_sentencepiece_model_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_sentencepiece_sentencepiece_model_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_sentencepiece_sentencepiece_model_proto_goTypes = []interface{}{ + (TrainerSpec_ModelType)(0), // 0: sentencepiece.TrainerSpec.ModelType + (ModelProto_SentencePiece_Type)(0), // 1: sentencepiece.ModelProto.SentencePiece.Type + (*TrainerSpec)(nil), // 2: sentencepiece.TrainerSpec + (*NormalizerSpec)(nil), // 3: sentencepiece.NormalizerSpec + (*SelfTestData)(nil), // 4: sentencepiece.SelfTestData + (*ModelProto)(nil), // 5: sentencepiece.ModelProto + (*SelfTestData_Sample)(nil), // 6: sentencepiece.SelfTestData.Sample + (*ModelProto_SentencePiece)(nil), // 7: sentencepiece.ModelProto.SentencePiece +} +var file_sentencepiece_sentencepiece_model_proto_depIdxs = []int32{ + 0, // 0: sentencepiece.TrainerSpec.model_type:type_name -> sentencepiece.TrainerSpec.ModelType + 6, // 1: sentencepiece.SelfTestData.samples:type_name -> sentencepiece.SelfTestData.Sample + 7, // 2: sentencepiece.ModelProto.pieces:type_name -> sentencepiece.ModelProto.SentencePiece + 2, // 3: sentencepiece.ModelProto.trainer_spec:type_name -> sentencepiece.TrainerSpec + 3, // 4: sentencepiece.ModelProto.normalizer_spec:type_name -> sentencepiece.NormalizerSpec + 4, // 5: sentencepiece.ModelProto.self_test_data:type_name -> sentencepiece.SelfTestData + 3, // 6: sentencepiece.ModelProto.denormalizer_spec:type_name -> sentencepiece.NormalizerSpec + 1, // 7: sentencepiece.ModelProto.SentencePiece.type:type_name -> sentencepiece.ModelProto.SentencePiece.Type + 8, // [8:8] is the sub-list for method output_type + 8, // [8:8] is the sub-list for method input_type + 8, // [8:8] is the sub-list for extension type_name + 8, // [8:8] is the sub-list for extension extendee + 0, // [0:8] is the sub-list for field type_name +} + +func init() { file_sentencepiece_sentencepiece_model_proto_init() } +func file_sentencepiece_sentencepiece_model_proto_init() { + if File_sentencepiece_sentencepiece_model_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_sentencepiece_sentencepiece_model_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TrainerSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_sentencepiece_sentencepiece_model_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*NormalizerSpec); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_sentencepiece_sentencepiece_model_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*SelfTestData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + file_sentencepiece_sentencepiece_model_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ModelProto); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + case 3: + return &v.extensionFields + default: + return nil + } + } + 
file_sentencepiece_sentencepiece_model_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*SelfTestData_Sample); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			default:
+				return nil
+			}
+		}
+		file_sentencepiece_sentencepiece_model_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} {
+			switch v := v.(*ModelProto_SentencePiece); i {
+			case 0:
+				return &v.state
+			case 1:
+				return &v.sizeCache
+			case 2:
+				return &v.unknownFields
+			case 3:
+				return &v.extensionFields
+			default:
+				return nil
+			}
+		}
+	}
+	type x struct{}
+	out := protoimpl.TypeBuilder{
+		File: protoimpl.DescBuilder{
+			GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
+			RawDescriptor: file_sentencepiece_sentencepiece_model_proto_rawDesc,
+			NumEnums:      2,
+			NumMessages:   6,
+			NumExtensions: 0,
+			NumServices:   0,
+		},
+		GoTypes:           file_sentencepiece_sentencepiece_model_proto_goTypes,
+		DependencyIndexes: file_sentencepiece_sentencepiece_model_proto_depIdxs,
+		EnumInfos:         file_sentencepiece_sentencepiece_model_proto_enumTypes,
+		MessageInfos:      file_sentencepiece_sentencepiece_model_proto_msgTypes,
+	}.Build()
+	File_sentencepiece_sentencepiece_model_proto = out.File
+	file_sentencepiece_sentencepiece_model_proto_rawDesc = nil
+	file_sentencepiece_sentencepiece_model_proto_goTypes = nil
+	file_sentencepiece_sentencepiece_model_proto_depIdxs = nil
+}
diff --git a/sentencepiece/sentencepiece_model.proto b/sentencepiece/sentencepiece_model.proto
new file mode 100644
index 0000000..33c01fc
--- /dev/null
+++ b/sentencepiece/sentencepiece_model.proto
@@ -0,0 +1,314 @@
+// Copyright 2016 Google Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto2";
+
+// TODO(taku): Needs to use LITE RUNTIME in OSS release.
+option optimize_for = LITE_RUNTIME;
+
+package sentencepiece;
+
+// BEGIN GOOGLE-INTERNAL
+// LINT.IfChange
+// END GOOGLE-INTERNAL
+// TrainerSpec encodes various parameters for SentencePiece training.
+message TrainerSpec {
+  ///////////////////////////////////////////////////////////////////
+  // General parameters
+  //
+  // Input corpus files.
+  //  Trainer accepts the following two formats:
+  //  A) Monolingual: plain text, one sentence per line.
+  //  B) Bilingual:   TSV, source sentence <tab> target sentence
+  //  When bilingual data is passed, a shared vocabulary model is built.
+  //  Note that the input file must be a raw corpus, not a preprocessed corpus.
+  //  Trainer only loads the first `input_sentence_size` sentences specified
+  //  with this parameter.
+  repeated string input = 1;
+
+  // Input corpus format:
+  // "text": one-sentence-per-line text format (default)
+  // "tsv":  sentence <tab> freq
+  optional string input_format = 7;
+
+  // Output model file prefix.
+  // <model_prefix>.model and <model_prefix>.vocab are generated.
+  optional string model_prefix = 2;
+
+  // Model type.
+  enum ModelType {
+    UNIGRAM = 1;  // Unigram language model with dynamic algorithm
+    BPE = 2;      // Byte Pair Encoding
+    WORD = 3;     // Delimited by whitespace.
+    CHAR = 4;     // tokenizes into character sequence
+  }
+  optional ModelType model_type = 3 [default = UNIGRAM];
+
+  // Vocabulary size. 8k is the default size.
+  optional int32 vocab_size = 4 [default = 8000];
+
+  // List of the languages this model can accept.
+  // Since the model is language-agnostic, this field is used as a reference.
+  repeated string accept_language = 5;
+
+  // Size of self-test samples, which are encoded in the model file.
+  optional int32 self_test_sample_size = 6 [default = 0];
+
+  ///////////////////////////////////////////////////////////////////
+  // Training parameters.
+  //
+  // Uses characters which cover the corpus with the ratio of
+  // `character_coverage`. This parameter determines the set of basic
+  // alphabet characters of sentence pieces. The remaining
+  // 1.0 - `character_coverage` characters are treated as UNK.
+  // See also required_chars field.
+  optional float character_coverage = 10 [default = 0.9995];
+
+  // Maximum size of sentences the trainer loads from the `input` parameter.
+  // Trainer simply loads the `input` files in sequence.
+  // It is better to shuffle the input corpus randomly.
+  optional int32 input_sentence_size = 11 [default = 0];
+  optional bool shuffle_input_sentence = 19 [default = true];
+
+  // Maximum size of sentences to make seed sentence pieces.
+  // Extended suffix array is constructed to extract frequent
+  // sub-strings from the corpus. This uses 20N working space,
+  // where N is the size of the corpus.
+  optional int32 mining_sentence_size = 12 [deprecated = true];
+
+  // Maximum size of sentences to train sentence pieces.
+  optional int32 training_sentence_size = 13 [deprecated = true];
+
+  // The size of seed sentencepieces.
+  // `seed_sentencepiece_size` must be larger than `vocab_size`.
+  optional int32 seed_sentencepiece_size = 14 [default = 1000000];
+
+  // In every EM sub-iteration, keeps the top
+  // `shrinking_factor` * `current sentencepiece size` pieces with respect to
+  // the loss of each sentence piece. This value should be smaller than 1.0.
+  optional float shrinking_factor = 15 [default = 0.75];
+
+  // The maximum sentence length in bytes. Sentences longer than
+  // `max_sentence_length` are simply ignored.
+  // Longer input tends to bring the following risks:
+  //  * Overflow during EM training (unigram language model only)
+  //  * Performance drop because of O(n log n) cost in BPE.
+  optional int32 max_sentence_length = 18 [default = 4192];
+
+  // Number of threads in the training.
+  optional int32 num_threads = 16 [default = 16];
+
+  // Number of EM sub iterations.
+  optional int32 num_sub_iterations = 17 [default = 2];
+
+  ///////////////////////////////////////////////////////////////////
+  // SentencePiece parameters which control the shape of sentence pieces.
+  //
+  // Maximum length of a sentencepiece.
+  optional int32 max_sentencepiece_length = 20 [default = 16];
+
+  // Uses Unicode script to split sentence pieces.
+  // When `split_by_unicode_script` is true, we do not allow a sentence piece
+  // to include multiple Unicode scripts, e.g. "F1" is not a valid piece.
+  // Exception: CJ characters (Hiragana/Katakana/Han) are all handled
+  // as one script type, since Japanese words can consist of multiple scripts.
+  // This exception is always applied regardless of the accept-language
+  // parameter.
+  optional bool split_by_unicode_script = 21 [default = true];
+
+  // When `split_by_number` is true, put a boundary between number and
+  // non-number transition. If we want to treat "F1" as one token, set this
+  // flag to false.
+  optional bool split_by_number = 23 [default = true];
+
+  // Use whitespace to split sentence pieces.
+  // When `split_by_whitespace` is false, we may have a piece containing
+  // whitespace in the middle. e.g., "in_the".
+  optional bool split_by_whitespace = 22 [default = true];
+
+  // Adds whitespace symbol (_) as a suffix instead of prefix. e.g., _hello =>
+  // hello_. When `treat_whitespace_as_suffix` is true,
+  // NormalizerSpec::add_dummy_prefix will add the dummy whitespace to the end
+  // of the sentence.
+  optional bool treat_whitespace_as_suffix = 24 [default = false];
+
+  // Split all digits (0-9) into separate pieces.
+  optional bool split_digits = 25 [default = false];
+
+  ///////////////////////////////////////////////////////////////////
+  // Vocabulary management
+  //
+  // Defines control symbols used as an indicator to
+  // change the behavior of the decoder. <s> and </s> are pre-defined.
+  // We can use this field to encode various meta information,
+  // including language indicator in multilingual model.
+  // These symbols are not visible to users, but visible to
+  // the decoder. Note that when the input sentence contains control symbols,
+  // they are not treated as one token, but segmented into normal pieces.
+  // Control symbols must be inserted independently from the segmentation.
+  repeated string control_symbols = 30;
+
+  // Defines user defined symbols.
+  // These symbols are added with extremely high score
+  // so they are always treated as one unique symbol in any context.
+  // Typical usage of user_defined_symbols is placeholder for named entities.
+  repeated string user_defined_symbols = 31;
+
+  // Defines required characters. Each UTF-8 character in this string is
+  // included in the character set regardless of the character_coverage value.
+  // Unlike user_defined_symbols, these characters have scores based on their
+  // frequency in input sentences, and the model can form subwords using
+  // characters in this field.
+  optional string required_chars = 36;
+
+  // Decomposes unknown pieces into UTF-8 bytes.
+  optional bool byte_fallback = 35 [default = false];
+
+  // When creating the vocabulary file, defines whether or not to additionally
+  // output the score for each piece.
+  optional bool vocabulary_output_piece_score = 32 [default = true];
+
+  // `vocab_size` is treated as a hard limit: training crashes if
+  // the model cannot produce a vocab of size `vocab_size`.
+  // When `hard_vocab_limit` is false, `vocab_size` is treated
+  // as a soft limit. Note that when model_type=char,
+  // hard_vocab_limit = false is always assumed.
+  optional bool hard_vocab_limit = 33 [default = true];
+
+  // Uses all symbols for vocab extraction. This flag is valid only
+  // when model type is either CHAR or WORD.
+  optional bool use_all_vocab = 34 [default = false];
+
+  ///////////////////////////////////////////////////////////////////
+  // Reserved special meta tokens.
+  //  * -1 is not used.
+  //  * unk_id must not be -1.
+  // Ids must start with 0 and be contiguous.
+  optional int32 unk_id = 40 [default = 0];   // <unk>
+  optional int32 bos_id = 41 [default = 1];   // <s>
+  optional int32 eos_id = 42 [default = 2];   // </s>
+  optional int32 pad_id = 43 [default = -1];  // <pad> (padding)
+  optional string unk_piece = 45 [default = "<unk>"];
+  optional string bos_piece = 46 [default = "<s>"];
+  optional string eos_piece = 47 [default = "</s>"];
+  optional string pad_piece = 48 [default = "<pad>"];
+
+  // Encodes <unk> into U+2047 (DOUBLE QUESTION MARK),
+  // since this character can be useful both for user and
+  // developer. We can easily figure out that <unk> is emitted.
+  optional string unk_surface = 44 [default = " \xE2\x81\x87 "];
+
+  // Increase bit depth to allow unigram model training on large
+  // (>10M sentences) corpora. A side-effect of enabling this flag
+  // is increased memory usage.
+  optional bool train_extremely_large_corpus = 49 [default = false];
+
+  // Customized extensions: the range of field numbers
+  // are open to third-party extensions.
+  extensions 200 to max;
+}
+
+// NormalizerSpec encodes various parameters for string normalization.
+message NormalizerSpec {
+  // name of normalization rule.
+  optional string name = 1;
+
+  // Pre-compiled normalization rule created by
+  // Builder::GetPrecompiledCharsMap() or Builder::CompileCharsMap() method.
+  // Usually this field is set by Builder::GetNormalizerSpec() method.
+  optional bytes precompiled_charsmap = 2;
+
+  // Adds dummy whitespace at the beginning of text in order to
+  // treat "world" in "world" and "hello world" in the same way.
+  optional bool add_dummy_prefix = 3 [default = true];
+
+  // Removes leading, trailing, and duplicate internal whitespace.
+  optional bool remove_extra_whitespaces = 4 [default = true];
+
+  // Replaces whitespace with meta symbol.
+  // This field must be true to train a sentence piece model.
+  optional bool escape_whitespaces = 5 [default = true];
+
+  // Custom normalization rule file in TSV format.
+  // https://github.com/google/sentencepiece/blob/master/doc/normalization.md
+  // This field is only used in SentencePieceTrainer::Train() method, which
+  // compiles the rule into the binary rule stored in `precompiled_charsmap`.
+  optional string normalization_rule_tsv = 6;
+
+  // Customized extensions: the range of field numbers
+  // are open to third-party extensions.
+  extensions 200 to max;
+}
+// BEGIN GOOGLE-INTERNAL
+// LINT.ThenChange(
+//   //depot/google3/third_party/sentencepiece/src/spm_train_main.cc,
+//   //depot/google3/third_party/sentencepiece/src/spec_parser.h)
+// END GOOGLE-INTERNAL
+
+// Proto to store samples for self-testing.
+message SelfTestData {
+  message Sample {
+    optional string input = 1;
+    optional string expected = 2;
+  }
+  repeated Sample samples = 1;
+
+  // Customized extensions: the range of field numbers
+  // are open to third-party extensions.
+  extensions 200 to max;
+}
+
+// ModelProto stores model parameters.
+// SentencePieceProcessor is supposed to be self-contained.
+// All settings/parameters which may change the behavior must be encoded
+// in ModelProto.
+message ModelProto {
+  message SentencePiece {
+    enum Type {
+      NORMAL = 1;        // normal symbol
+      UNKNOWN = 2;       // unknown symbol. only <unk> for now.
+      CONTROL = 3;       // control symbols. </s>, <s>, <2ja> etc.
+      USER_DEFINED = 4;  // user defined symbols.
+                         // Typical usage of USER_DEFINED symbol
+                         // is placeholder.
+      BYTE = 6;          // byte symbols. Used when `byte_fallback` is true.
+      UNUSED = 5;        // this piece is not used.
+    }
+    optional string piece = 1;  // piece must not be empty.
+ optional float score = 2; + optional Type type = 3 [default = NORMAL]; + + // Customized extensions: the range of field numbers + // are open to third-party extensions. + extensions 200 to max; + } + + // Sentence pieces with scores. + repeated SentencePiece pieces = 1; + + // Spec used to generate this model file. + optional TrainerSpec trainer_spec = 2; + + // Spec for text normalization. + optional NormalizerSpec normalizer_spec = 3; + + // Stores sample input and its expected segmentation to verify the model. + optional SelfTestData self_test_data = 4; + + // Spec for text de-normalization. + optional NormalizerSpec denormalizer_spec = 5; + + // Customized extensions: the range of field numbers + // are open to third-party extensions. + extensions 200 to max; +} \ No newline at end of file diff --git a/sentencepiece/sentencepiece_proto.go b/sentencepiece/sentencepiece_proto.go new file mode 100644 index 0000000..5423ba7 --- /dev/null +++ b/sentencepiece/sentencepiece_proto.go @@ -0,0 +1,36 @@ +package sentencepiece + +import ( + "fmt" + "io/ioutil" + + "google.golang.org/protobuf/proto" +) + +// NewSentencepieceFromFile creates sentencepiece from file. +func NewSentencepieceFromFile(filename string, lowercase bool) (Sentencepiece, error) { + s := NewEmptySentencepiece(lowercase) + bytes, err := ioutil.ReadFile(filename) + if err != nil { + return s, fmt.Errorf("Unable to read file : %s, err %v", filename, err) + } + var model ModelProto + err = proto.Unmarshal(bytes, &model) + if err != nil { + return s, fmt.Errorf("Unable to read model file : %s, err %v", filename, err) + } + + count := 0 + unknownIndex := int64(0) + for i, piece := range model.GetPieces() { + word := piece.GetPiece() + if word == unknown { + unknownIndex = int64(i) + } + s.insert(word, piece.GetScore(), int64(i)) + count++ + } + + s.SetUnknownIndex(unknownIndex) + return s, nil +} diff --git a/sentencepiece/sentencepiece_test.go b/sentencepiece/sentencepiece_test.go new file mode 100644 index 0000000..a2e0968 --- /dev/null +++ b/sentencepiece/sentencepiece_test.go @@ -0,0 +1,323 @@ +package sentencepiece + +import ( + "reflect" + "testing" +) + +func TestTokenization(t *testing.T) { + sp, err := NewSentencepieceFromFile("test_data/xlnet-base-cased-spiece.model", false) + if err != nil { + t.Errorf("Unable to create sentencepiece") + return + } + + tests := []struct { + text string + tokens []Token + }{ + {text: "this", tokens: []Token{{ID: 52, Text: "▁this"}}}, + {text: "hello", tokens: []Token{{ID: 24717, Text: "▁hello"}}}, + {text: "This is a sample sentence to be tokénized", tokens: []Token{ + {ID: 122, Text: "▁This"}, + {ID: 27, Text: "▁is"}, + {ID: 24, Text: "▁a"}, + {ID: 4561, Text: "▁sample"}, + {ID: 3833, Text: "▁sentence"}, + {ID: 22, Text: "▁to"}, + {ID: 39, Text: "▁be"}, + {ID: 22, Text: "▁to"}, + {ID: 267, Text: "k"}, + {ID: 0, Text: "é"}, + {ID: 180, Text: "n"}, + {ID: 1227, Text: "ized"}, + }}, + {text: "Wondering how this will get tokenized 🤔 ?", tokens: []Token{ + {ID: 14748, Text: "▁Wonder"}, + {ID: 56, Text: "ing"}, + {ID: 160, Text: "▁how"}, + {ID: 52, Text: "▁this"}, + {ID: 53, Text: "▁will"}, + {ID: 133, Text: "▁get"}, + {ID: 17366, Text: "▁token"}, + {ID: 1227, Text: "ized"}, + {ID: 17, Text: "▁"}, + {ID: 0, Text: "🤔"}, + {ID: 17, Text: "▁"}, + {ID: 82, Text: "?"}, + }}, + {text: "İs th!s 𩸽 Ϻ Šœ Ugljšić dấu nặng", tokens: []Token{ + {ID: 17, Text: "▁"}, + {ID: 0, Text: "İ"}, + {ID: 23, Text: "s"}, + {ID: 17, Text: "▁"}, + {ID: 138, Text: "th"}, + {ID: 136, Text: "!"}, + {ID: 23, Text: "s"}, + 
{ID: 17, Text: "▁"}, + {ID: 0, Text: "𩸽"}, + {ID: 17, Text: "▁"}, + {ID: 0, Text: "Ϻ"}, + {ID: 17, Text: "▁"}, + {ID: 0, Text: "Š"}, + {ID: 128, Text: "▁U"}, + {ID: 15222, Text: "gl"}, + {ID: 1315, Text: "j"}, + {ID: 0, Text: "š"}, + {ID: 150, Text: "i"}, + {ID: 0, Text: "ć"}, + {ID: 17, Text: "▁"}, + {ID: 66, Text: "d"}, + {ID: 0, Text: "ấ"}, + {ID: 660, Text: "u"}, + {ID: 17, Text: "▁"}, + {ID: 180, Text: "n"}, + {ID: 0, Text: "ặ"}, + {ID: 3511, Text: "ng"}, + }}, + {text: "compose email to john saying i will be running late to office today because i am not feeling well, my head is aching and in the body add shall we meet next week and when we go to the office lets reach by around 10 am and go for a movie in the evening, may be Spiderman which seems to be a very good movie which got 5 star review from rottentomatoes and imdb", tokens: []Token{ + {ID: 23391, Text: "▁compose"}, + {ID: 1706, Text: "▁email"}, + {ID: 22, Text: "▁to"}, + {ID: 17, Text: "▁"}, + {ID: 22116, Text: "john"}, + {ID: 591, Text: "▁saying"}, + {ID: 17, Text: "▁"}, + {ID: 150, Text: "i"}, + {ID: 53, Text: "▁will"}, + {ID: 39, Text: "▁be"}, + {ID: 926, Text: "▁running"}, + {ID: 471, Text: "▁late"}, + {ID: 22, Text: "▁to"}, + {ID: 495, Text: "▁office"}, + {ID: 494, Text: "▁today"}, + {ID: 149, Text: "▁because"}, + {ID: 17, Text: "▁"}, + {ID: 150, Text: "i"}, + {ID: 569, Text: "▁am"}, + {ID: 50, Text: "▁not"}, + {ID: 1803, Text: "▁feeling"}, + {ID: 143, Text: "▁well"}, + {ID: 19, Text: ","}, + {ID: 94, Text: "▁my"}, + {ID: 291, Text: "▁head"}, + {ID: 27, Text: "▁is"}, + {ID: 24, Text: "▁a"}, + {ID: 5410, Text: "ching"}, + {ID: 21, Text: "▁and"}, + {ID: 25, Text: "▁in"}, + {ID: 18, Text: "▁the"}, + {ID: 458, Text: "▁body"}, + {ID: 1319, Text: "▁add"}, + {ID: 1530, Text: "▁shall"}, + {ID: 80, Text: "▁we"}, + {ID: 767, Text: "▁meet"}, + {ID: 244, Text: "▁next"}, + {ID: 260, Text: "▁week"}, + {ID: 21, Text: "▁and"}, + {ID: 90, Text: "▁when"}, + {ID: 80, Text: "▁we"}, + {ID: 216, Text: "▁go"}, + {ID: 22, Text: "▁to"}, + {ID: 18, Text: "▁the"}, + {ID: 495, Text: "▁office"}, + {ID: 10234, Text: "▁lets"}, + {ID: 1287, Text: "▁reach"}, + {ID: 37, Text: "▁by"}, + {ID: 199, Text: "▁around"}, + {ID: 241, Text: "▁10"}, + {ID: 569, Text: "▁am"}, + {ID: 21, Text: "▁and"}, + {ID: 216, Text: "▁go"}, + {ID: 28, Text: "▁for"}, + {ID: 24, Text: "▁a"}, + {ID: 1432, Text: "▁movie"}, + {ID: 25, Text: "▁in"}, + {ID: 18, Text: "▁the"}, + {ID: 2060, Text: "▁evening"}, + {ID: 19, Text: ","}, + {ID: 132, Text: "▁may"}, + {ID: 39, Text: "▁be"}, + {ID: 17489, Text: "▁Spider"}, + {ID: 249, Text: "man"}, + {ID: 59, Text: "▁which"}, + {ID: 1303, Text: "▁seems"}, + {ID: 22, Text: "▁to"}, + {ID: 39, Text: "▁be"}, + {ID: 24, Text: "▁a"}, + {ID: 172, Text: "▁very"}, + {ID: 195, Text: "▁good"}, + {ID: 1432, Text: "▁movie"}, + {ID: 59, Text: "▁which"}, + {ID: 345, Text: "▁got"}, + {ID: 306, Text: "▁5"}, + {ID: 1795, Text: "▁star"}, + {ID: 1398, Text: "▁review"}, + {ID: 40, Text: "▁from"}, + {ID: 28626, Text: "▁rotten"}, + {ID: 261, Text: "to"}, + {ID: 18693, Text: "mato"}, + {ID: 202, Text: "es"}, + {ID: 21, Text: "▁and"}, + {ID: 7693, Text: "▁im"}, + {ID: 66, Text: "d"}, + {ID: 508, Text: "b"}, + }}, + } + + for _, test := range tests { + output := sp.Tokenize(test.text) + if !reflect.DeepEqual(output, test.tokens) { + t.Errorf("Tokenization error : %s, len %d, got %v || expected %v", test.text, len(test.text), output, test.tokens) + } + } +} + +func TestTokenizationSPM(t *testing.T) { + sp, err := NewSentencepieceFromFile("test_data/spm.model", true) + if err 
!= nil { + t.Errorf("Unable to create sentencepiece") + return + } + + tests := []struct { + text string + tokens []Token + }{ + {text: "this", tokens: []Token{{ID: 48, Text: "▁this"}}}, + {text: "hello", tokens: []Token{{ID: 10975, Text: "▁hello"}}}, + {text: "This is a sample sentence to be tokénized", tokens: []Token{ + {ID: 48, Text: "▁this"}, + {ID: 25, Text: "▁is"}, + {ID: 21, Text: "▁a"}, + {ID: 5717, Text: "▁sample"}, + {ID: 5123, Text: "▁sentence"}, + {ID: 20, Text: "▁to"}, + {ID: 44, Text: "▁be"}, + {ID: 20, Text: "▁to"}, + {ID: 197, Text: "k"}, + {ID: 1, Text: "é"}, + {ID: 103, Text: "n"}, + {ID: 1333, Text: "ized"}, + }}, + {text: "compose email to john saying i will be running late to office today because i am not feeling well, my head is aching and in the body add shall we meet next week and when we go to the office lets reach by around 10 am and go for a movie in the evening, may be Spiderman which seems to be a very good movie which got 5 star review from rottentomatoes and imdb", tokens: []Token{ + {ID: 18217, Text: "▁compose"}, + {ID: 8517, Text: "▁email"}, + {ID: 20, Text: "▁to"}, + {ID: 239, Text: "▁john"}, + {ID: 1148, Text: "▁saying"}, + {ID: 31, Text: "▁i"}, + {ID: 129, Text: "▁will"}, + {ID: 44, Text: "▁be"}, + {ID: 946, Text: "▁running"}, + {ID: 456, Text: "▁late"}, + {ID: 20, Text: "▁to"}, + {ID: 488, Text: "▁office"}, + {ID: 786, Text: "▁today"}, + {ID: 185, Text: "▁because"}, + {ID: 31, Text: "▁i"}, + {ID: 589, Text: "▁am"}, + {ID: 52, Text: "▁not"}, + {ID: 1249, Text: "▁feeling"}, + {ID: 134, Text: "▁well"}, + {ID: 15, Text: ","}, + {ID: 51, Text: "▁my"}, + {ID: 157, Text: "▁head"}, + {ID: 25, Text: "▁is"}, + {ID: 17010, Text: "▁aching"}, + {ID: 17, Text: "▁and"}, + {ID: 19, Text: "▁in"}, + {ID: 14, Text: "▁the"}, + {ID: 358, Text: "▁body"}, + {ID: 3547, Text: "▁add"}, + {ID: 3004, Text: "▁shall"}, + {ID: 95, Text: "▁we"}, + {ID: 1255, Text: "▁meet"}, + {ID: 328, Text: "▁next"}, + {ID: 877, Text: "▁week"}, + {ID: 17, Text: "▁and"}, + {ID: 76, Text: "▁when"}, + {ID: 95, Text: "▁we"}, + {ID: 162, Text: "▁go"}, + {ID: 20, Text: "▁to"}, + {ID: 14, Text: "▁the"}, + {ID: 488, Text: "▁office"}, + {ID: 6884, Text: "▁lets"}, + {ID: 1470, Text: "▁reach"}, + {ID: 34, Text: "▁by"}, + {ID: 140, Text: "▁around"}, + {ID: 332, Text: "▁10"}, + {ID: 589, Text: "▁am"}, + {ID: 17, Text: "▁and"}, + {ID: 162, Text: "▁go"}, + {ID: 26, Text: "▁for"}, + {ID: 21, Text: "▁a"}, + {ID: 1308, Text: "▁movie"}, + {ID: 19, Text: "▁in"}, + {ID: 14, Text: "▁the"}, + {ID: 2089, Text: "▁evening"}, + {ID: 15, Text: ","}, + {ID: 123, Text: "▁may"}, + {ID: 44, Text: "▁be"}, + {ID: 5650, Text: "▁spider"}, + {ID: 177, Text: "man"}, + {ID: 56, Text: "▁which"}, + {ID: 2206, Text: "▁seems"}, + {ID: 20, Text: "▁to"}, + {ID: 44, Text: "▁be"}, + {ID: 21, Text: "▁a"}, + {ID: 253, Text: "▁very"}, + {ID: 254, Text: "▁good"}, + {ID: 1308, Text: "▁movie"}, + {ID: 56, Text: "▁which"}, + {ID: 330, Text: "▁got"}, + {ID: 331, Text: "▁5"}, + {ID: 778, Text: "▁star"}, + {ID: 1487, Text: "▁review"}, + {ID: 37, Text: "▁from"}, + {ID: 11573, Text: "▁rotten"}, + {ID: 262, Text: "to"}, + {ID: 8844, Text: "mato"}, + {ID: 160, Text: "es"}, + {ID: 17, Text: "▁and"}, + {ID: 797, Text: "▁im"}, + {ID: 9007, Text: "db"}, + }}, + } + + for _, test := range tests { + output := sp.Tokenize(test.text) + if !reflect.DeepEqual(output, test.tokens) { + t.Errorf("Tokenization error : %s, len %d, got %v || expected %v", test.text, len(test.text), output, test.tokens) + } + } +} + +func BenchmarkSentencePiece(b *testing.B) { + sp, err := 
NewSentencepieceFromFile("test_data/xlnet-base-cased-spiece.model", false)
+	if err != nil {
+		b.Errorf("Unable to create sentencepiece")
+		return
+	}
+
+	b.ResetTimer()
+
+	inputs := []string{
+		"compose email to john saying i will be running late to office today because i am not feeling well, my head is aching and in the body add shall we meet next week and when we go to the office lets reach by around 10 am and go for a movie in the evening, may be Spiderman which seems to be a very good movie which got 5 star review from rottentomatoes and imdb",
+	}
+
+	for _, input := range inputs {
+		b.Run(firstNChars(input, 20), func(b *testing.B) {
+			for i := 0; i < b.N; i++ {
+				sp.Tokenize(input)
+			}
+		})
+	}
+}
+
+// firstNChars truncates s to at most n bytes to build short benchmark names.
+// Byte-based truncation is fine here because the benchmark inputs begin with
+// ASCII characters.
+func firstNChars(s string, n int) string {
+	if len(s) < n {
+		return s
+	}
+	return s[:n]
+}
diff --git a/sentencepiece/test_data/spm.model b/sentencepiece/test_data/spm.model
new file mode 100644
index 0000000..c91b8ac
Binary files /dev/null and b/sentencepiece/test_data/spm.model differ
diff --git a/sentencepiece/test_data/xlnet-base-cased-spiece.model b/sentencepiece/test_data/xlnet-base-cased-spiece.model
new file mode 100644
index 0000000..9cdb90c
Binary files /dev/null and b/sentencepiece/test_data/xlnet-base-cased-spiece.model differ
diff --git a/sentencepiece/token.go b/sentencepiece/token.go
new file mode 100644
index 0000000..39a5d80
--- /dev/null
+++ b/sentencepiece/token.go
@@ -0,0 +1,14 @@
+package sentencepiece
+
+// Token holds a single unit of tokenized text.
+type Token struct {
+	ID   int64
+	Text string
+}
+
+// tokenOffset is the internal counterpart of Token used during segmentation;
+// start and end delimit the piece within the input being tokenized.
+type tokenOffset struct {
+	id    int64
+	text  string
+	start int
+	end   int
+}
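+
+// A usage sketch tying the pieces together; the id shown is the one
+// exercised by TestTokenizationSPM against test_data/spm.model with
+// lowercasing enabled:
+//
+//	sp, _ := NewSentencepieceFromFile("test_data/spm.model", true)
+//	sp.Tokenize("hello") // => []Token{{ID: 10975, Text: "▁hello"}}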