diff --git a/.github/workflows/cd-create-release-notes.yaml b/.github/workflows/cd-create-release-notes.yaml new file mode 100644 index 0000000000..e5ee701fbe --- /dev/null +++ b/.github/workflows/cd-create-release-notes.yaml @@ -0,0 +1,69 @@ +name: Create Release Notes + +on: + workflow_dispatch: + inputs: + version: + description: 'SemVer format release tag, i.e. 0.2.4' + required: true + repository_dispatch: + types: [ release-notes ] + +jobs: + create-release-notes: + runs-on: ubuntu-22.04 + steps: + - name: Get Vars + id: get_vars + run: | + if [ "$EVENT_NAME" == "workflow_dispatch" ] + then + release_id=$(curl -H "Accept: application/vnd.github.v3+json" https://api.github.com/repos/dolthub/doltgresql/releases/tags/v${{ github.event.inputs.version }} | jq '.id') + echo "version=${{ github.event.inputs.version }}" >> $GITHUB_OUTPUT + echo "release_id=$release_id" >> $GITHUB_OUTPUT + else + echo "version=${{ github.event.client_payload.version }}" >> $GITHUB_OUTPUT + echo "release_id=${{ github.event.client_payload.release_id }}" >> $GITHUB_OUTPUT + fi + env: + EVENT_NAME: ${{ github.event_name }} + - name: Checkout Release Notes Generator + uses: actions/checkout@v3 + with: + repository: dolthub/release-notes-generator + token: ${{ secrets.REPO_ACCESS_TOKEN }} + - name: Install Dependencies + run: sudo ./install-deps.sh + env: + PERL_MM_USE_DEFAULT: 1 + - name: Create Notes + run: | + git clone https://github.com/dolthub/doltgresql.git + ./gen_release_notes.pl \ + --token "$TOKEN" dolthub/doltgresql v${{ steps.get_vars.outputs.version }} > changelog.txt + env: + TOKEN: ${{ secrets.REPO_ACCESS_TOKEN }} + - name: Post Changelog to Release + uses: actions/github-script@v6 + with: + debug: true + github-token: ${{ secrets.REPO_ACCESS_TOKEN }} + script: | + const fs = require('fs'); + const path = require('path') + try { + const body = fs.readFileSync(path.join(process.env.WORKSPACE, "changelog.txt"), { encoding: "utf8" }) + const res = await github.rest.repos.updateRelease({ + owner: "dolthub", + repo: "doltgresql", + release_id: parseInt(process.env.RELEASE_ID, 10), + body, + }); + console.log("Successfully updated release notes", res) + } catch (err) { + console.log("Error", err); + process.exit(1); + } + env: + WORKSPACE: ${{ github.workspace }} + RELEASE_ID: ${{ steps.get_vars.outputs.release_id }} diff --git a/.github/workflows/cd-release.yaml b/.github/workflows/cd-release.yaml new file mode 100644 index 0000000000..5915750d4c --- /dev/null +++ b/.github/workflows/cd-release.yaml @@ -0,0 +1,137 @@ +name: Release DoltgreSQL + +on: + workflow_dispatch: + inputs: + version: + description: 'SemVer format release tag, i.e. 
0.2.4' + required: true + +jobs: + format-version: + runs-on: ubuntu-22.04 + outputs: + version: ${{ steps.format_version.outputs.version }} + steps: + - name: Format Input + id: format_version + run: | + version="${{ github.event.inputs.version }}" + if [[ $version == v* ]]; + then + version="${version:1}" + fi + echo "version=$version" >> $GITHUB_OUTPUT + + create-release: + needs: format-version + name: Create release + runs-on: ubuntu-22.04 + outputs: + release_id: ${{ steps.create_release.outputs.id }} + steps: + - name: Checkout code + uses: actions/checkout@v3 + - name: Update Doltgres version command + run: sed -i -e 's/ Version = ".*"/ Version = "'"$NEW_VERSION"'"/' "$FILE" + env: + FILE: ${{ format('{0}/server/server.go', github.workspace) }} + NEW_VERSION: ${{ needs.format-version.outputs.version }} + - uses: EndBug/add-and-commit@v9.1.1 + with: + message: ${{ format('[ga-bump-release] Update DoltgreSQL version to {0} and release v{0}', needs.format-version.outputs.version) }} + add: ${{ format('{0}/server/server.go', github.workspace) }} + cwd: "." + pull: "--ff" + - name: Build SQL Syntax + run: ./build.sh + working-directory: ./postgres/parser + shell: bash + - name: Build Binaries + id: build_binaries + run: | + latest=$(git rev-parse HEAD) + echo "commitish=$latest" >> $GITHUB_OUTPUT + GO_BUILD_VERSION=1.21 scripts/build_binaries.sh + - name: Create Release + id: create_release + uses: dolthub/create-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + tag_name: v${{ needs.format-version.outputs.version }} + release_name: ${{ needs.format-version.outputs.version }} + draft: false + prerelease: true + commitish: ${{ steps.build_binaries.outputs.commitish }} + - name: Upload Linux AMD64 Distro + id: upload-linux-amd64-distro + uses: dolthub/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: out/doltgres-linux-amd64.tar.gz + asset_name: doltgres-linux-amd64.tar.gz + asset_content_type: application/zip + - name: Upload Linux ARM64 Distro + id: upload-linux-arm64-distro + uses: dolthub/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: out/doltgres-linux-arm64.tar.gz + asset_name: doltgres-linux-arm64.tar.gz + asset_content_type: application/zip + - name: Upload OSX AMD64 Distro + id: upload-osx-amd64-distro + uses: dolthub/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: out/doltgres-darwin-amd64.tar.gz + asset_name: doltgres-darwin-amd64.tar.gz + asset_content_type: application/zip + - name: Upload OSX ARM64 Distro + id: upload-osx-arm64-distro + uses: dolthub/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: out/doltgres-darwin-arm64.tar.gz + asset_name: doltgres-darwin-arm64.tar.gz + asset_content_type: application/zip + - name: Upload Windows Distro + id: upload-windows-distro + uses: dolthub/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: out/doltgres-windows-amd64.zip + asset_name: doltgres-windows-amd64.zip + asset_content_type: application/zip + - name: Upload Windows Distro 7z + id: upload-windows-distro-7z + uses: 
dolthub/upload-release-asset@v1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + with: + upload_url: ${{ steps.create_release.outputs.upload_url }} + asset_path: out/doltgres-windows-amd64.7z + asset_name: doltgres-windows-amd64.7z + asset_content_type: application/x-7z-compressed + + create-release-notes: + needs: [format-version, create-release] + runs-on: ubuntu-22.04 + steps: + - name: Trigger Release Notes + uses: peter-evans/repository-dispatch@v2.0.0 + with: + token: ${{ secrets.REPO_ACCESS_TOKEN }} + event-type: release-notes + client-payload: '{"version": "${{ needs.format-version.outputs.version }}", "release_id": "${{ needs.create-release.outputs.release_id }}"}' diff --git a/.github/workflows/ci-check-repo.yaml b/.github/workflows/ci-check-repo.yaml new file mode 100644 index 0000000000..ac2571371c --- /dev/null +++ b/.github/workflows/ci-check-repo.yaml @@ -0,0 +1,42 @@ +name: Check Formatting, Committers and Generated Code + +on: + pull_request: + branches: [ main ] + +concurrency: + group: ci-check-repo-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + verify: + name: Verify format + runs-on: ubuntu-22.04 + outputs: + format: ${{ steps.should_format.outputs.format }} + steps: + - name: Setup Go 1.x + uses: actions/setup-go@v3 + with: + go-version: ^1.21 + - uses: actions/checkout@v3 + with: + submodules: true + - name: Check all + id: should_format + run: | + ./scripts/check_bats_fmt.sh + + if ./scripts/check_fmt.sh ; then + echo "code is formatted" + else + echo "Please run scripts/format_repo.sh to format this pull request." + exit 1; + fi + + ./postgres/parser/build.sh + GOFLAGS="-mod=readonly" go build ./... + go vet -mod=readonly ./... + env: + BRANCH_NAME: ${{ github.head_ref }} + CHANGE_TARGET: ${{ github.base_ref }} diff --git a/postgres/connection/connection.go b/postgres/connection/connection.go index a0b4579375..bfb8e0f018 100644 --- a/postgres/connection/connection.go +++ b/postgres/connection/connection.go @@ -22,7 +22,7 @@ import ( "github.com/dolthub/doltgresql/utils" ) -//TODO: determine how to handle messages that are larger than the buffer +// TODO: determine how to handle messages that are larger than the buffer const bufferSize = 2048 // connBuffers maintains a pool of buffers, reusable between connections. diff --git a/postgres/parser/encoding/decimal.go b/postgres/parser/encoding/decimal.go index 865eaf3bfa..853fe6e8e9 100644 --- a/postgres/parser/encoding/decimal.go +++ b/postgres/parser/encoding/decimal.go @@ -509,9 +509,9 @@ func decodeLargeNumber( // EncodeNonsortingDecimal returns the resulting byte slice with the // encoded decimal appended to b. The encoding is limited compared to // standard encodings in this package in that -// - It will not sort lexicographically -// - It does not encode its length or terminate itself, so decoding -// functions must be provided the exact encoded bytes +// - It will not sort lexicographically +// - It does not encode its length or terminate itself, so decoding +// functions must be provided the exact encoded bytes // // The encoding assumes that any number can be written as ±0.xyz... * 10^exp, // where xyz is a digit string, x != 0, and the last decimal in xyz is also @@ -525,21 +525,22 @@ func decodeLargeNumber( // the digit string is added as a big-endian byte slice. // // All together, the encoding looks like: -// . +// +// . 
// // The markers are shared with the sorting decimal encoding as follows: -// decimalNaN -> decimalNaN -// decimalNegativeInfinity -> decimalNegativeInfinity -// decimalNegLarge -> decimalNegValPosExp -// decimalNegMedium -> decimalNegValZeroExp -// decimalNegSmall -> decimalNegValNegExp -// decimalZero -> decimalZero -// decimalPosSmall -> decimalPosValNegExp -// decimalPosMedium -> decimalPosValZeroExp -// decimalPosLarge -> decimalPosValPosExp -// decimalInfinity -> decimalInfinity -// decimalNaNDesc -> decimalNaNDesc // +// decimalNaN -> decimalNaN +// decimalNegativeInfinity -> decimalNegativeInfinity +// decimalNegLarge -> decimalNegValPosExp +// decimalNegMedium -> decimalNegValZeroExp +// decimalNegSmall -> decimalNegValNegExp +// decimalZero -> decimalZero +// decimalPosSmall -> decimalPosValNegExp +// decimalPosMedium -> decimalPosValZeroExp +// decimalPosLarge -> decimalPosValPosExp +// decimalInfinity -> decimalInfinity +// decimalNaNDesc -> decimalNaNDesc func EncodeNonsortingDecimal(b []byte, d *apd.Decimal) []byte { neg := d.Negative switch d.Form { diff --git a/postgres/parser/encoding/encoding.go b/postgres/parser/encoding/encoding.go index 30c0067cc4..9243d69bfd 100644 --- a/postgres/parser/encoding/encoding.go +++ b/postgres/parser/encoding/encoding.go @@ -53,11 +53,11 @@ const ( // The gap between floatNaNDesc and bytesMarker was left for // compatibility reasons. bytesMarker byte = 0x12 - bytesDescMarker byte = bytesMarker + 1 - timeMarker byte = bytesDescMarker + 1 - durationBigNegMarker byte = timeMarker + 1 // Only used for durations < MinInt64 nanos. - durationMarker byte = durationBigNegMarker + 1 - durationBigPosMarker byte = durationMarker + 1 // Only used for durations > MaxInt64 nanos. + bytesDescMarker = bytesMarker + 1 + timeMarker = bytesDescMarker + 1 + durationBigNegMarker = timeMarker + 1 // Only used for durations < MinInt64 nanos. + durationMarker = durationBigNegMarker + 1 + durationBigPosMarker = durationMarker + 1 // Only used for durations > MaxInt64 nanos. decimalNaN = durationBigPosMarker + 1 // 24 decimalNegativeInfinity = decimalNaN + 1 @@ -569,6 +569,7 @@ func getBitArrayWordsLen(b []byte, term byte) (int, int, error) { // Type represents the type of a value encoded by // Encode{Null,NotNull,Varint,Uvarint,Float,Bytes}. +// //go:generate stringer -type=Type type Type int @@ -912,12 +913,16 @@ func EncodeUntaggedDecimalValue(appendTo []byte, d *apd.Decimal) []byte { // returned colID should be discarded.) // // Concretely: -// b := ... -// typeOffset, _, colID, typ, err := DecodeValueTag(b) -// _, _, _, typ, err := DecodeValueTag(b[typeOffset:]) +// +// b := ... +// typeOffset, _, colID, typ, err := DecodeValueTag(b) +// _, _, _, typ, err := DecodeValueTag(b[typeOffset:]) +// // will return the same typ and err and -// DecodeFooValue(b) -// DecodeFooValue(b[typeOffset:]) +// +// DecodeFooValue(b) +// DecodeFooValue(b[typeOffset:]) +// // will return the same thing. PeekValueLength works as expected with either of // `b` or `b[typeOffset:]`. 
func DecodeValueTag(b []byte) (typeOffset int, dataOffset int, colID uint32, typ Type, err error) { diff --git a/postgres/parser/errorutil/unimplemented/unimplemented.go b/postgres/parser/errorutil/unimplemented/unimplemented.go index 121bea15a6..b221e7220a 100644 --- a/postgres/parser/errorutil/unimplemented/unimplemented.go +++ b/postgres/parser/errorutil/unimplemented/unimplemented.go @@ -71,7 +71,7 @@ func NewWithIssueDetailf(issue int, detail, format string, args ...interface{}) return unimplementedInternal(1 /*depth*/, issue, detail, true /*format*/, format, args...) } -//TODO: remove issue int +// TODO: remove issue int func unimplementedInternal(depth, issue int, detail string, format bool, msg string, args ...interface{}) error { // Create the issue link. link := errors.IssueLink{Detail: detail} diff --git a/postgres/parser/geo/geo.go b/postgres/parser/geo/geo.go index 04dca98dda..1098651a2f 100644 --- a/postgres/parser/geo/geo.go +++ b/postgres/parser/geo/geo.go @@ -624,7 +624,8 @@ func AdjustGeomTSRID(t geom.T, srid geopb.SRID) { // IsLinearRingCCW returns whether a given linear ring is counter clock wise. // See 2.07 of http://www.faqs.org/faqs/graphics/algorithms-faq/. // "Find the lowest vertex (or, if there is more than one vertex with the same lowest coordinate, -// the rightmost of those vertices) and then take the cross product of the edges fore and aft of it." +// +// the rightmost of those vertices) and then take the cross product of the edges fore and aft of it." func IsLinearRingCCW(linearRing *geom.LinearRing) bool { smallestIdx := 0 smallest := linearRing.Coord(0) diff --git a/postgres/parser/geo/geopb/geopb.proto b/postgres/parser/geo/geopb/geopb.proto index 33ae2c6b17..953fd7d005 100644 --- a/postgres/parser/geo/geopb/geopb.proto +++ b/postgres/parser/geo/geopb/geopb.proto @@ -60,9 +60,9 @@ message SpatialObject { // Type is the type of the SpatialObject. SpatialObjectType type = 1; // EWKB is the EWKB representation of the spatial object. - bytes ewkb = 2 [(gogoproto.customname)="EWKB",(gogoproto.casttype)="EWKB"]; + bytes ewkb = 2 [(gogoproto.customname) = "EWKB", (gogoproto.casttype) = "EWKB"]; // SRID is the denormalized SRID derived from the EWKB. - int32 srid = 3 [(gogoproto.customname)="SRID",(gogoproto.casttype)="SRID"]; + int32 srid = 3 [(gogoproto.customname) = "SRID", (gogoproto.casttype) = "SRID"]; // ShapeType is denormalized ShapeType derived from the EWKB. ShapeType shape_type = 4; // BoundingBox is the bounding box of the SpatialObject. diff --git a/postgres/parser/ipaddr/ipaddr.go b/postgres/parser/ipaddr/ipaddr.go index b27ee13bc1..f55ef91185 100644 --- a/postgres/parser/ipaddr/ipaddr.go +++ b/postgres/parser/ipaddr/ipaddr.go @@ -48,12 +48,12 @@ type Addr utils.Uint128 // IPAddr stores an IP address's family, IP, and host mask. This was chosen over // Go's "net" IP, as that struct doesn't work well for what we need to do. -// - It discards information when parsing IPv4, forcing it to be IPv6, and then -// assuming IPv4-mapped IPv6 addresses are purely IPv4 (only for printing). -// This is solved by having a Family field. -// - ParseIP and ParseCIDR are very strict, whereas postgres' INET and CIDR -// have very relaxed constraints for parsing an IP. -// - Doing int64 operations is much more efficient than byte slice operations. +// - It discards information when parsing IPv4, forcing it to be IPv6, and then +// assuming IPv4-mapped IPv6 addresses are purely IPv4 (only for printing). +// This is solved by having a Family field. 
+// - ParseIP and ParseCIDR are very strict, whereas postgres' INET and CIDR +// have very relaxed constraints for parsing an IP. +// - Doing int64 operations is much more efficient than byte slice operations. type IPAddr struct { // Family denotes what type of IP the original IP was. Family IPFamily diff --git a/postgres/parser/lex/all_keywords.go b/postgres/parser/lex/all_keywords.go index d3734fb1df..e60b4a223e 100644 --- a/postgres/parser/lex/all_keywords.go +++ b/postgres/parser/lex/all_keywords.go @@ -49,7 +49,7 @@ import ( "text/template" ) -//TODO: investigate if this file may be safely deleted (maybe generation script too?) +// TODO: investigate if this file may be safely deleted (maybe generation script too?) func main() { blockRE := regexp.MustCompile(`^.*_keyword:`) keywordRE := regexp.MustCompile(`[A-Z].*`) diff --git a/postgres/parser/pgdate/field_extract.go b/postgres/parser/pgdate/field_extract.go index 6b4bfa7f35..21ff0f3701 100644 --- a/postgres/parser/pgdate/field_extract.go +++ b/postgres/parser/pgdate/field_extract.go @@ -692,9 +692,12 @@ func (fe *fieldExtract) MakeTimeWithoutTimezone() time.Time { // stropTimezone converts the given time to a time that looks the same but is in // UTC, e.g. from -// 2020-06-26 01:02:03 +0200 CEST +// +// 2020-06-26 01:02:03 +0200 CEST +// // to -// 2020-06-27 01:02:03 +0000 UTC. +// +// 2020-06-27 01:02:03 +0000 UTC. // // Note that the two times don't represent the same time instant. func stripTimezone(t time.Time) time.Time { diff --git a/postgres/parser/pgdate/parsing.go b/postgres/parser/pgdate/parsing.go index 1b1d7f6daf..39561cc3ec 100644 --- a/postgres/parser/pgdate/parsing.go +++ b/postgres/parser/pgdate/parsing.go @@ -113,13 +113,12 @@ const ( // ParseDate converts a string into Date. // // Any specified timezone is inconsequential. Examples: -// - "now": parses to the local date (in the current timezone) -// - "2020-06-26 01:09:15.511971": parses to '2020-06-26' -// - "2020-06-26 01:09:15.511971-05": parses to '2020-06-26' +// - "now": parses to the local date (in the current timezone) +// - "2020-06-26 01:09:15.511971": parses to '2020-06-26' +// - "2020-06-26 01:09:15.511971-05": parses to '2020-06-26' // // The dependsOnContext return value indicates if we had to consult the given // `now` value (either for the time or the local timezone). -// func ParseDate(now time.Time, mode ParseMode, s string) (_ Date, dependsOnContext bool, _ error) { fe := fieldExtract{ currentTime: now, @@ -174,8 +173,8 @@ func ParseTime( // location. // // Any specified timezone is inconsequential. Examples: -// - "now": parses to the local time of day (in the current timezone) -// - "01:09:15.511971" and "01:09:15.511971-05" parse to the same result +// - "now": parses to the local time of day (in the current timezone) +// - "01:09:15.511971" and "01:09:15.511971-05" parse to the same result // // The dependsOnContext return value indicates if we had to consult the given // `now` value (either for the time or the local timezone). 
diff --git a/postgres/parser/pgerror/errors.proto b/postgres/parser/pgerror/errors.proto index 7c0e42d78e..f8ef0c5838 100644 --- a/postgres/parser/pgerror/errors.proto +++ b/postgres/parser/pgerror/errors.proto @@ -40,11 +40,11 @@ message Error { string severity = 8; message Source { - string file = 1; - int32 line = 2; - string function = 3; + string file = 1; + int32 line = 2; + string function = 3; } Source source = 5; - reserved 6,7; + reserved 6, 7; }; diff --git a/postgres/parser/pgerror/flatten.go b/postgres/parser/pgerror/flatten.go index 1ab282a338..e4dc919199 100644 --- a/postgres/parser/pgerror/flatten.go +++ b/postgres/parser/pgerror/flatten.go @@ -38,9 +38,9 @@ import ( // the name implies, the details from the chain of causes is projected // into a single struct. This is useful in at least two places: // -// - to generate Error objects suitable for 19.1 nodes, which -// only recognize this type of payload. -// - to generate an error packet on pgwire. +// - to generate Error objects suitable for 19.1 nodes, which +// only recognize this type of payload. +// - to generate an error packet on pgwire. // // Additionally, this can be used in the remainder of the code // base when an Error object is expected, until that code diff --git a/postgres/parser/pgerror/pgcode.go b/postgres/parser/pgerror/pgcode.go index 9e9d4eb618..539633a7a4 100644 --- a/postgres/parser/pgerror/pgcode.go +++ b/postgres/parser/pgerror/pgcode.go @@ -57,20 +57,24 @@ func HasCandidateCode(err error) bool { // - at each level: // // - if there is a candidate code at that level, that is used; +// // - otherwise, it calls computeDefaultCode(). // if the function returns an empty string, // UncategorizedError is used. // An example implementation for computeDefaultCode is provided below. // -// - after that, it combines the code computed already for the cause -// (inner) and the new code just computed at the current level (outer) -// as follows: +// - after that, it combines the code computed already for the cause +// (inner) and the new code just computed at the current level (outer) +// as follows: // // - if the outer code is uncategorized, the inner code is kept no // matter what. +// // - if the outer code has the special XX prefix, that is kept. // (The "XX" prefix signals importance in the pg code hierarchy.) +// // - if the inner code is not uncategorized, it is retained. +// // - otherwise the outer code is retained. // // This function should not be used directly. It is only exported diff --git a/postgres/parser/pretty/document.go b/postgres/parser/pretty/document.go index 37329eda2b..b9047ae405 100644 --- a/postgres/parser/pretty/document.go +++ b/postgres/parser/pretty/document.go @@ -41,7 +41,6 @@ // // For example code with SQL to experiment further, refer to // https://github.com/knz/prettier/ -// package pretty import "fmt" @@ -96,8 +95,9 @@ var Line Doc = line{} // // For example, text "hello" <> softbreak <> text "world" // flattens to "helloworld" (one word) but splits across lines as: -// hello -// world +// +// hello +// world // // This is a common extension to Wadler's printer. 
// diff --git a/postgres/parser/pretty/util.go b/postgres/parser/pretty/util.go index 4e8c8ba541..fbf72bb813 100644 --- a/postgres/parser/pretty/util.go +++ b/postgres/parser/pretty/util.go @@ -52,9 +52,12 @@ func JoinDoc(s Doc, d ...Doc) Doc { // For example: // aaaa // bbb -// bbb +// +// bbb +// // ccc -// ccc +// +// ccc func JoinNestedRight(sep Doc, nested ...Doc) Doc { switch len(nested) { case 0: @@ -229,16 +232,16 @@ const ( ) // Table defines a document that formats a list of pairs of items either: -// - as a 2-column table, with the two columns aligned for example: -// SELECT aaa -// bbb -// FROM ccc -// - as sections, for example: -// SELECT -// aaa -// bbb -// FROM -// ccc +// - as a 2-column table, with the two columns aligned for example: +// SELECT aaa +// bbb +// FROM ccc +// - as sections, for example: +// SELECT +// aaa +// bbb +// FROM +// ccc // // We restrict the left value in each list item to be a one-line string // to make the width computation efficient. diff --git a/postgres/parser/sem/tree/alter_table.go b/postgres/parser/sem/tree/alter_table.go index d3550f1cfa..7f54d02e7f 100644 --- a/postgres/parser/sem/tree/alter_table.go +++ b/postgres/parser/sem/tree/alter_table.go @@ -123,11 +123,11 @@ func (node *AlterTableAddColumn) Format(ctx *FmtCtx) { // stored in node.Cmds, into top-level commands to add those constraints. // Currently, this only applies to checks. For example, the ADD COLUMN in // -// ALTER TABLE t ADD COLUMN a INT CHECK (a < 1) +// ALTER TABLE t ADD COLUMN a INT CHECK (a < 1) // // is transformed into two commands, as in // -// ALTER TABLE t ADD COLUMN a INT, ADD CONSTRAINT check_a CHECK (a < 1) +// ALTER TABLE t ADD COLUMN a INT, ADD CONSTRAINT check_a CHECK (a < 1) // // (with an auto-generated name). // @@ -137,8 +137,7 @@ func (node *AlterTableAddColumn) Format(ctx *FmtCtx) { // constraints. For example, the following statement is accepted in // CockroachDB and Postgres, but not necessarily other SQL databases: // -// ALTER TABLE t ADD COLUMN a INT CHECK (a < b) -// +// ALTER TABLE t ADD COLUMN a INT CHECK (a < b) func (node *AlterTable) HoistAddColumnConstraints() { var normalizedCmds AlterTableCmds diff --git a/postgres/parser/sem/tree/casts.go b/postgres/parser/sem/tree/casts.go index dbda06bb30..4797b00334 100644 --- a/postgres/parser/sem/tree/casts.go +++ b/postgres/parser/sem/tree/casts.go @@ -44,17 +44,19 @@ type castInfo struct { // // Each cast defines a volatility: // -// - immutable casts yield the same result on the same arguments in whatever -// context they are evaluated. +// - immutable casts yield the same result on the same arguments in whatever +// context they are evaluated. // -// - stable casts can yield a different result depending on the evaluation context: -// - session settings (e.g. bytes encoding format) -// - current timezone -// - current time (e.g. 'now'::string). +// - stable casts can yield a different result depending on the evaluation context: +// +// - session settings (e.g. bytes encoding format) +// +// - current timezone +// +// - current time (e.g. 'now'::string). // // TODO(radu): move the PerformCast code for each cast into functions defined // within each cast. -// var validCasts = []castInfo{ // Casts to BitFamily. 
{from: types.UnknownFamily, to: types.BitFamily, volatility: VolatilityImmutable}, diff --git a/postgres/parser/sem/tree/constant.go b/postgres/parser/sem/tree/constant.go index 30816e37d2..568e96edac 100644 --- a/postgres/parser/sem/tree/constant.go +++ b/postgres/parser/sem/tree/constant.go @@ -205,20 +205,22 @@ func (expr *NumVal) FormattedString() string { } // canBeInt64 checks if it's possible for the value to become an int64: -// 1 = yes -// 1.0 = yes -// 1.1 = no -// 123...overflow...456 = no +// +// 1 = yes +// 1.0 = yes +// 1.1 = no +// 123...overflow...456 = no func (expr *NumVal) canBeInt64() bool { _, err := expr.AsInt64() return err == nil } // ShouldBeInt64 checks if the value naturally is an int64: -// 1 = yes -// 1.0 = no -// 1.1 = no -// 123...overflow...456 = no +// +// 1 = yes +// 1.0 = no +// 1.1 = no +// 123...overflow...456 = no func (expr *NumVal) ShouldBeInt64() bool { return expr.Kind() == constant.Int && expr.canBeInt64() } @@ -538,12 +540,13 @@ var ( // respective datum types could succeed. The hope was to eliminate impossibilities // and constrain the returned type sets as much as possible. Unfortunately, two issues // were found with this approach: -// - date and timestamp formats do not always imply a fixed-length valid input. For -// instance, timestamp formats that take fractional seconds can successfully parse -// inputs of varied length. -// - the set of date and timestamp formats are not disjoint, which means that ambiguity -// can not be eliminated when inferring the type of string literals that use these -// shared formats. +// - date and timestamp formats do not always imply a fixed-length valid input. For +// instance, timestamp formats that take fractional seconds can successfully parse +// inputs of varied length. +// - the set of date and timestamp formats are not disjoint, which means that ambiguity +// can not be eliminated when inferring the type of string literals that use these +// shared formats. +// // While these limitations still permitted improved type inference in many cases, they // resulted in behavior that was ultimately incomplete, resulted in unpredictable levels // of inference, and occasionally failed to eliminate ambiguity. Further heuristics could diff --git a/postgres/parser/sem/tree/create.go b/postgres/parser/sem/tree/create.go index cc290a06f0..ae0222c875 100644 --- a/postgres/parser/sem/tree/create.go +++ b/postgres/parser/sem/tree/create.go @@ -1171,19 +1171,19 @@ func (node *CreateTable) FormatBody(ctx *FmtCtx) { // inline with their columns and makes them table-level constraints, stored in // n.Defs. For example, the foreign key constraint in // -// CREATE TABLE foo (a INT REFERENCES bar(a)) +// CREATE TABLE foo (a INT REFERENCES bar(a)) // // gets pulled into a top-level constraint like: // -// CREATE TABLE foo (a INT, FOREIGN KEY (a) REFERENCES bar(a)) +// CREATE TABLE foo (a INT, FOREIGN KEY (a) REFERENCES bar(a)) // // Similarly, the CHECK constraint in // -// CREATE TABLE foo (a INT CHECK (a < 1), b INT) +// CREATE TABLE foo (a INT CHECK (a < 1), b INT) // // gets pulled into a top-level constraint like: // -// CREATE TABLE foo (a INT, b INT, CHECK (a < 1)) +// CREATE TABLE foo (a INT, b INT, CHECK (a < 1)) // // Note that some SQL databases require that a constraint attached to a column // to refer only to the column it is attached to. We follow Postgres' behavior, @@ -1191,10 +1191,9 @@ func (node *CreateTable) FormatBody(ctx *FmtCtx) { // constraints. 
For example, the following table definition is accepted in // CockroachDB and Postgres, but not necessarily other SQL databases: // -// CREATE TABLE foo (a INT CHECK (a < b), b INT) +// CREATE TABLE foo (a INT CHECK (a < b), b INT) // // Unique constraints are not hoisted. -// func (node *CreateTable) HoistConstraints() { for _, d := range node.Defs { if col, ok := d.(*ColumnTableDef); ok { diff --git a/postgres/parser/sem/tree/datum.go b/postgres/parser/sem/tree/datum.go index 5f40be8061..11b2ec9df8 100644 --- a/postgres/parser/sem/tree/datum.go +++ b/postgres/parser/sem/tree/datum.go @@ -1583,9 +1583,10 @@ func (d *DTuple) Size() uintptr { // ContainsNull returns true if the tuple contains NULL, possibly nested inside // other tuples. For example, all the following tuples contain NULL: -// (1, 2, NULL) -// ((1, 1), (2, NULL)) -// (((1, 1), (2, 2)), ((3, 3), (4, NULL))) +// +// (1, 2, NULL) +// ((1, 1), (2, NULL)) +// (((1, 1), (2, 2)), ((3, 3), (4, NULL))) func (d *DTuple) ContainsNull() bool { for _, r := range d.D { if r == DNull { @@ -2029,14 +2030,13 @@ func (d *DOid) Size() uintptr { return unsafe.Sizeof(*d) } // // Instead, DOidWrapper allows a standard Datum to be wrapped with a new Oid. // This approach provides two major advantages: -// - performance of the existing Datum types are not affected because they -// do not need to have custom oid.Oids added to their structure. -// - the introduction of new Datum aliases is straightforward and does not require -// additions to typing rules or type-dependent evaluation behavior. +// - performance of the existing Datum types are not affected because they +// do not need to have custom oid.Oids added to their structure. +// - the introduction of new Datum aliases is straightforward and does not require +// additions to typing rules or type-dependent evaluation behavior. // // Types that currently benefit from DOidWrapper are: // - DName => DOidWrapper(*DString, oid.T_name) -// type DOidWrapper struct { Wrapped Datum Oid oid.Oid diff --git a/postgres/parser/sem/tree/expr.go b/postgres/parser/sem/tree/expr.go index 41e1bc6c8b..c026f703e5 100644 --- a/postgres/parser/sem/tree/expr.go +++ b/postgres/parser/sem/tree/expr.go @@ -346,8 +346,10 @@ func (node *ParenExpr) TypedInnerExpr() TypedExpr { // StripParens strips any parentheses surrounding an expression and // returns the inner expression. For instance: -// 1 -> 1 -// (1) -> 1 +// +// 1 -> 1 +// (1) -> 1 +// // ((1)) -> 1 func StripParens(expr Expr) Expr { if p, ok := expr.(*ParenExpr); ok { diff --git a/postgres/parser/sem/tree/format.go b/postgres/parser/sem/tree/format.go index 3b29ec5717..5dcb5466a4 100644 --- a/postgres/parser/sem/tree/format.go +++ b/postgres/parser/sem/tree/format.go @@ -66,15 +66,15 @@ const ( // string will be escaped and enclosed in e'...' regardless of // whether FmtBareStrings is specified. See FmtRawStrings below for // an alternative. - FmtBareStrings FmtFlags = FmtFlags(lex.EncBareStrings) + FmtBareStrings = FmtFlags(lex.EncBareStrings) // FmtBareIdentifiers instructs the pretty-printer to print // identifiers without wrapping quotes in any case. - FmtBareIdentifiers FmtFlags = FmtFlags(lex.EncBareIdentifiers) + FmtBareIdentifiers = FmtFlags(lex.EncBareIdentifiers) // FmtShowPasswords instructs the pretty-printer to not suppress passwords. // If not set, passwords are replaced by *****. 
- FmtShowPasswords FmtFlags = FmtFlags(lex.EncFirstFreeFlagBit) << iota + FmtShowPasswords = FmtFlags(lex.EncFirstFreeFlagBit) << iota // FmtShowTypes instructs the pretty-printer to // annotate expressions with their resolved types. @@ -160,21 +160,21 @@ const ( // FmtPgwireText instructs the pretty-printer to use // a pg-compatible conversion to strings. See comments // in pgwire_encode.go. - FmtPgwireText FmtFlags = fmtPgwireFormat | FmtFlags(lex.EncBareStrings) + FmtPgwireText = fmtPgwireFormat | FmtFlags(lex.EncBareStrings) // FmtParsable instructs the pretty-printer to produce a representation that // can be parsed into an equivalent expression. If there is a chance that the // formatted data will be stored durably on disk or sent to other nodes, // then this formatting directive is not appropriate, and FmtSerializable // should be used instead. - FmtParsable FmtFlags = fmtDisambiguateDatumTypes | FmtParsableNumerics + FmtParsable = fmtDisambiguateDatumTypes | FmtParsableNumerics // FmtSerializable instructs the pretty-printer to produce a representation // for expressions that can be serialized to disk. It serializes user defined // types using representations that are stable across changes of the type // itself. This should be used when serializing expressions that will be // stored on disk, like DEFAULT expressions of columns. - FmtSerializable FmtFlags = FmtParsable | fmtStaticallyFormatUserDefinedTypes + FmtSerializable = FmtParsable | fmtStaticallyFormatUserDefinedTypes // FmtCheckEquivalence instructs the pretty-printer to produce a representation // that can be used to check equivalence of expressions. Specifically: @@ -187,7 +187,7 @@ const ( // - user defined types and datums of user defined types are formatted // using static representations to avoid name resolution and invalidation // due to changes in the underlying type. - FmtCheckEquivalence FmtFlags = fmtSymbolicVars | + FmtCheckEquivalence = fmtSymbolicVars | fmtDisambiguateDatumTypes | FmtParsableNumerics | fmtStaticallyFormatUserDefinedTypes @@ -196,7 +196,7 @@ const ( // for the output of array_to_string(). This de-quotes // the strings enclosed in the array and skips the normal escaping // of strings. Special characters are hex-escaped. - FmtArrayToString FmtFlags = FmtBareStrings | fmtRawStrings + FmtArrayToString = FmtBareStrings | fmtRawStrings // FmtExport, if set, formats datums in a raw form suitable for // EXPORT, e.g. suitable for output into a CSV file. The intended @@ -214,10 +214,10 @@ const ( // // TODO(mjibson): Note that this is currently not suitable for // emitting arrays or tuples. See: #33429 - FmtExport FmtFlags = FmtBareStrings | fmtRawStrings + FmtExport = FmtBareStrings | fmtRawStrings ) -const flagsRequiringAnnotations FmtFlags = FmtAlwaysQualifyTableNames +const flagsRequiringAnnotations = FmtAlwaysQualifyTableNames // NoCopy may be embedded into structs which must not be copied // after the first use. diff --git a/postgres/parser/sem/tree/hide_constants.go b/postgres/parser/sem/tree/hide_constants.go index 8f6987c6c0..d48be8281b 100644 --- a/postgres/parser/sem/tree/hide_constants.go +++ b/postgres/parser/sem/tree/hide_constants.go @@ -104,12 +104,13 @@ func (node *Exprs) formatHideConstants(ctx *FmtCtx) { // placeholders and longer than 1 element as a tuple of its first // two elements, scrubbed. // e.g. 
(1) -> (_) -// (1, 2) -> (_, _) -// (1, 2, 3) -> (_, _, __more3__) -// ROW() -> ROW() -// ROW($1, $2, $3) -> ROW($1, $2, __more3__) -// (1+2, 2+3, 3+4) -> (_ + _, _ + _, _ + _) -// (1+2, b, c) -> (_ + _, b, c) +// +// (1, 2) -> (_, _) +// (1, 2, 3) -> (_, _, __more3__) +// ROW() -> ROW() +// ROW($1, $2, $3) -> ROW($1, $2, __more3__) +// (1+2, 2+3, 3+4) -> (_ + _, _ + _, _ + _) +// (1+2, b, c) -> (_ + _, b, c) func (node *Tuple) formatHideConstants(ctx *FmtCtx) { if len(node.Exprs) < 2 { node.Format(ctx) @@ -146,9 +147,10 @@ func (node *Tuple) formatHideConstants(ctx *FmtCtx) { // literals or placeholders and longer than 1 element as an array // expression of its first two elements, scrubbed. // e.g. array[1] -> array[_] -// array[1, 2] -> array[_, _] -// array[1, 2, 3] -> array[_, _, __more3__] -// array[1+2, 2+3, 3+4] -> array[_ + _, _ + _, _ + _] +// +// array[1, 2] -> array[_, _] +// array[1, 2, 3] -> array[_, _, __more3__] +// array[1+2, 2+3, 3+4] -> array[_ + _, _ + _, _ + _] func (node *Array) formatHideConstants(ctx *FmtCtx) { if len(node.Exprs) < 2 { node.Format(ctx) diff --git a/postgres/parser/sem/tree/interval.go b/postgres/parser/sem/tree/interval.go index 54a92bc5d9..c7ab11b7ae 100644 --- a/postgres/parser/sem/tree/interval.go +++ b/postgres/parser/sem/tree/interval.go @@ -198,8 +198,8 @@ func newInvalidSQLDurationError(s string) error { // Parses a SQL standard interval string. // See the following links for examples: -// - http://www.postgresql.org/docs/9.1/static/datatype-datetime.html#DATATYPE-INTERVAL-INPUT-EXAMPLES -// - http://www.ibm.com/support/knowledgecenter/SSGU8G_12.1.0/com.ibm.esqlc.doc/ids_esqlc_0190.htm +// - http://www.postgresql.org/docs/9.1/static/datatype-datetime.html#DATATYPE-INTERVAL-INPUT-EXAMPLES +// - http://www.ibm.com/support/knowledgecenter/SSGU8G_12.1.0/com.ibm.esqlc.doc/ids_esqlc_0190.htm func sqlStdToDuration(s string, itm types.IntervalTypeMetadata) (duration.Duration, error) { var d duration.Duration parts := strings.Fields(s) @@ -424,9 +424,9 @@ func sqlStdToDuration(s string, itm types.IntervalTypeMetadata) (duration.Durati // Parses an ISO8601 (with designators) string. // See the following links for examples: -// - http://www.postgresql.org/docs/9.1/static/datatype-datetime.html#DATATYPE-INTERVAL-INPUT-EXAMPLES -// - https://en.wikipedia.org/wiki/ISO_8601#Time_intervals -// - https://en.wikipedia.org/wiki/ISO_8601#Durations +// - http://www.postgresql.org/docs/9.1/static/datatype-datetime.html#DATATYPE-INTERVAL-INPUT-EXAMPLES +// - https://en.wikipedia.org/wiki/ISO_8601#Time_intervals +// - https://en.wikipedia.org/wiki/ISO_8601#Durations func iso8601ToDuration(s string) (duration.Duration, error) { var d duration.Duration if len(s) == 0 || s[0] != 'P' { diff --git a/postgres/parser/sem/tree/name_part.go b/postgres/parser/sem/tree/name_part.go index 882e3108a1..079dc8d2ac 100644 --- a/postgres/parser/sem/tree/name_part.go +++ b/postgres/parser/sem/tree/name_part.go @@ -68,13 +68,13 @@ func NameString(s string) string { // identifier suitable for printing in error messages, avoiding a heap // allocation. func ErrNameStringP(s *string) string { - return ErrString(((*Name)(s))) + return ErrString((*Name)(s)) } // ErrNameString escapes an identifier stored a string to a SQL // identifier suitable for printing in error messages. 
func ErrNameString(s string) string { - return ErrString(((*Name)(&s))) + return ErrString((*Name)(&s)) } // Normalize normalizes to lowercase and Unicode Normalization Form C diff --git a/postgres/parser/sem/tree/name_resolution.go b/postgres/parser/sem/tree/name_resolution.go index 6c1cd85028..a144c582fc 100644 --- a/postgres/parser/sem/tree/name_resolution.go +++ b/postgres/parser/sem/tree/name_resolution.go @@ -129,7 +129,7 @@ const ( // database/catalog. PublicSchema string = sessiondata.PublicSchemaName // PublicSchemaName is the same, typed as Name. - PublicSchemaName Name = Name(PublicSchema) + PublicSchemaName = Name(PublicSchema) ) // NumResolutionResults represents the number of results in the lookup diff --git a/postgres/parser/sem/tree/overload.go b/postgres/parser/sem/tree/overload.go index d566e3cf4f..98512fa417 100644 --- a/postgres/parser/sem/tree/overload.go +++ b/postgres/parser/sem/tree/overload.go @@ -430,9 +430,11 @@ type typeCheckOverloadState struct { // expression parameters, along with an optional desired return type. It returns the expression // parameters after being type checked, along with a slice of candidate overloadImpls. The // slice may have length: -// 0: overload resolution failed because no compatible overloads were found -// 1: overload resolution succeeded -// 2+: overload resolution failed because of ambiguity +// +// 0: overload resolution failed because no compatible overloads were found +// 1: overload resolution succeeded +// 2+: overload resolution failed because of ambiguity +// // The inBinOp parameter denotes whether this type check is occurring within a binary operator, // in which case we may need to make a guess that the two parameters are of the same type if one // of them is NULL. diff --git a/postgres/parser/sem/tree/placeholders.go b/postgres/parser/sem/tree/placeholders.go index e740f41d02..cbcb29d67a 100644 --- a/postgres/parser/sem/tree/placeholders.go +++ b/postgres/parser/sem/tree/placeholders.go @@ -120,7 +120,7 @@ func (p *PlaceholderTypesInfo) Type(idx PlaceholderIdx) (_ *types.T, ok bool) { if t == nil && len(p.TypeHints) >= int(idx) { t = p.TypeHints[idx] } - return t, (t != nil) + return t, t != nil } // ValueType returns the type of the value that must be supplied for a placeholder. @@ -135,7 +135,7 @@ func (p *PlaceholderTypesInfo) ValueType(idx PlaceholderIdx) (_ *types.T, ok boo if t == nil { t = p.Types[idx] } - return t, (t != nil) + return t, t != nil } // SetType assigns a known type to a placeholder. diff --git a/postgres/parser/sem/tree/pretty.go b/postgres/parser/sem/tree/pretty.go index 8d570f65fc..dca4bc7151 100644 --- a/postgres/parser/sem/tree/pretty.go +++ b/postgres/parser/sem/tree/pretty.go @@ -405,25 +405,24 @@ func (node *Exprs) doc(p *PrettyCfg) pretty.Doc { // peelBinaryOperand conditionally (p.Simplify) removes the // parentheses around an expression. 
The parentheses are always // removed in the following conditions: -// - if the operand is a unary operator (these are always -// of higher precedence): "(-a) * b" -> "-a * b" -// - if the operand is a binary operator and its precedence -// is guaranteed to be higher: "(a * b) + c" -> "a * b + c" +// - if the operand is a unary operator (these are always +// of higher precedence): "(-a) * b" -> "-a * b" +// - if the operand is a binary operator and its precedence +// is guaranteed to be higher: "(a * b) + c" -> "a * b + c" // // Additionally, iff sameLevel is set, then parentheses are removed // around any binary operator that has the same precedence level as // the parent. // sameLevel can be set: // -// - for the left operand of all binary expressions, because -// (in pg SQL) all binary expressions are left-associative. -// This rewrites e.g. "(a + b) - c" -> "a + b - c" -// and "(a - b) + c" -> "a - b + c" -// - for the right operand when the parent operator is known -// to be fully associative, e.g. -// "a + (b - c)" -> "a + b - c" because "+" is fully assoc, -// but "a - (b + c)" cannot be simplified because "-" is not fully associative. -// +// - for the left operand of all binary expressions, because +// (in pg SQL) all binary expressions are left-associative. +// This rewrites e.g. "(a + b) - c" -> "a + b - c" +// and "(a - b) + c" -> "a - b + c" +// - for the right operand when the parent operator is known +// to be fully associative, e.g. +// "a + (b - c)" -> "a + b - c" because "+" is fully assoc, +// but "a - (b + c)" cannot be simplified because "-" is not fully associative. func (p *PrettyCfg) peelBinaryOperand(e Expr, sameLevel bool, parenPrio int) Expr { if !p.Simplify { return e diff --git a/postgres/parser/sem/tree/select.go b/postgres/parser/sem/tree/select.go index 30c8ed71d5..7d130cf1cf 100644 --- a/postgres/parser/sem/tree/select.go +++ b/postgres/parser/sem/tree/select.go @@ -278,10 +278,11 @@ type IndexID uint32 // IndexFlags represents "@" or "@{param[,param]}" where // param is one of: -// - FORCE_INDEX= -// - ASC / DESC -// - NO_INDEX_JOIN -// - IGNORE_FOREIGN_KEYS +// - FORCE_INDEX= +// - ASC / DESC +// - NO_INDEX_JOIN +// - IGNORE_FOREIGN_KEYS +// // It is used optionally after a table name in SELECT statements. type IndexFlags struct { Index UnrestrictedName @@ -338,8 +339,8 @@ func (ih *IndexFlags) CombineWith(other *IndexFlags) error { } // Check verifies if the flags are valid: -// - ascending/descending is not specified without an index; -// - no_index_join isn't specified with an index. +// - ascending/descending is not specified without an index; +// - no_index_join isn't specified with an index. func (ih *IndexFlags) Check() error { if ih.NoIndexJoin && ih.ForceIndex() { return errors.New("FORCE_INDEX cannot be specified in conjunction with NO_INDEX_JOIN") diff --git a/postgres/parser/sem/tree/table_name.go b/postgres/parser/sem/tree/table_name.go index 09e765d68a..08687ff98c 100644 --- a/postgres/parser/sem/tree/table_name.go +++ b/postgres/parser/sem/tree/table_name.go @@ -170,18 +170,18 @@ func (ts *TableNames) String() string { return AsString(ts) } // TableIndexName refers to a table index. There are a few cases: // -// - if both the table name and the index name are set, refers to a specific -// index in a specific table. +// - if both the table name and the index name are set, refers to a specific +// index in a specific table. // -// - if the table name is set and index name is empty, refers to the primary -// index of that table. 
+// - if the table name is set and index name is empty, refers to the primary +// index of that table. // -// - if the table name is empty and the index name is set, refers to an index -// of that name among all tables within a catalog/schema; if there is a -// duplicate name, that will result in an error. Note that it is possible to -// specify the schema or catalog without specifying a table name; in this -// case, Table.ObjectNamePrefix has the fields set but Table.ObjectName is -// empty. +// - if the table name is empty and the index name is set, refers to an index +// of that name among all tables within a catalog/schema; if there is a +// duplicate name, that will result in an error. Note that it is possible to +// specify the schema or catalog without specifying a table name; in this +// case, Table.ObjectNamePrefix has the fields set but Table.ObjectName is +// empty. type TableIndexName struct { Table TableName Index UnrestrictedName diff --git a/postgres/parser/sem/tree/type_check.go b/postgres/parser/sem/tree/type_check.go index d827243cca..eaeb9547c6 100644 --- a/postgres/parser/sem/tree/type_check.go +++ b/postgres/parser/sem/tree/type_check.go @@ -2532,17 +2532,17 @@ func (*placeholderAnnotationVisitor) VisitPost(expr Expr) Expr { return expr } // provided Statement, annotating all placeholders with a type in either of the following // situations: // -// - the placeholder is the subject of an explicit type annotation in at least one -// of its occurrences. If it is subject to multiple explicit type annotations -// where the types are not all in agreement, or if the placeholder already has -// a type hint in the placeholder map which conflicts with the explicit type -// annotation type, an error will be thrown. +// - the placeholder is the subject of an explicit type annotation in at least one +// of its occurrences. If it is subject to multiple explicit type annotations +// where the types are not all in agreement, or if the placeholder already has +// a type hint in the placeholder map which conflicts with the explicit type +// annotation type, an error will be thrown. // -// - the placeholder is the subject to a cast of the same type in all -// occurrences of the placeholder. If the placeholder is subject to casts of -// multiple types, or if it has occurrences without a cast, no error will be -// thrown but the type will not be inferred. If the placeholder already has a -// type hint, that type will be kept regardless of any casts. +// - the placeholder is the subject to a cast of the same type in all +// occurrences of the placeholder. If the placeholder is subject to casts of +// multiple types, or if it has occurrences without a cast, no error will be +// thrown but the type will not be inferred. If the placeholder already has a +// type hint, that type will be kept regardless of any casts. // // See docs/RFCS/20160203_typing.md for more details on placeholder typing (in // particular section "First pass: placeholder annotations"). diff --git a/postgres/parser/sem/tree/var_name.go b/postgres/parser/sem/tree/var_name.go index d31d26bc6f..47a783211f 100644 --- a/postgres/parser/sem/tree/var_name.go +++ b/postgres/parser/sem/tree/var_name.go @@ -34,9 +34,9 @@ import ( // // Immediately after parsing, the following types can occur: // -// - UnqualifiedStar: a naked star as argument to a function, e.g. count(*), -// or at the top level of a SELECT clause. -// See also uses of StarExpr() and StarSelectExpr() in the grammar. 
+// - UnqualifiedStar: a naked star as argument to a function, e.g. count(*), +// or at the top level of a SELECT clause. +// See also uses of StarExpr() and StarSelectExpr() in the grammar. // // - UnresolvedName: other names of the form `a.b....e` or `a.b...e.*`. // diff --git a/postgres/parser/sem/tree/walk.go b/postgres/parser/sem/tree/walk.go index c8f93ccd30..61f72d44ac 100644 --- a/postgres/parser/sem/tree/walk.go +++ b/postgres/parser/sem/tree/walk.go @@ -733,7 +733,7 @@ func WalkExpr(v Visitor, expr Expr) (newExpr Expr, changed bool) { } // We cannot use == because some Expr implementations are not comparable (e.g. DTuple) - return newExpr, (reflect.ValueOf(expr) != reflect.ValueOf(newExpr)) + return newExpr, reflect.ValueOf(expr) != reflect.ValueOf(newExpr) } // WalkExprConst is a variant of WalkExpr for visitors that do not modify the expression. @@ -765,7 +765,7 @@ func walkReturningClause(v Visitor, clause ReturningClause) (ReturningClause, bo (*ret)[i].Expr = e } } - return ret, (ret != t) + return ret, ret != t case *ReturningNothing, *NoReturningClause: return t, false default: @@ -1463,7 +1463,7 @@ func walkStmt(v Visitor, stmt Statement) (newStmt Statement, changed bool) { return stmt, false } newStmt = walkable.walkStmt(v) - return newStmt, (stmt != newStmt) + return newStmt, stmt != newStmt } type simpleVisitor struct { diff --git a/postgres/parser/timetz/timetz.go b/postgres/parser/timetz/timetz.go index 4625832d83..baed279f46 100644 --- a/postgres/parser/timetz/timetz.go +++ b/postgres/parser/timetz/timetz.go @@ -117,7 +117,6 @@ func Now() TimeTZ { // // The dependsOnContext return value indicates if we had to consult the given // `now` value (either for the time or the local timezone). -// func ParseTimeTZ( now time.Time, s string, precision time.Duration, ) (_ TimeTZ, dependsOnContext bool, _ error) { diff --git a/postgres/parser/types/types.go b/postgres/parser/types/types.go index f6ebd3c3ff..125c752ed5 100644 --- a/postgres/parser/types/types.go +++ b/postgres/parser/types/types.go @@ -48,26 +48,26 @@ import ( // nullable and non-nullable types. It is up to the caller to store that // information separately if it is needed. Here are some example types: // -// INT4 - any 32-bit integer -// DECIMAL(10, 3) - any base-10 value with at most 10 digits, with -// up to 3 to right of decimal point -// FLOAT[] - array of 64-bit IEEE 754 floating-point values -// TUPLE[TIME, VARCHAR(20)] - any pair of values where first value is a time -// of day and the second value is a string having -// up to 20 characters +// INT4 - any 32-bit integer +// DECIMAL(10, 3) - any base-10 value with at most 10 digits, with +// up to 3 to right of decimal point +// FLOAT[] - array of 64-bit IEEE 754 floating-point values +// TUPLE[TIME, VARCHAR(20)] - any pair of values where first value is a time +// of day and the second value is a string having +// up to 20 characters // // Fundamentally, a type consists of the following attributes, each of which has // a corresponding accessor method. Some of these attributes are only defined // for a subset of types. See the method comments for more details. // -// Family - equivalence group of the type (enumeration) -// Oid - Postgres Object ID that describes the type (enumeration) -// Precision - maximum accuracy of the type (numeric) -// Width - maximum size or scale of the type (numeric) -// Locale - location which governs sorting, formatting, etc. 
(string) -// ArrayContents - array element type (T) -// TupleContents - slice of types of each tuple field ([]*T) -// TupleLabels - slice of labels of each tuple field ([]string) +// Family - equivalence group of the type (enumeration) +// Oid - Postgres Object ID that describes the type (enumeration) +// Precision - maximum accuracy of the type (numeric) +// Width - maximum size or scale of the type (numeric) +// Locale - location which governs sorting, formatting, etc. (string) +// ArrayContents - array element type (T) +// TupleContents - slice of types of each tuple field ([]*T) +// TupleLabels - slice of labels of each tuple field ([]string) // // Some types are not currently allowed as the type of a column (e.g. nested // arrays). Other usages of the types package may have similar restrictions. @@ -798,9 +798,8 @@ func MakeQChar(width int32) *T { // that is collated according to the given locale. The new type is based upon // the given string type, having the same oid and width values. For example: // -// STRING => STRING COLLATE EN -// VARCHAR(20) => VARCHAR(20) COLLATE EN -// +// STRING => STRING COLLATE EN +// VARCHAR(20) => VARCHAR(20) COLLATE EN func MakeCollatedString(strType *T, locale string) *T { switch strType.Oid() { case oid.T_text, oid.T_varchar, oid.T_bpchar, oid.T_char, oid.T_name: @@ -1118,12 +1117,12 @@ func (t *T) Locale() string { // Width is the size or scale of the type, such as number of bits or characters. // -// INT : # of bits (64, 32, 16) -// FLOAT : # of bits (64, 32) -// DECIMAL : max # of digits after decimal point (must be <= Precision) -// STRING : max # of characters -// COLLATEDSTRING: max # of characters -// BIT : max # of bits +// INT : # of bits (64, 32, 16) +// FLOAT : # of bits (64, 32) +// DECIMAL : max # of digits after decimal point (must be <= Precision) +// STRING : max # of characters +// COLLATEDSTRING: max # of characters +// BIT : max # of bits // // Width is always 0 for other types. func (t *T) Width() int32 { @@ -1132,12 +1131,12 @@ func (t *T) Width() int32 { // Precision is the accuracy of the data type. // -// DECIMAL : max # digits (must be >= Width/Scale) -// INTERVAL : max # fractional second digits -// TIME : max # fractional second digits -// TIMETZ : max # fractional second digits -// TIMESTAMP : max # fractional second digits -// TIMESTAMPTZ: max # fractional second digits +// DECIMAL : max # digits (must be >= Width/Scale) +// INTERVAL : max # fractional second digits +// TIME : max # fractional second digits +// TIMETZ : max # fractional second digits +// TIMESTAMP : max # fractional second digits +// TIMESTAMPTZ: max # fractional second digits // // Precision for time-related families has special rules for 0 -- see // `precision_is_set` on the `InternalType` proto. @@ -1375,13 +1374,12 @@ func (t *T) Name() string { // than the native CRDB name for it (i.e. the Name function). It is used when // compatibility with PG is important. Examples of differences: // -// Name() PGName() -// -------------------------- -// char bpchar -// "char" char -// bytes bytea -// int4[] _int4 -// +// Name() PGName() +// -------------------------- +// char bpchar +// "char" char +// bytes bytea +// int4[] _int4 func (t *T) PGName() string { name, ok := oidext.TypeName(t.Oid()) if ok { @@ -1405,8 +1403,7 @@ func (t *T) PGName() string { // standard (or by Postgres for any non-standard types). 
This can be looked up // for any type in Postgres using a query similar to this: // -// SELECT format_type(pg_typeof(1::int)::regtype, NULL) -// +// SELECT format_type(pg_typeof(1::int)::regtype, NULL) func (t *T) SQLStandardName() string { return t.SQLStandardNameWithTypmod(false, 0) } @@ -1422,7 +1419,7 @@ func (t *T) TelemetryName() string { // typmod argument, and a boolean which indicates whether or not a typmod was // even specified. The expected results of this function should be, in Postgres: // -// SELECT format_type('thetype'::regype, typmod) +// SELECT format_type('thetype'::regype, typmod) // // Generally, what this does with a non-0 typmod is append the scale, precision // or length of a datatype to the name of the datatype. For example, a @@ -1927,8 +1924,8 @@ func (t *InternalType) Identical(other *InternalType) bool { // protobuf serialization rules. It is backwards-compatible with formats used // by older versions of CRDB. // -// var t T -// err := protoutil.Unmarshal(data, &t) +// var t T +// err := protoutil.Unmarshal(data, &t) // // Unmarshal is part of the protoutil.Message interface. func (t *T) Unmarshal(data []byte) error { @@ -2141,8 +2138,7 @@ func (t *T) upgradeType() error { // version of CRDB so that clusters can run in mixed version mode during // upgrade. // -// bytes, err := protoutil.Marshal(&typ) -// +// bytes, err := protoutil.Marshal(&typ) func (t *T) Marshal() (data []byte, err error) { // First downgrade to a struct that will be serialized in a backwards- // compatible bytes format. @@ -2458,9 +2454,8 @@ func IsWildcardTupleType(t *T) bool { // or []COLLATEDSTRING type. This is tricky in the case of an array of collated // string, since brackets must precede the COLLATE identifier: // -// STRING COLLATE EN -// VARCHAR(20)[] COLLATE DE -// +// STRING COLLATE EN +// VARCHAR(20)[] COLLATE DE func (t *T) collatedStringTypeSQL(isArray bool) string { var buf bytes.Buffer buf.WriteString(t.stringTypeSQL()) @@ -2534,9 +2529,9 @@ func init() { // TypeForNonKeywordTypeName returns the column type for the string name of a // type, if one exists. The third return value indicates: // -// 0 if no error or the type is not known in postgres. -// -1 if the type is known in postgres. -// >0 for a github issue number. +// 0 if no error or the type is not known in postgres. +// -1 if the type is known in postgres. +// >0 for a github issue number. func TypeForNonKeywordTypeName(name string) (*T, bool, int) { t, ok := typNameLiterals[name] if ok { diff --git a/postgres/parser/types/types.proto b/postgres/parser/types/types.proto index 41a3225055..18205b3cef 100644 --- a/postgres/parser/types/types.proto +++ b/postgres/parser/types/types.proto @@ -32,369 +32,369 @@ import "geo/geopb/geopb.proto"; // See the comment header for the T.Family method for more details. enum Family { - option (gogoproto.goproto_enum_prefix) = false; - - // BoolFamily is the family of boolean true/false types. - // - // Canonical: types.Bool - // Oid : T_bool - // - // Examples: - // BOOL - // - BoolFamily = 0; - - // IntFamily is the family of signed integer types. - // - // Canonical: types.Int - // Oid : T_int8, T_int4, T_int2 - // Width : 64, 32, 16 - // - // Examples: - // INT - // INT8 - // INT4 - // - IntFamily = 1; - - // FloatFamily is the family of base-2 floating-point types (IEEE 754). 
- // - // Canonical: types.Float - // Oid : T_float8, T_float4 - // Width : 64, 32 - // - // Examples: - // FLOAT8 - // FLOAT4 - // - FloatFamily = 2; - - // DecimalFamily is the family of base-10 floating and fixed point types. - // - // Canonical : types.Decimal - // Oid : T_numeric - // Precision : max # decimal digits (0 = no specified limit) - // Width (Scale): # digits after decimal point (0 = no specified limit) - // - // Examples: - // DECIMAL - // DECIMAL(10) - // DECIMAL(10,3) - // - DecimalFamily = 3; - - // DateFamily is the family of date types that store only year/month/day with - // no time component. - // - // Canonical: types.Date - // Oid : T_date - // - // Examples: - // DATE - // - DateFamily = 4; - - // TimestampFamily is the family of date types that store a year/month/day - // date component, as well as an hour/minute/second time component. There is - // no timezone component (see TIMESTAMPTZ). Seconds can have varying precision - // (defaults to microsecond precision). Currently, only microsecond precision - // is supported. - // - // Canonical: types.Timestamp - // Oid : T_timestamp - // Precision: fractional seconds (3 = ms, 0,6 = us, 9 = ns, etc.) - // - // Examples: - // TIMESTAMP - // TIMESTAMP(6) - // - TimestampFamily = 5; - - // IntervalFamily is the family of types describing a duration of time. - // Currently, only microsecond precision is supported. - // - // Canonical: types.Interval - // Oid : T_interval - // - // Examples: - // INTERVAL - // - IntervalFamily = 6; - - // StringFamily is the family of types containing Unicode textual strings. - // This family includes types constructed by STRING, VARCHAR, CHAR, and "char" - // column type definitions (CHAR and "char" are distinct PG types). Note - // that while STRING and VARCHAR have no default width limit, CHAR has a - // default width of 1. - // TODO(andyk): "char" should have default width of 1 as well, but doesn't. - // - // Canonical: types.String - // Oid : T_text, T_varchar, T_bpchar, T_char - // Width : max # characters (0 = no specified limit) - // - // Examples: - // STRING - // TEXT - // VARCHAR(10) - // CHAR - // - StringFamily = 7; - - // BytesFamily is the family of types containing a list of raw byte values. - // - // Canonical: types.BYTES - // Oid : T_bytea - // - // Examples: - // BYTES - // - BytesFamily = 8; - - // TimestampTZFamily is the family of date types that store a year/month/day - // date component, as well as an hour/minute/second time component, along with - // a timezone. Seconds can have varying precision (defaults to microsecond - // precision). Currently, only microsecond precision is supported. - // - // Canonical: types.TimestampTZ - // Oid : T_timestamptz - // Precision: fractional seconds (3 = ms, 0,6 = us, 9 = ns, etc.) - // - // Examples: - // TIMESTAMPTZ - // TIMESTAMPTZ(6) - // - TimestampTZFamily = 9; - - // CollatedStringFamily is the family of types containing Unicode textual - // strings with an associated COLLATE value that specifies the locale used - // for various character-based operations such as sorting, pattern matching, - // and builtin functions like lower and upper. - // - // Oid : T_text, T_varchar, T_bpchar, T_char - // Width : max # characters (0 = no specified limit) - // Locale : name of locale (e.g. EN or DE) - // - // Examples: - // STRING COLLATE en - // VARCHAR(10) COLLATE de - // - CollatedStringFamily = 10; - - // NAME deprecated in 19.1, since it now uses Oid. 
- reserved 11; - - // OidFamily is the family of types containing Postgres Object ID (Oid) - // values. Oids are integer values that identify some object in the database, - // like a type, relation, or procedure. - // - // Canonical: types.Oid - // Oid : T_oid, T_regclass, T_regproc, T_regprocedure, T_regtype, - // T_regnamespace - // - // Examples: - // OID - // REGCLASS - // REGPROC - // - // TODO(andyk): Oids should be part of the IntFamily, since they are treated - // as equivalent to ints by PG. - OidFamily = 12; - - // UnknownFamily is a special type family that tags expressions that - // statically evaluate to NULL. An UnknownFamily expression *must* be NULL. - // But the inverse is not true, since other types allow NULL values as well. - // UnknownFamily types are not supported as a table column type, but can be - // transferred through DistSQL streams. - // - // Canonical: types.Unknown - // Oid : T_unknown - // - UnknownFamily = 13; - - // UuidFamily is the family of types containing universally unique - // identifiers. A UUID is a 128-bit quantity that is very unlikely to ever be - // generated again, and so can be relied on to be distinct from all other UUID - // values. - // - // Canonical: types.Uuid - // Oid : T_uuid - // - // Examples: - // UUID - // - UuidFamily = 14; - - // ArrayFamily is a family of non-scalar types that contain an ordered list of - // elements. The elements of an array must all share the same type. Elements - // can have have any type, including ARRAY. However, while the types package - // supports nested arrays, other parts of CRDB do not currently support them. - // Also, the length of array dimension(s) are ignored by PG and CRDB (e.g. - // an array of length 11 could be inserted into a column declared as INT[11]). - // - // Array OID values are special. Rather than having a single T_array OID, - // Postgres defines a separate OID for each possible array element type. - // Here are some examples: - // - // T__int8: array of int8 values - // T__text: array of text values - // - // Notice that each array OID has double underscores to distinguish it from - // the OID of the scalar type it contains. - // - // Oid : T__int, T__text, T__numeric, etc. - // ArrayContents: types.T of the array element type - // - // Examples: - // INT[] - // VARCHAR(10)[] COLLATE EN - // DECIMAL(10,1)[] - // TIMESTAMP[5] - // - ArrayFamily = 15; - - // INetFamily is the family of types containing IPv4 or IPv6 network address - // identifiers (e.g. 192.168.100.128/25 or FE80:CD00:0:CDE:1257:0:211E:729C). - // - // Canonical: types.INet - // Oid : T_inet - // - // Examples: - // INET - // - INetFamily = 16; - - // TimeFamily is the family of date types that store only hour/minute/second - // with no date component. There is no timezone component. Seconds can have - // varying precision (defaults to microsecond precision). Currently, only - // microsecond precision is supported. - // - // Canonical: types.Time - // Oid : T_time - // Precision: fractional seconds (3 = ms, 0,6 = us, 9 = ns, etc.) - // - // Examples: - // TIME - // TIME(6) - // - TimeFamily = 17; - - // JsonFamily is the family of types containing JavaScript Object Notation - // (JSON) values. Currently, CRDB only supports JSONB values, which are stored - // in a decomposed binary format. 
- // - // Canonical: types.Jsonb - // Oid : T_jsonb - // - // Examples: - // JSON - // JSONB - // - JsonFamily = 18; - - // TimeTZFamily is the family of date types that store only hour/minute/second - // and timestamp components, with no date component. Seconds can have - // varying precision (defaults to microsecond precision). Currently, only - // microsecond precision is supported. - // - // Canonical: types.TimeTZ - // Oid : T_timetz - // Precision: fractional seconds (3 = ms, 0,6 = us, 9 = ns, etc.) - // - // Examples: - // TIMETZ - // - TimeTZFamily = 19; - - // TupleFamily is a family of non-scalar structural types that describes the - // fields of a row or record. The fields can be of any type, including nested - // tuple and array types. Fields can also have optional labels. Currently, - // CRDB does not support tuple types as column types, but it is possible to - // construct tuples using the ROW function or tuple construction syntax. - // - // Oid : T_record - // TupleContents: []*types.T of each tuple field - // TupleLabels : []string of each tuple label - // - // Examples: - // (1, 'foo') - // ((1, 'foo') AS num, str) - // ROW(1, 'foo') - // (ROW(1, 'foo') AS num, str) - // - TupleFamily = 20; - - // BitFamily is the family of types containing ordered lists of bit values - // (0 or 1). Note that while VARBIT has no default width limit, BIT has a - // default width limit of 1. - // - // Canonical: types.VarBit - // Oid : T_varbit, T_bit - // Width : max # of bits (0 = no specified limit) - // - // Examples: - // VARBIT - // VARBIT(10) - // BIT - // BIT(10) - // - BitFamily = 21; - - // GeometryFamily is a family that supports the Geometry geospatial type, - // which is compatible with PostGIS's Geometry implementation. - // - // Canonical: types.Geometry - // Oid : oidext.T_geometry - // - // Examples: - // GEOMETRY - // GEOMETRY(LINESTRING) - // GEOMETRY(LINESTRING, SRID) - GeometryFamily = 22; - - // GeographyFamily is a family that supports the Geography geospatial type, - // which is compatible with PostGIS's Geography implementation. - // - // Canonical: types.Geography - // Oid : oidext.T_geography - // - // Examples: - // GEOGRAPHY - // GEOGRAPHY(LINESTRING) - // GEOGRAPHY(LINESTRING, SRID) - GeographyFamily = 23; - - // EnumFamily is a family that represents all ENUM types. ENUM types - // have data about the ENUM defined in a TypeDescriptor. The ID of - // the TypeDescriptor that backs this ENUM is stored in the StableTypeID - // field. It does not have a canonical form. - EnumFamily = 24; - - // Box2DFamily is a family representing the box2d type. This is compatible - // with PostGIS's box2d implementation. - // - // Canonical: types.Box2D - // Oid : oidext.T_box2d - // - // Examples: - // Box2D - Box2DFamily = 25; - - // AnyFamily is a special type family used during static analysis as a - // wildcard type that matches any other type, including scalar, array, and - // tuple types. Execution-time values should never have this type. As an - // example of its use, many SQL builtin functions allow an input value to be - // of any type, and so use this type in their static definitions. - // - // Canonical: types.Any - // Oid : T_anyelement - // - AnyFamily = 100; - - // Int2VectorFamily deprecated in 19.1, since it now uses Oid. - reserved 200; - - // OidVectorFamily deprecated in 19.1, since it now uses Oid. - reserved 201; + option (gogoproto.goproto_enum_prefix) = false; + + // BoolFamily is the family of boolean true/false types. 
+ // + // Canonical: types.Bool + // Oid : T_bool + // + // Examples: + // BOOL + // + BoolFamily = 0; + + // IntFamily is the family of signed integer types. + // + // Canonical: types.Int + // Oid : T_int8, T_int4, T_int2 + // Width : 64, 32, 16 + // + // Examples: + // INT + // INT8 + // INT4 + // + IntFamily = 1; + + // FloatFamily is the family of base-2 floating-point types (IEEE 754). + // + // Canonical: types.Float + // Oid : T_float8, T_float4 + // Width : 64, 32 + // + // Examples: + // FLOAT8 + // FLOAT4 + // + FloatFamily = 2; + + // DecimalFamily is the family of base-10 floating and fixed point types. + // + // Canonical : types.Decimal + // Oid : T_numeric + // Precision : max # decimal digits (0 = no specified limit) + // Width (Scale): # digits after decimal point (0 = no specified limit) + // + // Examples: + // DECIMAL + // DECIMAL(10) + // DECIMAL(10,3) + // + DecimalFamily = 3; + + // DateFamily is the family of date types that store only year/month/day with + // no time component. + // + // Canonical: types.Date + // Oid : T_date + // + // Examples: + // DATE + // + DateFamily = 4; + + // TimestampFamily is the family of date types that store a year/month/day + // date component, as well as an hour/minute/second time component. There is + // no timezone component (see TIMESTAMPTZ). Seconds can have varying precision + // (defaults to microsecond precision). Currently, only microsecond precision + // is supported. + // + // Canonical: types.Timestamp + // Oid : T_timestamp + // Precision: fractional seconds (3 = ms, 0,6 = us, 9 = ns, etc.) + // + // Examples: + // TIMESTAMP + // TIMESTAMP(6) + // + TimestampFamily = 5; + + // IntervalFamily is the family of types describing a duration of time. + // Currently, only microsecond precision is supported. + // + // Canonical: types.Interval + // Oid : T_interval + // + // Examples: + // INTERVAL + // + IntervalFamily = 6; + + // StringFamily is the family of types containing Unicode textual strings. + // This family includes types constructed by STRING, VARCHAR, CHAR, and "char" + // column type definitions (CHAR and "char" are distinct PG types). Note + // that while STRING and VARCHAR have no default width limit, CHAR has a + // default width of 1. + // TODO(andyk): "char" should have default width of 1 as well, but doesn't. + // + // Canonical: types.String + // Oid : T_text, T_varchar, T_bpchar, T_char + // Width : max # characters (0 = no specified limit) + // + // Examples: + // STRING + // TEXT + // VARCHAR(10) + // CHAR + // + StringFamily = 7; + + // BytesFamily is the family of types containing a list of raw byte values. + // + // Canonical: types.BYTES + // Oid : T_bytea + // + // Examples: + // BYTES + // + BytesFamily = 8; + + // TimestampTZFamily is the family of date types that store a year/month/day + // date component, as well as an hour/minute/second time component, along with + // a timezone. Seconds can have varying precision (defaults to microsecond + // precision). Currently, only microsecond precision is supported. + // + // Canonical: types.TimestampTZ + // Oid : T_timestamptz + // Precision: fractional seconds (3 = ms, 0,6 = us, 9 = ns, etc.) 
+ // + // Examples: + // TIMESTAMPTZ + // TIMESTAMPTZ(6) + // + TimestampTZFamily = 9; + + // CollatedStringFamily is the family of types containing Unicode textual + // strings with an associated COLLATE value that specifies the locale used + // for various character-based operations such as sorting, pattern matching, + // and builtin functions like lower and upper. + // + // Oid : T_text, T_varchar, T_bpchar, T_char + // Width : max # characters (0 = no specified limit) + // Locale : name of locale (e.g. EN or DE) + // + // Examples: + // STRING COLLATE en + // VARCHAR(10) COLLATE de + // + CollatedStringFamily = 10; + + // NAME deprecated in 19.1, since it now uses Oid. + reserved 11; + + // OidFamily is the family of types containing Postgres Object ID (Oid) + // values. Oids are integer values that identify some object in the database, + // like a type, relation, or procedure. + // + // Canonical: types.Oid + // Oid : T_oid, T_regclass, T_regproc, T_regprocedure, T_regtype, + // T_regnamespace + // + // Examples: + // OID + // REGCLASS + // REGPROC + // + // TODO(andyk): Oids should be part of the IntFamily, since they are treated + // as equivalent to ints by PG. + OidFamily = 12; + + // UnknownFamily is a special type family that tags expressions that + // statically evaluate to NULL. An UnknownFamily expression *must* be NULL. + // But the inverse is not true, since other types allow NULL values as well. + // UnknownFamily types are not supported as a table column type, but can be + // transferred through DistSQL streams. + // + // Canonical: types.Unknown + // Oid : T_unknown + // + UnknownFamily = 13; + + // UuidFamily is the family of types containing universally unique + // identifiers. A UUID is a 128-bit quantity that is very unlikely to ever be + // generated again, and so can be relied on to be distinct from all other UUID + // values. + // + // Canonical: types.Uuid + // Oid : T_uuid + // + // Examples: + // UUID + // + UuidFamily = 14; + + // ArrayFamily is a family of non-scalar types that contain an ordered list of + // elements. The elements of an array must all share the same type. Elements + // can have have any type, including ARRAY. However, while the types package + // supports nested arrays, other parts of CRDB do not currently support them. + // Also, the length of array dimension(s) are ignored by PG and CRDB (e.g. + // an array of length 11 could be inserted into a column declared as INT[11]). + // + // Array OID values are special. Rather than having a single T_array OID, + // Postgres defines a separate OID for each possible array element type. + // Here are some examples: + // + // T__int8: array of int8 values + // T__text: array of text values + // + // Notice that each array OID has double underscores to distinguish it from + // the OID of the scalar type it contains. + // + // Oid : T__int, T__text, T__numeric, etc. + // ArrayContents: types.T of the array element type + // + // Examples: + // INT[] + // VARCHAR(10)[] COLLATE EN + // DECIMAL(10,1)[] + // TIMESTAMP[5] + // + ArrayFamily = 15; + + // INetFamily is the family of types containing IPv4 or IPv6 network address + // identifiers (e.g. 192.168.100.128/25 or FE80:CD00:0:CDE:1257:0:211E:729C). + // + // Canonical: types.INet + // Oid : T_inet + // + // Examples: + // INET + // + INetFamily = 16; + + // TimeFamily is the family of date types that store only hour/minute/second + // with no date component. There is no timezone component. 
Seconds can have + // varying precision (defaults to microsecond precision). Currently, only + // microsecond precision is supported. + // + // Canonical: types.Time + // Oid : T_time + // Precision: fractional seconds (3 = ms, 0,6 = us, 9 = ns, etc.) + // + // Examples: + // TIME + // TIME(6) + // + TimeFamily = 17; + + // JsonFamily is the family of types containing JavaScript Object Notation + // (JSON) values. Currently, CRDB only supports JSONB values, which are stored + // in a decomposed binary format. + // + // Canonical: types.Jsonb + // Oid : T_jsonb + // + // Examples: + // JSON + // JSONB + // + JsonFamily = 18; + + // TimeTZFamily is the family of date types that store only hour/minute/second + // and timestamp components, with no date component. Seconds can have + // varying precision (defaults to microsecond precision). Currently, only + // microsecond precision is supported. + // + // Canonical: types.TimeTZ + // Oid : T_timetz + // Precision: fractional seconds (3 = ms, 0,6 = us, 9 = ns, etc.) + // + // Examples: + // TIMETZ + // + TimeTZFamily = 19; + + // TupleFamily is a family of non-scalar structural types that describes the + // fields of a row or record. The fields can be of any type, including nested + // tuple and array types. Fields can also have optional labels. Currently, + // CRDB does not support tuple types as column types, but it is possible to + // construct tuples using the ROW function or tuple construction syntax. + // + // Oid : T_record + // TupleContents: []*types.T of each tuple field + // TupleLabels : []string of each tuple label + // + // Examples: + // (1, 'foo') + // ((1, 'foo') AS num, str) + // ROW(1, 'foo') + // (ROW(1, 'foo') AS num, str) + // + TupleFamily = 20; + + // BitFamily is the family of types containing ordered lists of bit values + // (0 or 1). Note that while VARBIT has no default width limit, BIT has a + // default width limit of 1. + // + // Canonical: types.VarBit + // Oid : T_varbit, T_bit + // Width : max # of bits (0 = no specified limit) + // + // Examples: + // VARBIT + // VARBIT(10) + // BIT + // BIT(10) + // + BitFamily = 21; + + // GeometryFamily is a family that supports the Geometry geospatial type, + // which is compatible with PostGIS's Geometry implementation. + // + // Canonical: types.Geometry + // Oid : oidext.T_geometry + // + // Examples: + // GEOMETRY + // GEOMETRY(LINESTRING) + // GEOMETRY(LINESTRING, SRID) + GeometryFamily = 22; + + // GeographyFamily is a family that supports the Geography geospatial type, + // which is compatible with PostGIS's Geography implementation. + // + // Canonical: types.Geography + // Oid : oidext.T_geography + // + // Examples: + // GEOGRAPHY + // GEOGRAPHY(LINESTRING) + // GEOGRAPHY(LINESTRING, SRID) + GeographyFamily = 23; + + // EnumFamily is a family that represents all ENUM types. ENUM types + // have data about the ENUM defined in a TypeDescriptor. The ID of + // the TypeDescriptor that backs this ENUM is stored in the StableTypeID + // field. It does not have a canonical form. + EnumFamily = 24; + + // Box2DFamily is a family representing the box2d type. This is compatible + // with PostGIS's box2d implementation. + // + // Canonical: types.Box2D + // Oid : oidext.T_box2d + // + // Examples: + // Box2D + Box2DFamily = 25; + + // AnyFamily is a special type family used during static analysis as a + // wildcard type that matches any other type, including scalar, array, and + // tuple types. Execution-time values should never have this type. 
As an + // example of its use, many SQL builtin functions allow an input value to be + // of any type, and so use this type in their static definitions. + // + // Canonical: types.Any + // Oid : T_anyelement + // + AnyFamily = 100; + + // Int2VectorFamily deprecated in 19.1, since it now uses Oid. + reserved 200; + + // OidVectorFamily deprecated in 19.1, since it now uses Oid. + reserved 201; } // IntervalDurationType represents a duration that can be used @@ -437,8 +437,8 @@ message IntervalDurationField { // GeoMetadata contains metadata associated with Geospatial data types. message GeoMetadata { - optional int32 srid = 1 [(gogoproto.nullable)=false,(gogoproto.customname)="SRID",(gogoproto.casttype)="github.com/dolthub/doltgresql/postgres/parser/geo/geopb.SRID"]; - optional geopb.ShapeType shape_type = 2 [(gogoproto.nullable)=false]; + optional int32 srid = 1 [(gogoproto.nullable) = false, (gogoproto.customname) = "SRID", (gogoproto.casttype) = "github.com/dolthub/doltgresql/postgres/parser/geo/geopb.SRID"]; + optional geopb.ShapeType shape_type = 2 [(gogoproto.nullable) = false]; } // PersistentUserDefinedTypeMetadata contains user defined type metadata @@ -448,103 +448,103 @@ message PersistentUserDefinedTypeMetadata { // ArrayTypeOID is the OID of the array type for this user defined type. It // is only set for user defined types that aren't arrays. optional uint32 array_type_oid = 2 - [(gogoproto.nullable) = false, (gogoproto.customname) = "ArrayTypeOID", (gogoproto.customtype) = "github.com/lib/pq/oid.Oid"]; + [(gogoproto.nullable) = false, (gogoproto.customname) = "ArrayTypeOID", (gogoproto.customtype) = "github.com/lib/pq/oid.Oid"]; reserved 1; } // T is a wrapper around InternalType. message T { - option (gogoproto.typedecl) = false; - option (gogoproto.marshaler) = false; - option (gogoproto.unmarshaler) = false; - option (gogoproto.sizer) = false; - option (gogoproto.goproto_getters) = false; - option (gogoproto.goproto_stringer) = false; - // InternalType should never be directly referenced outside this package. The - // only reason it is exported is because gogoproto panics when printing the - // string representation of an unexported field. This is a problem when this - // struct is embedded in a larger struct (like a ColumnDescriptor). - optional InternalType internal_type = 1 [(gogoproto.nullable) = false]; + option (gogoproto.typedecl) = false; + option (gogoproto.marshaler) = false; + option (gogoproto.unmarshaler) = false; + option (gogoproto.sizer) = false; + option (gogoproto.goproto_getters) = false; + option (gogoproto.goproto_stringer) = false; + // InternalType should never be directly referenced outside this package. The + // only reason it is exported is because gogoproto panics when printing the + // string representation of an unexported field. This is a problem when this + // struct is embedded in a larger struct (like a ColumnDescriptor). + optional InternalType internal_type = 1 [(gogoproto.nullable) = false]; } // InternalType is the protobuf encoding for SQL types. It is always wrapped by // a T struct, and should never be used directly by outside packages. See the // comment header for the T struct for more details. message InternalType { - // Family specifies a group of types that are compatible with one another. - // See the header for the T.Family method for more details. - optional sql.sem.types.Family family = 1 [(gogoproto.nullable) = false]; - - // Width is the size or scale of the type, such as number of bits or - // characters. 
See the T.Width method for more details. - optional int32 width = 2 [(gogoproto.nullable) = false]; - - // Precision is the accuracy of the data type. See the T.Precision method for - // more details. This field was also by FLOAT pre-2.1 (this was incorrect.) - optional int32 precision = 3 [(gogoproto.nullable) = false]; - - // ArrayDimensions is deprecated in 19.2, since it was never used. It - // previously contained the length of each dimension in the array. A - // dimension of -1 meant that no bound was specified for that dimension. If - // arrayDimensions was nil, then the array had one unbounded dimension. - repeated int32 array_dimensions = 4; - - // Locale identifies a specific geographical, political, or cultural region that - // impacts various character-based operations such as sorting, pattern matching, - // and builtin functions like lower and upper. See the T.Locale method for - // more details. - optional string locale = 5; - - // VisibleType is deprecated in 19.2, since it is now superseded by the Oid - // field. It previously contained an alias for any types where our internal - // representation is different than the user specification. Examples are INT4, - // FLOAT4, etc. Mostly for Postgres compatibility. - optional int32 visible_type = 6 [(gogoproto.nullable) = false]; - - // ArrayElemType is deprecated in 19.2, since it is now superseded by the - // ArrayContents field. It previously contained the type family of array - // elements. The other array fields (width/precision/locale/etc) were used - // to store the other attributes of the array's element type. - optional sql.sem.types.Family array_elem_type = 7; - - // TupleContents returns a slice containing the type of each tuple field. This - // is nil for non-TUPLE types. - repeated T tuple_contents = 8; - - // TupleLabels returns a slice containing the labels of each tuple field. This - // is nil for non-TUPLE types, or if the TUPLE type does not specify labels. - repeated string tuple_labels = 9; - - // Oid returns the type's Postgres Object ID. See the header for the T.Oid - // method for more details. For user-defined types, the OID value is an - // offset (oidext.CockroachPredefinedOIDMax) away from the stable_type_id - // field. This makes it easy to retrieve a type descriptor by OID. - optional uint32 oid = 10 [(gogoproto.nullable) = false, (gogoproto.customname) = "Oid", (gogoproto.customtype) = "github.com/lib/pq/oid.Oid"]; - - // ArrayContents returns the type of array elements. This is nil for non-ARRAY - // types. - optional T array_contents = 11; - - // TimePrecisionIsSet indicates whether the precision was explicitly set. - // It is currently in use for the TIME-related families and INTERVALs - // where a Precision of 0 indicated the default precision of 6 - // in versions pre-20.1. - // The rules for Precision to use are as follows: - // * If Precision is > 0, then that is the precision. - // * If Precision is 0, it will default to 6 if TimePrecisionIsSet is false - // (for compatibility reasons). - // * Otherwise, Precision = 0 and TimePrecisionIsSet = true, so it is - // actually 0. - optional bool time_precision_is_set = 12 [(gogoproto.nullable) = false]; - - // IntervalDurationField is populated for intervals, representing extra - // typmod or precision data that may be required. - optional IntervalDurationField interval_duration_field = 13; - - // GeoMetadata is populated for geospatial types. 
- optional GeoMetadata geo_metadata = 14; - - // UDTMetadata is populated for user defined types that are not arrays. - optional PersistentUserDefinedTypeMetadata udt_metadata = 15 [(gogoproto.customname) = "UDTMetadata"]; + // Family specifies a group of types that are compatible with one another. + // See the header for the T.Family method for more details. + optional sql.sem.types.Family family = 1 [(gogoproto.nullable) = false]; + + // Width is the size or scale of the type, such as number of bits or + // characters. See the T.Width method for more details. + optional int32 width = 2 [(gogoproto.nullable) = false]; + + // Precision is the accuracy of the data type. See the T.Precision method for + // more details. This field was also by FLOAT pre-2.1 (this was incorrect.) + optional int32 precision = 3 [(gogoproto.nullable) = false]; + + // ArrayDimensions is deprecated in 19.2, since it was never used. It + // previously contained the length of each dimension in the array. A + // dimension of -1 meant that no bound was specified for that dimension. If + // arrayDimensions was nil, then the array had one unbounded dimension. + repeated int32 array_dimensions = 4; + + // Locale identifies a specific geographical, political, or cultural region that + // impacts various character-based operations such as sorting, pattern matching, + // and builtin functions like lower and upper. See the T.Locale method for + // more details. + optional string locale = 5; + + // VisibleType is deprecated in 19.2, since it is now superseded by the Oid + // field. It previously contained an alias for any types where our internal + // representation is different than the user specification. Examples are INT4, + // FLOAT4, etc. Mostly for Postgres compatibility. + optional int32 visible_type = 6 [(gogoproto.nullable) = false]; + + // ArrayElemType is deprecated in 19.2, since it is now superseded by the + // ArrayContents field. It previously contained the type family of array + // elements. The other array fields (width/precision/locale/etc) were used + // to store the other attributes of the array's element type. + optional sql.sem.types.Family array_elem_type = 7; + + // TupleContents returns a slice containing the type of each tuple field. This + // is nil for non-TUPLE types. + repeated T tuple_contents = 8; + + // TupleLabels returns a slice containing the labels of each tuple field. This + // is nil for non-TUPLE types, or if the TUPLE type does not specify labels. + repeated string tuple_labels = 9; + + // Oid returns the type's Postgres Object ID. See the header for the T.Oid + // method for more details. For user-defined types, the OID value is an + // offset (oidext.CockroachPredefinedOIDMax) away from the stable_type_id + // field. This makes it easy to retrieve a type descriptor by OID. + optional uint32 oid = 10 [(gogoproto.nullable) = false, (gogoproto.customname) = "Oid", (gogoproto.customtype) = "github.com/lib/pq/oid.Oid"]; + + // ArrayContents returns the type of array elements. This is nil for non-ARRAY + // types. + optional T array_contents = 11; + + // TimePrecisionIsSet indicates whether the precision was explicitly set. + // It is currently in use for the TIME-related families and INTERVALs + // where a Precision of 0 indicated the default precision of 6 + // in versions pre-20.1. + // The rules for Precision to use are as follows: + // * If Precision is > 0, then that is the precision. 
+ // * If Precision is 0, it will default to 6 if TimePrecisionIsSet is false + // (for compatibility reasons). + // * Otherwise, Precision = 0 and TimePrecisionIsSet = true, so it is + // actually 0. + optional bool time_precision_is_set = 12 [(gogoproto.nullable) = false]; + + // IntervalDurationField is populated for intervals, representing extra + // typmod or precision data that may be required. + optional IntervalDurationField interval_duration_field = 13; + + // GeoMetadata is populated for geospatial types. + optional GeoMetadata geo_metadata = 14; + + // UDTMetadata is populated for user defined types that are not arrays. + optional PersistentUserDefinedTypeMetadata udt_metadata = 15 [(gogoproto.customname) = "UDTMetadata"]; } diff --git a/postgres/parser/uuid/codec.go b/postgres/parser/uuid/codec.go index ecda183e20..f01334bd7b 100644 --- a/postgres/parser/uuid/codec.go +++ b/postgres/parser/uuid/codec.go @@ -81,37 +81,36 @@ func (u UUID) MarshalText() ([]byte, error) { // UnmarshalText implements the encoding.TextUnmarshaler interface. // Following formats are supported: // -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "6ba7b8109dad11d180b400c04fd430c8" -// "{6ba7b8109dad11d180b400c04fd430c8}", -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// "{6ba7b8109dad11d180b400c04fd430c8}", +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" // // ABNF for supported UUID text representation follows: // -// URN := 'urn' -// UUID-NID := 'uuid' +// URN := 'urn' +// UUID-NID := 'uuid' // -// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | -// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | -// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' // -// hexoct := hexdig hexdig -// 2hexoct := hexoct hexoct -// 4hexoct := 2hexoct 2hexoct -// 6hexoct := 4hexoct 2hexoct -// 12hexoct := 6hexoct 6hexoct +// hexoct := hexdig hexdig +// 2hexoct := hexoct hexoct +// 4hexoct := 2hexoct 2hexoct +// 6hexoct := 4hexoct 2hexoct +// 12hexoct := 6hexoct 6hexoct // -// hashlike := 12hexoct -// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// hashlike := 12hexoct +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct // -// plain := canonical | hashlike -// uuid := canonical | hashlike | braced | urn -// -// braced := '{' plain '}' | '{' hashlike '}' -// urn := URN ':' UUID-NID ':' plain +// plain := canonical | hashlike +// uuid := canonical | hashlike | braced | urn // +// braced := '{' plain '}' | '{' hashlike '}' +// urn := URN ':' UUID-NID ':' plain func (u *UUID) UnmarshalText(text []byte) error { switch len(text) { case 32: @@ -153,7 +152,8 @@ func (u *UUID) decodeCanonical(t []byte) error { } // decodeHashLike decodes UUID strings that are using the following format: -// "6ba7b8109dad11d180b400c04fd430c8". +// +// "6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodeHashLike(t []byte) error { src := t[:] dst := u[:] @@ -163,8 +163,9 @@ func (u *UUID) decodeHashLike(t []byte) error { } // decodeBraced decodes UUID strings that are using the following formats: -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" -// "{6ba7b8109dad11d180b400c04fd430c8}". 
+// +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" +// "{6ba7b8109dad11d180b400c04fd430c8}". func (u *UUID) decodeBraced(t []byte) error { l := len(t) @@ -176,8 +177,9 @@ func (u *UUID) decodeBraced(t []byte) error { } // decodeURN decodes UUID strings that are using the following formats: -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". +// +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodeURN(t []byte) error { total := len(t) @@ -191,8 +193,9 @@ func (u *UUID) decodeURN(t []byte) error { } // decodePlain decodes UUID strings that are using the following formats: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format -// "6ba7b8109dad11d180b400c04fd430c8". +// +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". func (u *UUID) decodePlain(t []byte) error { switch len(t) { case 32: diff --git a/postgres/parser/uuid/uuid.go b/postgres/parser/uuid/uuid.go index eadd781af4..2b9bb4572b 100644 --- a/postgres/parser/uuid/uuid.go +++ b/postgres/parser/uuid/uuid.go @@ -176,22 +176,23 @@ func (u *UUID) SetVersion(v byte) { func (u *UUID) SetVariant(v byte) { switch v { case VariantNCS: - u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + u[8] = u[8]&(0xff>>1) | (0x00 << 7) case VariantRFC4122: - u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + u[8] = u[8]&(0xff>>2) | (0x02 << 6) case VariantMicrosoft: - u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + u[8] = u[8]&(0xff>>3) | (0x06 << 5) case VariantFuture: fallthrough default: - u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) + u[8] = u[8]&(0xff>>3) | (0x07 << 5) } } // Must is a helper that wraps a call to a function returning (UUID, error) // and panics if the error is non-nil. It is intended for use in variable // initializations such as -// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) +// +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")) func Must(u UUID, err error) UUID { if err != nil { panic(err) diff --git a/scripts/build_binaries.sh b/scripts/build_binaries.sh new file mode 100755 index 0000000000..d3f90888ff --- /dev/null +++ b/scripts/build_binaries.sh @@ -0,0 +1,43 @@ +#!/bin/bash + +set -e +set -o pipefail + +script_dir=$(dirname "$0") +cd $script_dir/.. + +[ ! -z "$GO_BUILD_VERSION" ] || (echo "Must supply GO_BUILD_VERSION"; exit 1) + +docker run --rm -v `pwd`:/src golang:"$GO_BUILD_VERSION"-bookworm /bin/bash -c ' +set -e +set -o pipefail +apt-get update && apt-get install -y p7zip-full pigz +cd /src + +BINS="doltgres" +OS_ARCH_TUPLES="windows-amd64 linux-amd64 linux-arm64 darwin-amd64 darwin-arm64" + +for tuple in $OS_ARCH_TUPLES; do + os=`echo $tuple | sed 's/-.*//'` + arch=`echo $tuple | sed 's/.*-//'` + o="out/doltgresql-$os-$arch" + mkdir -p "$o/bin" + mkdir -p "$o/licenses" + cp Godeps/LICENSES "$o/" + cp -r ./licenses "$o/licenses" + cp LICENSE "$o/licenses" + for bin in $BINS; do + echo Building "$o/$bin" + obin="$bin" + if [ "$os" = windows ]; then + obin="$bin.exe" + fi + CGO_ENABLED=0 GOOS="$os" GOARCH="$arch" go build -trimpath -ldflags="-s -w" -o "$o/bin/$obin" . 
+ done + if [ "$os" = windows ]; then + (cd out && 7z a "doltgresql-$os-$arch.zip" "doltgresql-$os-$arch" && 7z a "doltgresql-$os-$arch.7z" "doltgresql-$os-$arch") + else + tar cf - -C out "doltgresql-$os-$arch" | pigz -9 > "out/doltgresql-$os-$arch.tar.gz" + fi +done +' diff --git a/scripts/check_bats_fmt.sh b/scripts/check_bats_fmt.sh new file mode 100755 index 0000000000..835cb82d80 --- /dev/null +++ b/scripts/check_bats_fmt.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +script_dir=$(dirname "$0") +cd $script_dir/../testing/bats + +ERRORS_FOUND=0 +for FILENAME_WITH_EXT in *.bats; do + FILENAME=${FILENAME_WITH_EXT%".bats"} + while read -r LINE; do + if [[ ! "$LINE" =~ @test[[:space:]]+[\"\']$FILENAME: ]]; then + TESTNAME=$(echo "$LINE" | grep -oP "(?<=@test\s)[\"\'][^\"\']+[\"\']") + echo -e "ERROR: test \"$TESTNAME\" in \"$FILENAME_WITH_EXT\" must start with \"$FILENAME:\" in the title" + ERRORS_FOUND=1 + fi + done <<< $(grep '@test ['"'"'"]' "$FILENAME_WITH_EXT") +done +if [[ $ERRORS_FOUND -eq 1 ]]; then + exit 1 +fi +exit 0 diff --git a/scripts/check_fmt.sh b/scripts/check_fmt.sh new file mode 100755 index 0000000000..84e1b06119 --- /dev/null +++ b/scripts/check_fmt.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -eo pipefail + +script_dir=$(dirname "$0") +cd $script_dir/.. + +go install golang.org/x/tools/cmd/goimports + +paths=`find . -maxdepth 1 -mindepth 1 ! -name ".idea" ! -name ".git" ! -name ".github" \( -name gen -prune -o -type d -print -o -type f -name '*.go' -print \)` + +bad_files=$(goimports -l -local github.com/dolthub/doltgresql $paths) +if [ "$bad_files" != "" ]; then + echo "ERROR: The following files do not match goimports output:" + echo "$bad_files" + echo + echo "Please format the go code in the repository with './scripts/format_repo.sh'" + exit 1 +fi + +bad_files=$(find $paths -name '*.go' | while read f; do + if [[ $(awk '/import \(/{flag=1;next}/\)/{flag=0}flag' < $f | egrep -c '$^') -gt 2 ]]; then + echo $f + fi +done) + +if [ "$bad_files" != "" ]; then + echo "ERROR: The following files have more than three import groups:" + echo "$bad_files" + echo + echo "Please format the go code in the repository with './scripts/format_repo.sh'" + exit 1 +fi diff --git a/format_repo.sh b/scripts/format_repo.sh old mode 100644 new mode 100755 similarity index 96% rename from format_repo.sh rename to scripts/format_repo.sh index 8ff58ccd14..d86d01c071 --- a/format_repo.sh +++ b/scripts/format_repo.sh @@ -16,6 +16,9 @@ set -eo pipefail +script_dir=$(dirname "$0") +cd $script_dir/.. + paths=`find . -maxdepth 1 -mindepth 1 ! -name ".idea" ! -name ".git" ! 
-name ".github" \( -name gen -prune -o -type d -print -o -type f -name '*.go' -print \)` goimports -w -local github.com/dolthub/doltgresql $paths diff --git a/server/ast/expr.go b/server/ast/expr.go index c52acae048..5875feb9db 100644 --- a/server/ast/expr.go +++ b/server/ast/expr.go @@ -470,7 +470,7 @@ func nodeExpr(node tree.Expr) (vitess.Expr, error) { if node.Row { return nil, fmt.Errorf("ROW keyword for tuples not yet supported") } - + valTuple, err := nodeExprs(node.Exprs) if err != nil { return nil, err diff --git a/server/listener.go b/server/listener.go index 08d18d36c9..d35bfe717a 100644 --- a/server/listener.go +++ b/server/listener.go @@ -23,15 +23,16 @@ import ( "strings" "sync/atomic" - "github.com/dolthub/doltgresql/postgres/connection" - "github.com/dolthub/doltgresql/postgres/messages" - "github.com/dolthub/doltgresql/postgres/parser/parser" - "github.com/dolthub/doltgresql/server/ast" "github.com/dolthub/go-mysql-server/server" "github.com/dolthub/go-mysql-server/sql/mysql_db" "github.com/dolthub/vitess/go/mysql" "github.com/dolthub/vitess/go/sqltypes" "github.com/dolthub/vitess/go/vt/sqlparser" + + "github.com/dolthub/doltgresql/postgres/connection" + "github.com/dolthub/doltgresql/postgres/messages" + "github.com/dolthub/doltgresql/postgres/parser/parser" + "github.com/dolthub/doltgresql/server/ast" ) var ( @@ -329,7 +330,7 @@ func (l *Listener) sendClientStartupMessages(conn net.Conn, startupMessage messa }); err != nil { return err } - + if err := connection.Send(conn, messages.ParameterStatus{ Name: "client_encoding", Value: "UTF8", @@ -343,7 +344,7 @@ func (l *Listener) sendClientStartupMessages(conn net.Conn, startupMessage messa }); err != nil { return err } - + return nil } diff --git a/testing/logictest/harness/doltgres_harness.go b/testing/logictest/harness/doltgres_harness.go index d0d73c6f4a..7e65ef181d 100755 --- a/testing/logictest/harness/doltgres_harness.go +++ b/testing/logictest/harness/doltgres_harness.go @@ -15,14 +15,12 @@ package harness import ( + "database/sql" "fmt" "strings" - "database/sql" - - _ "github.com/jackc/pgx/v4/stdlib" - "github.com/dolthub/sqllogictest/go/logictest" + _ "github.com/jackc/pgx/v4/stdlib" ) var _ logictest.Harness = &PostgresqlServerHarness{} @@ -164,7 +162,6 @@ func (h *PostgresqlServerHarness) dropAllViews() error { return nil } - // Returns the string representation of the column value given func stringVal(col interface{}) string { switch v := col.(type) { diff --git a/testing/logictest/main.go b/testing/logictest/main.go index 07766d7f32..0d20c1a9ab 100755 --- a/testing/logictest/main.go +++ b/testing/logictest/main.go @@ -22,8 +22,9 @@ import ( "fmt" "os" - "github.com/dolthub/doltgresql/testing/logictest/harness" "github.com/dolthub/sqllogictest/go/logictest" + + "github.com/dolthub/doltgresql/testing/logictest/harness" ) var resultFormat = flag.String("r", "json", "format of parsed results") @@ -201,4 +202,3 @@ func writeResultsCsv(results []*DoltResultRecord) (err error) { } return } -