1
0
mirror of https://github.com/openshift/installer.git synced 2026-02-05 06:46:36 +01:00

tooling: vendor gci tool

This commit is contained in:
Rafael Fonseca
2022-12-12 22:29:50 +01:00
parent 6039d415ec
commit 95c1c33b5d
54 changed files with 3707 additions and 0 deletions

2
go.mod
View File

@@ -29,6 +29,7 @@ require (
github.com/containers/image v3.0.2+incompatible
github.com/coreos/ignition/v2 v2.14.0
github.com/coreos/stream-metadata-go v0.1.8
github.com/daixiang0/gci v0.9.0
github.com/diskfs/go-diskfs v1.2.1-0.20210727185522-a769efacd235
github.com/form3tech-oss/jwt-go v3.2.3+incompatible
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
@@ -187,6 +188,7 @@ require (
github.com/hashicorp/go-retryablehttp v0.7.0 // indirect
github.com/hashicorp/go-version v1.6.0 // indirect
github.com/hashicorp/terraform-json v0.14.0 // indirect
github.com/hexops/gotextdiff v1.0.3 // indirect
github.com/imdario/mergo v0.3.12 // indirect
github.com/inconshreveable/mousetrap v1.0.1 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect

4
go.sum
View File

@@ -323,6 +323,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/creack/pty v1.1.17 h1:QeVUsEDNrLBW4tMgZHvxy18sKtr6VI492kBhUfhDJNI=
github.com/creack/pty v1.1.17/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
github.com/daixiang0/gci v0.9.0 h1:t8XZ0vK6l0pwPoOmoGyqW2NwQlvbpAQNVvu/GRBgykM=
github.com/daixiang0/gci v0.9.0/go.mod h1:EpVfrztufwVgQRXjnX4zuNinEpLj5OmMjtu/+MB0V0c=
github.com/dave/dst v0.26.2/go.mod h1:UMDJuIRPfyUCC78eFuB+SV/WI8oDeyFDvM/JR6NI3IU=
github.com/dave/gopackages v0.0.0-20170318123100-46e7023ec56e/go.mod h1:i00+b/gKdIDIxuLDFob7ustLAVqhsZRk2qVZrArELGQ=
github.com/dave/jennifer v1.2.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
@@ -866,6 +868,8 @@ github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjl
github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI=
github.com/hashicorp/terraform-json v0.14.0 h1:sh9iZ1Y8IFJLx+xQiKHGud6/TSUCM0N8e17dKDpqV7s=
github.com/hashicorp/terraform-json v0.14.0/go.mod h1:5A9HIWPkk4e5aeeXIBbkcOvaZbIYnAIkEyqP2pNSckM=
github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM=
github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec h1:qv2VnGeEQHchGaZ/u7lxST/RaJw+cv273q79D81Xbog=
github.com/hinshun/vt10x v0.0.0-20220119200601-820417d04eec/go.mod h1:Q48J4R4DvxnHolD5P8pOtXigYlRuPLGl6moFx3ulM68=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=

4
vendor/github.com/daixiang0/gci/.gitignore generated vendored Normal file
View File

@@ -0,0 +1,4 @@
dist/
.gitcookies
.idea/
.vscode/

85
vendor/github.com/daixiang0/gci/.golangci.yml generated vendored Normal file
View File

@@ -0,0 +1,85 @@
# options for analysis running
run:
# default concurrency is a available CPU number
concurrency: 4
# timeout for analysis, e.g. 30s, 5m, default is 1m
deadline: 10m
# exit code when at least one issue was found, default is 1
issues-exit-code: 1
# include test files or not, default is true
tests: true
# list of build tags, all linters use it. Default is empty list.
build-tags:
# which dirs to skip: they won't be analyzed;
# can use regexp here: generated.*, regexp is applied on full path;
# default value is empty list, but next dirs are always skipped independently
# from this option's value:
# third_party$, testdata$, examples$, Godeps$, builtin$
skip-dirs:
# which files to skip: they will be analyzed, but issues from them
# won't be reported. Default value is empty list, but there is
# no need to include all autogenerated files, we confidently recognize
# autogenerated files. If it's not please let us know.
skip-files:
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle, default is "colored-line-number"
format: tab
# print lines of code with issue, default is true
print-issued-lines: true
# print linter name in the end of issue text, default is true
print-linter-name: true
# all available settings of specific linters
linters-settings:
gci:
# Checks that no inline Comments are present.
# Default: false
no-inline-comments: false
# Checks that no prefix Comments(comment lines above an import) are present.
# Default: false
no-prefix-comments: false
# Section configuration to compare against.
# Section names are case-insensitive and may contain parameters in ().
# Default: ["standard", "default"]
sections:
- standard # Captures all standard packages if they do not match another section.
- default # Contains all imports that could not be matched to another section type.
- prefix(github.com/daixiang0/gci) # Groups all imports with the specified Prefix.
# Separators that should be present between sections.
# Default: ["newLine"]
section-separators:
- newLine
gofmt:
# simplify code: gofmt with `-s` option, true by default
simplify: true
goimports:
# put imports beginning with prefix after 3rd-party packages;
# it's a comma-separated list of prefixes
local-prefixes: github.com/daixiang0/gci
linters:
fast: false
enable:
- gofmt
- gofumpt
- goimports
- gci
disable-all: true
issues:
exclude:

29
vendor/github.com/daixiang0/gci/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,29 @@
BSD 3-Clause License
Copyright (c) 2020, Xiang Dai
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

18
vendor/github.com/daixiang0/gci/Makefile generated vendored Normal file
View File

@@ -0,0 +1,18 @@
.PHONY: clean generate test build
BIN_OUTPUT := $(if $(filter $(shell go env GOOS), windows), dist/gci.exe, dist/gci)
default: clean generate test build
clean:
@echo BIN_OUTPUT: ${BIN_OUTPUT}
@rm -rf dist cover.out
build: clean
@go build -v -trimpath -o ${BIN_OUTPUT} .
test: clean
@go test -v -count=1 -cover ./...
generate:
@go generate ./...

232
vendor/github.com/daixiang0/gci/README.md generated vendored Normal file
View File

@@ -0,0 +1,232 @@
# GCI
GCI, a tool that controls golang package import order and makes it always deterministic.
The desired output format is highly configurable and allows for more custom formatting than `goimport` does.
GCI considers an import block based on the AST as below:
```
Doc
Name Path Comment
```
All comments will be kept as they were, except the independent comment blocks (line breaks before and after).
GCI splits all import blocks into different sections, now support five section type:
- standard: Golang official imports, like "fmt"
- custom: Custom section, use full and the longest match (match full string first, if multiple matches, use the longest one)
- default: All rest import blocks
- blank: Put blank imports together in a separate group
- dot: Put dot imports together in a separate group
The priority is standard > default > custom > blank > dot, all sections sort alphabetically inside.
By default, blank and dot sections are not used and the corresponding lines end up in the other groups.
All import blocks use one TAB(`\t`) as Indent.
Since v0.9.0, GCI always puts C import block as the first.
**Note**:
`nolint` is hard to handle at section level, GCI will consider it as a single comment.
## Installation
To download and install the highest available release version -
```shell
go install github.com/daixiang0/gci@latest
```
You may also specify a specific version, for example:
```shell
go install github.com/daixiang0/gci@v0.6.0
```
## Usage
Now GCI provides two command line methods, mainly for backward compatibility.
### New style
GCI supports three modes of operation
```shell
$ gci print -h
Print outputs the formatted file. If you want to apply the changes to a file use write instead!
Usage:
gci print path... [flags]
Aliases:
print, output
Flags:
-d, --debug Enables debug output from the formatter
-h, --help help for write
-s, --section strings Sections define how inputs will be processed. Section names are case-insensitive and may contain parameters in (). The section order is standard > default > custom > blank > dot. The default value is [standard,default].
standard - standard section that Golang provides officially, like "fmt"
Prefix(github.com/daixiang0) - custom section, groups all imports with the specified Prefix. Imports will be matched to the longest Prefix.
default - default section, contains all rest imports
blank - blank section, contains all blank imports. This section is not present unless explicitly enabled. (default [standard,default])
--skip-generated Skip generated files
--custom-order Enable custom order of sections. If specified, make the section order the same as your configuration order. The default order is standard > default > custom > blank > dot.
```
```shell
$ gci write -h
Write modifies the specified files in-place
Usage:
gci write path... [flags]
Aliases:
write, overwrite
Flags:
-d, --debug Enables debug output from the formatter
-h, --help help for write
-s, --section strings Sections define how inputs will be processed. Section names are case-insensitive and may contain parameters in (). The section order is standard > default > custom > blank > dot. The default value is [standard,default].
standard - standard section that Golang provides officially, like "fmt"
Prefix(github.com/daixiang0) - custom section, groups all imports with the specified Prefix. Imports will be matched to the longest Prefix.
default - default section, contains all rest imports
blank - blank section, contains all blank imports. This section is not present unless explicitly enabled.
dot - dot section, contains all dot imports. This section is not present unless explicitly enabled. (default [standard,default])
--skip-generated Skip generated files
--custom-order Enable custom order of sections. If specified, make the section order the same as your configuration order. The default order is standard > default > custom > blank > dot.
```
```shell
$ gci diff -h
Diff prints a patch in the style of the diff tool that contains the required changes to the file to make it adhere to the specified formatting.
Usage:
gci diff path... [flags]
Flags:
-d, --debug Enables debug output from the formatter
-h, --help help for write
-s, --section strings Sections define how inputs will be processed. Section names are case-insensitive and may contain parameters in (). The section order is standard > default > custom > blank > dot. The default value is [standard,default].
standard - standard section that Golang provides officially, like "fmt"
Prefix(github.com/daixiang0) - custom section, groups all imports with the specified Prefix. Imports will be matched to the longest Prefix.
default - default section, contains all rest imports
blank - blank section, contains all blank imports. This section is not present unless explicitly enabled.
dot - dot section, contains all dot imports. This section is not present unless explicitly enabled. (default [standard,default])
--skip-generated Skip generated files
--custom-order Enable custom order of sections. If specified, make the section order the same as your configuration order. The default order is standard > default > custom > blank > dot.
```
### Old style
```shell
Usage:
gci [-diff | -write] [--local localPackageURLs] path... [flags]
Flags:
-d, --diff display diffs instead of rewriting files
-h, --help help for gci
-l, --local strings put imports beginning with this string after 3rd-party packages, separate imports by comma
-v, --version version for gci
-w, --write write result to (source) file instead of stdout
```
**Note**:
The old style is only for local tests and will be deprecated; please use the new style. `golangci-lint` uses the new style as well.
## Examples
Run `gci write -s standard -s default -s "prefix(github.com/daixiang0/gci)" main.go` and you will handle following cases:
### simple case
```go
package main
import (
"golang.org/x/tools"
"fmt"
"github.com/daixiang0/gci"
)
```
to
```go
package main
import (
"fmt"
"golang.org/x/tools"
"github.com/daixiang0/gci"
)
```
### with alias
```go
package main
import (
"fmt"
go "github.com/golang"
"github.com/daixiang0/gci"
)
```
to
```go
package main
import (
"fmt"
go "github.com/golang"
"github.com/daixiang0/gci"
)
```
### with blank and dot grouping enabled
```go
package main
import (
"fmt"
go "github.com/golang"
_ "github.com/golang/blank"
. "github.com/golang/dot"
"github.com/daixiang0/gci"
_ "github.com/daixiang0/gci/blank"
. "github.com/daixiang0/gci/dot"
)
```
to
```go
package main
import (
"fmt"
go "github.com/golang"
"github.com/daixiang0/gci"
_ "github.com/daixiang0/gci/blank"
_ "github.com/golang/blank"
. "github.com/daixiang0/gci/dot"
. "github.com/golang/dot"
)
```
## TODO
- Ensure only one blank between `Name` and `Path` in an import block
- Ensure only one blank between `Path` and `Comment` in an import block
- Format comments
- Add more testcases
- Support imports completion (please use `goimports` first then use GCI)
- Optimize comments

27
vendor/github.com/daixiang0/gci/cmd/gci/completion.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
package gci
import (
"strings"
"github.com/spf13/cobra"
)
// subCommandOrGoFileCompletion completes arguments of the root command:
// when the partial input matches any subcommand name or alias, file
// completion is suppressed (cobra already offers the command names);
// otherwise it falls back to Go-file completion.
func subCommandOrGoFileCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	for _, sub := range cmd.Commands() {
		for _, candidate := range append([]string{sub.Name()}, sub.Aliases...) {
			if strings.HasPrefix(candidate, toComplete) {
				// completion for commands is already provided by cobra;
				// do not return file completion
				return []string{}, cobra.ShellCompDirectiveNoFileComp
			}
		}
	}
	return goFileCompletion(cmd, args, toComplete)
}
// goFileCompletion restricts shell completion for path arguments to files
// with the "go" extension.
func goFileCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
	return []string{"go"}, cobra.ShellCompDirectiveFilterFileExt
}

16
vendor/github.com/daixiang0/gci/cmd/gci/diff.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
package gci
import (
"github.com/daixiang0/gci/pkg/gci"
)
// diffCmd represents the diff command
func (e *Executor) initDiff() {
	// Register the "diff" subcommand on the root command; it supports
	// stdin input and renders a patch via gci.DiffFormattedFiles.
	e.newGciCommand(
		"diff path...",
		"Prints a git style diff to STDOUT",
		"Diff prints a patch in the style of the diff tool that contains the required changes to the file to make it adhere to the specified formatting.",
		[]string{}, // no aliases
		true,       // stdin supported
		gci.DiffFormattedFiles)
}

72
vendor/github.com/daixiang0/gci/cmd/gci/gcicommand.go generated vendored Normal file
View File

@@ -0,0 +1,72 @@
package gci
import (
"github.com/spf13/cobra"
"go.uber.org/zap/zapcore"
"github.com/daixiang0/gci/pkg/config"
"github.com/daixiang0/gci/pkg/log"
"github.com/daixiang0/gci/pkg/section"
)
// processingFunc is the per-subcommand formatter entry point, invoked with
// the positional path arguments and the fully parsed configuration.
type processingFunc = func(args []string, gciCfg config.Config) error

// newGciCommand registers a subcommand on the root command that parses the
// shared formatting flags into a config.Config and dispatches the positional
// arguments to processingFunc.
//
// use/short/long/aliases are passed through to cobra. When stdInSupport is
// false the command requires at least one path argument.
//
// Fix: the section help text previously read "presed" instead of "present".
func (e *Executor) newGciCommand(use, short, long string, aliases []string, stdInSupport bool, processingFunc processingFunc) *cobra.Command {
	// Flag values are registered after the command is constructed, so they
	// are held as pointers and dereferenced lazily inside RunE.
	var noInlineComments, noPrefixComments, skipGenerated, customOrder, debug *bool
	var sectionStrings, sectionSeparatorStrings *[]string
	cmd := cobra.Command{
		Use:               use,
		Aliases:           aliases,
		Short:             short,
		Long:              long,
		ValidArgsFunction: goFileCompletion,
		RunE: func(cmd *cobra.Command, args []string) error {
			fmtCfg := config.BoolConfig{
				NoInlineComments: *noInlineComments,
				NoPrefixComments: *noPrefixComments,
				Debug:            *debug,
				SkipGenerated:    *skipGenerated,
				CustomOrder:      *customOrder,
			}
			gciCfg, err := config.YamlConfig{Cfg: fmtCfg, SectionStrings: *sectionStrings, SectionSeparatorStrings: *sectionSeparatorStrings}.Parse()
			if err != nil {
				return err
			}
			if *debug {
				log.SetLevel(zapcore.DebugLevel)
			}
			return processingFunc(args, *gciCfg)
		},
	}
	if !stdInSupport {
		cmd.Args = cobra.MinimumNArgs(1)
	}
	// register command as subcommand
	e.rootCmd.AddCommand(&cmd)
	debug = cmd.Flags().BoolP("debug", "d", false, "Enables debug output from the formatter")
	sectionHelp := `Sections define how inputs will be processed. Section names are case-insensitive and may contain parameters in (). The section order is standard > default > custom > blank > dot. The default value is [standard,default].
standard - standard section that Golang provides officially, like "fmt"
Prefix(github.com/daixiang0) - custom section, groups all imports with the specified Prefix. Imports will be matched to the longest Prefix.
default - default section, contains all rest imports
blank - blank section, contains all blank imports. This section is not present unless explicitly enabled.
dot - dot section, contains all dot imports. This section is not present unless explicitly enabled.`
	skipGenerated = cmd.Flags().Bool("skip-generated", false, "Skip generated files")
	customOrder = cmd.Flags().Bool("custom-order", false, "Enable custom order of sections")
	sectionStrings = cmd.Flags().StringSliceP("section", "s", section.DefaultSections().String(), sectionHelp)
	// deprecated flags, kept for backward compatibility
	// NOTE(review): every MarkDeprecated message below reuses the
	// "Drops inline comments while formatting" wording — presumably a
	// copy-paste; confirm the intended text upstream before changing it.
	noInlineComments = cmd.Flags().Bool("NoInlineComments", false, "Drops inline comments while formatting")
	cmd.Flags().MarkDeprecated("NoInlineComments", "Drops inline comments while formatting")
	noPrefixComments = cmd.Flags().Bool("NoPrefixComments", false, "Drops comment lines above an import statement while formatting")
	cmd.Flags().MarkDeprecated("NoPrefixComments", "Drops inline comments while formatting")
	sectionSeparatorStrings = cmd.Flags().StringSliceP("SectionSeparator", "x", section.DefaultSectionSeparators().String(), "SectionSeparators are inserted between Sections")
	cmd.Flags().MarkDeprecated("SectionSeparator", "Drops inline comments while formatting")
	cmd.Flags().MarkDeprecated("x", "Drops inline comments while formatting")
	return &cmd
}

16
vendor/github.com/daixiang0/gci/cmd/gci/print.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
package gci
import (
"github.com/daixiang0/gci/pkg/gci"
)
// printCmd represents the print command
func (e *Executor) initPrint() {
	// Register "print" (alias "output"); stdin is supported and the
	// formatted result is written to STDOUT by gci.PrintFormattedFiles.
	e.newGciCommand(
		"print path...",
		"Outputs the formatted file to STDOUT",
		"Print outputs the formatted file. If you want to apply the changes to a file use write instead!",
		[]string{"output"}, // alias
		true,               // stdin supported
		gci.PrintFormattedFiles)
}

81
vendor/github.com/daixiang0/gci/cmd/gci/root.go generated vendored Normal file
View File

@@ -0,0 +1,81 @@
package gci
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/daixiang0/gci/pkg/config"
"github.com/daixiang0/gci/pkg/gci"
"github.com/daixiang0/gci/pkg/log"
"github.com/daixiang0/gci/pkg/section"
)
// Executor wires up the gci root command and its subcommands.
type Executor struct {
	rootCmd *cobra.Command
	// diffMode, writeMode and localFlags back the deprecated old-style
	// flags (-d, -w, -l) consumed by runInCompatibilityMode.
	diffMode   *bool
	writeMode  *bool
	localFlags *[]string
}
// NewExecutor builds the root command (which keeps the old-style flag
// interface working), registers the diff/print/write subcommands, and
// returns the ready-to-run Executor.
func NewExecutor(version string) *Executor {
	log.InitLogger()
	defer log.L().Sync()
	e := Executor{}
	rootCmd := cobra.Command{
		Use:   "gci [-diff | -write] [--local localPackageURLs] path...",
		Short: "Gci controls golang package import order and makes it always deterministic",
		Long: "Gci enables automatic formatting of imports in a deterministic manner" +
			"\n" +
			"If you want to integrate this as part of your CI take a look at golangci-lint.",
		ValidArgsFunction: subCommandOrGoFileCompletion,
		Args:              cobra.MinimumNArgs(1),
		Version:           version,
		RunE:              e.runInCompatibilityMode,
	}
	e.rootCmd = &rootCmd
	// old-style flags, superseded by the named subcommands
	e.diffMode = rootCmd.Flags().BoolP("diff", "d", false, "display diffs instead of rewriting files")
	e.writeMode = rootCmd.Flags().BoolP("write", "w", false, "write result to (source) file instead of stdout")
	e.localFlags = rootCmd.Flags().StringSliceP("local", "l", []string{}, "put imports beginning with this string after 3rd-party packages, separate imports by comma")
	e.initDiff()
	e.initPrint()
	e.initWrite()
	return &e
}
// Execute adds all child commands to the root command and sets flags appropriately.
// This is called by main.main(). It only needs to happen once to the rootCmd.
// It returns whatever error the invoked (sub)command produced.
func (e *Executor) Execute() error {
	return e.rootCmd.Execute()
}
// runInCompatibilityMode implements the deprecated old-style CLI
// (gci [-diff | -write] [--local ...] path...): it translates the legacy
// flags into a config.Config and dispatches to the matching formatter.
func (e *Executor) runInCompatibilityMode(cmd *cobra.Command, args []string) error {
	// Workaround since the Deprecation message in Cobra can not be printed to STDERR
	_, _ = fmt.Fprintln(os.Stderr, "Using the old parameters is deprecated, please use the named subcommands!")
	if *e.writeMode && *e.diffMode {
		return fmt.Errorf("diff and write must not be specified at the same time")
	}
	// generate section specification from old localFlags format
	sections := gci.LocalFlagsToSections(*e.localFlags)
	sectionSeparators := section.DefaultSectionSeparators()
	cfg := config.Config{
		BoolConfig: config.BoolConfig{
			NoInlineComments: false,
			NoPrefixComments: false,
			Debug:            false,
			SkipGenerated:    false,
		},
		Sections:          sections,
		SectionSeparators: sectionSeparators,
	}
	if *e.writeMode {
		return gci.WriteFormattedFiles(args, cfg)
	}
	if *e.diffMode {
		return gci.DiffFormattedFiles(args, cfg)
	}
	// neither -w nor -d: print the formatted result to stdout
	return gci.PrintFormattedFiles(args, cfg)
}

16
vendor/github.com/daixiang0/gci/cmd/gci/write.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
package gci
import (
"github.com/daixiang0/gci/pkg/gci"
)
// writeCmd represents the write command
func (e *Executor) initWrite() {
	// Register "write" (alias "overwrite"); stdin is not supported, so at
	// least one path argument is required; files are rewritten in place.
	e.newGciCommand(
		"write path...",
		"Formats the specified files in-place",
		"Write modifies the specified files in-place",
		[]string{"overwrite"}, // alias
		false,                 // no stdin: requires at least one path
		gci.WriteFormattedFiles)
}

18
vendor/github.com/daixiang0/gci/main.go generated vendored Normal file
View File

@@ -0,0 +1,18 @@
package main
import (
"os"
"github.com/daixiang0/gci/cmd/gci"
)
// version is reported by the --version flag of the root command.
var version = "0.9.0"

// main builds the gci command tree and runs it, exiting with a non-zero
// status on any error.
func main() {
	if err := gci.NewExecutor(version).Execute(); err != nil {
		os.Exit(1)
	}
}

90
vendor/github.com/daixiang0/gci/pkg/config/config.go generated vendored Normal file
View File

@@ -0,0 +1,90 @@
package config
import (
"io/ioutil"
"sort"
"strings"
"gopkg.in/yaml.v3"
"github.com/daixiang0/gci/pkg/section"
)
// defaultOrder fixes the relative ordering of section types when custom
// ordering is disabled: standard > default > custom > blank > dot.
var defaultOrder = map[string]int{
	section.StandardType: 0,
	section.DefaultType:  1,
	section.CustomType:   2,
	section.BlankType:    3,
	section.DotType:      4,
}
// BoolConfig holds the boolean formatting switches shared between the CLI
// flags and the YAML configuration.
type BoolConfig struct {
	NoInlineComments bool `yaml:"no-inlineComments"` // drop inline comments (deprecated flag)
	NoPrefixComments bool `yaml:"no-prefixComments"` // drop comments above imports (deprecated flag)
	Debug            bool `yaml:"-"`                 // verbose logging; never read from YAML
	SkipGenerated    bool `yaml:"skipGenerated"`     // skip files detected as generated
	CustomOrder      bool `yaml:"customOrder"`       // keep sections in the user-given order
}
// Config is the fully parsed gci configuration: the boolean switches plus
// the resolved section and section-separator lists.
type Config struct {
	BoolConfig
	Sections          section.SectionList
	SectionSeparators section.SectionList
}
// YamlConfig is the raw string-based representation of the configuration
// (from YAML or CLI flags); Parse resolves it into a Config.
type YamlConfig struct {
	Cfg                     BoolConfig `yaml:",inline"`
	SectionStrings          []string   `yaml:"sections"`
	SectionSeparatorStrings []string   `yaml:"sectionseparators"`
}
// Parse resolves the string-based YamlConfig into a Config. Section strings
// are parsed (falling back to defaults when empty) and, unless CustomOrder
// is set, sorted into the fixed defaultOrder; ties between sections of the
// same type are broken alphabetically by their string form.
func (g YamlConfig) Parse() (*Config, error) {
	var err error
	sections, err := section.Parse(g.SectionStrings)
	if err != nil {
		return nil, err
	}
	if sections == nil {
		sections = section.DefaultSections()
	}
	// if default order sorted sections
	if !g.Cfg.CustomOrder {
		sort.Slice(sections, func(i, j int) bool {
			sectionI, sectionJ := sections[i].Type(), sections[j].Type()
			if strings.Compare(sectionI, sectionJ) == 0 {
				// same type: deterministic tie-break on the string form
				return strings.Compare(sections[i].String(), sections[j].String()) < 0
			}
			return defaultOrder[sectionI] < defaultOrder[sectionJ]
		})
	}
	sectionSeparators, err := section.Parse(g.SectionSeparatorStrings)
	if err != nil {
		return nil, err
	}
	if sectionSeparators == nil {
		sectionSeparators = section.DefaultSectionSeparators()
	}
	return &Config{g.Cfg, sections, sectionSeparators}, nil
}
// InitializeGciConfigFromYAML reads the YAML file at filePath, unmarshals it
// into a YamlConfig and returns the parsed Config.
func InitializeGciConfigFromYAML(filePath string) (*Config, error) {
	config := YamlConfig{}
	yamlData, err := ioutil.ReadFile(filePath)
	if err != nil {
		return nil, err
	}
	err = yaml.Unmarshal(yamlData, &config)
	if err != nil {
		return nil, err
	}
	gciCfg, err := config.Parse()
	if err != nil {
		return nil, err
	}
	return gciCfg, nil
}

46
vendor/github.com/daixiang0/gci/pkg/format/format.go generated vendored Normal file
View File

@@ -0,0 +1,46 @@
package format
import (
"fmt"
"github.com/daixiang0/gci/pkg/config"
"github.com/daixiang0/gci/pkg/log"
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/section"
"github.com/daixiang0/gci/pkg/specificity"
)
// Block records the byte offsets of one import declaration; callers slice
// the original source as src[Start:End].
type Block struct {
	Start, End int
}

// resultMap groups the import blocks by the String() form of the section
// each one matched.
type resultMap map[string][]*Block
// Format assigns every parsed import to the most specific matching section
// from cfg.Sections and returns the byte ranges grouped by section name.
//
// An import matched equally well by two sections currently yields (nil, nil)
// rather than an error; an import matching no section at all is reported via
// section.NoMatchingSectionForImportError.
func Format(data []*parse.GciImports, cfg *config.Config) (resultMap, error) {
	result := make(resultMap, len(cfg.Sections))
	for _, imp := range data {
		// determine match specificity for every available section
		var best section.Section
		var bestSpec specificity.MatchSpecificity = specificity.MisMatch{}
		for _, sec := range cfg.Sections {
			spec := sec.MatchSpecificity(imp)
			if spec.IsMoreSpecific(specificity.MisMatch{}) && spec.Equal(bestSpec) {
				// specificity is identical
				// return nil, section.EqualSpecificityMatchError{}
				return nil, nil
			}
			if spec.IsMoreSpecific(bestSpec) {
				// better match found
				bestSpec = spec
				best = sec
			}
		}
		if best == nil {
			return nil, section.NoMatchingSectionForImportError{Imports: imp}
		}
		log.L().Debug(fmt.Sprintf("Matched import %v to section %s", imp, best))
		result[best.String()] = append(result[best.String()], &Block{imp.Start, imp.End})
	}
	return result, nil
}

209
vendor/github.com/daixiang0/gci/pkg/gci/gci.go generated vendored Normal file
View File

@@ -0,0 +1,209 @@
package gci
import (
"bytes"
"errors"
"fmt"
goFormat "go/format"
"os"
"sync"
"github.com/hexops/gotextdiff"
"github.com/hexops/gotextdiff/myers"
"github.com/hexops/gotextdiff/span"
"golang.org/x/sync/errgroup"
"github.com/daixiang0/gci/pkg/config"
"github.com/daixiang0/gci/pkg/format"
"github.com/daixiang0/gci/pkg/io"
"github.com/daixiang0/gci/pkg/log"
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/section"
"github.com/daixiang0/gci/pkg/utils"
)
// LocalFlagsToSections converts the legacy --local flag values into a
// section list.
// NOTE(review): the conversion loop is commented out, so localFlags is
// currently ignored and the defaults are always returned — confirm whether
// this is intentional upstream.
func LocalFlagsToSections(localFlags []string) section.SectionList {
	sections := section.DefaultSections()
	// Add all local arguments as ImportPrefix sections
	// for _, l := range localFlags {
	// 	sections = append(sections, section.Section{l, nil, nil})
	// }
	return sections
}
// PrintFormattedFiles formats the Go files found under paths (plus standard
// input, via io.StdInGenerator) and writes each formatted result to stdout.
func PrintFormattedFiles(paths []string, cfg config.Config) error {
	emit := func(path string, before, after []byte) error {
		fmt.Print(string(after))
		return nil
	}
	return processStdInAndGoFilesInPaths(paths, cfg, emit)
}
// WriteFormattedFiles formats the Go files found under paths and rewrites
// every file whose content changed; already-formatted files are skipped.
func WriteFormattedFiles(paths []string, cfg config.Config) error {
	return processGoFilesInPaths(paths, cfg, func(path string, before, after []byte) error {
		if !bytes.Equal(before, after) {
			log.L().Info(fmt.Sprintf("Writing formatted File: %s", path))
			return os.WriteFile(path, after, 0o644)
		}
		log.L().Debug(fmt.Sprintf("Skipping correctly formatted File: %s", path))
		return nil
	})
}
// DiffFormattedFiles formats the Go files found under paths (and stdin) and
// prints a unified diff between the original and the formatted content.
func DiffFormattedFiles(paths []string, cfg config.Config) error {
	return processStdInAndGoFilesInPaths(paths, cfg, func(path string, before, after []byte) error {
		oldContent := string(before)
		edits := myers.ComputeEdits(span.URIFromPath(path), oldContent, string(after))
		fmt.Printf("%v", gotextdiff.ToUnified(path, path, oldContent, edits))
		return nil
	})
}
// DiffFormattedFilesToArray is the collecting variant of DiffFormattedFiles:
// instead of printing, each unified diff is appended to *diffs under lock,
// since files are processed concurrently by ProcessFiles.
func DiffFormattedFilesToArray(paths []string, cfg config.Config, diffs *[]string, lock *sync.Mutex) error {
	log.InitLogger()
	defer log.L().Sync()
	return processStdInAndGoFilesInPaths(paths, cfg, func(filePath string, unmodifiedFile, formattedFile []byte) error {
		fileURI := span.URIFromPath(filePath)
		edits := myers.ComputeEdits(fileURI, string(unmodifiedFile), string(formattedFile))
		unifiedEdits := gotextdiff.ToUnified(filePath, filePath, string(unmodifiedFile), edits)
		// guard the shared slice against concurrent appends
		lock.Lock()
		*diffs = append(*diffs, fmt.Sprint(unifiedEdits))
		lock.Unlock()
		return nil
	})
}
// fileFormattingFunc consumes the result of formatting one file: its path
// plus the original and formatted content.
type fileFormattingFunc func(filePath string, unmodifiedFile, formattedFile []byte) error
// processStdInAndGoFilesInPaths formats standard input (via
// io.StdInGenerator) plus all Go files found under paths.
func processStdInAndGoFilesInPaths(paths []string, cfg config.Config, fileFunc fileFormattingFunc) error {
	return ProcessFiles(io.StdInGenerator.Combine(io.GoFilesInPathsGenerator(paths)), cfg, fileFunc)
}
// processGoFilesInPaths formats all Go files found under paths (no stdin).
func processGoFilesInPaths(paths []string, cfg config.Config, fileFunc fileFormattingFunc) error {
	return ProcessFiles(io.GoFilesInPathsGenerator(paths), cfg, fileFunc)
}
// ProcessFiles formats every file produced by fileGenerator, one goroutine
// per file, and returns the first error encountered (if any).
func ProcessFiles(fileGenerator io.FileGeneratorFunc, cfg config.Config, fileFunc fileFormattingFunc) error {
	files, err := fileGenerator()
	if err != nil {
		return err
	}
	var group errgroup.Group
	for _, file := range files {
		// run file processing in parallel
		group.Go(processingFunc(file, cfg, fileFunc))
	}
	return group.Wait()
}
// processingFunc wraps the load/format/emit pipeline for one file into a
// closure suitable for errgroup.Group.Go.
func processingFunc(file io.FileObj, cfg config.Config, formattingFunc fileFormattingFunc) func() error {
	return func() error {
		unmodifiedFile, formattedFile, err := LoadFormatGoFile(file, cfg)
		if err != nil {
			// if errors.Is(err, FileParsingError{}) {
			// 	// do not process files that are improperly formatted
			// 	return nil
			// }
			return err
		}
		return formattingFunc(file.Path(), unmodifiedFile, formattedFile)
	}
}
// LoadFormatGoFile loads file and returns both its original content (src)
// and the content with the import block rewritten according to cfg (dist).
//
// The file is returned unchanged when it is generated (and SkipGenerated is
// set), contains no imports, or contains at most one import. The rebuilt
// source is finally normalized through go/format.
//
// Fix: the section-separator guard used `body != nil && len(body) > 0`;
// the nil check is redundant since len(nil) == 0.
func LoadFormatGoFile(file io.FileObj, cfg config.Config) (src, dist []byte, err error) {
	src, err = file.Load()
	log.L().Debug(fmt.Sprintf("Loaded File: %s", file.Path()))
	if err != nil {
		return nil, nil, err
	}
	if cfg.SkipGenerated && parse.IsGeneratedFileByComment(string(src)) {
		return src, src, nil
	}
	imports, headEnd, tailStart, cStart, cEnd, err := parse.ParseFile(src, file.Path())
	if err != nil {
		if errors.Is(err, parse.NoImportError{}) {
			return src, src, nil
		}
		return nil, nil, err
	}
	// do not do format if only one import
	if len(imports) <= 1 {
		return src, src, nil
	}
	result, err := format.Format(imports, &cfg)
	if err != nil {
		return nil, nil, err
	}
	firstWithIndex := true
	var body []byte
	// rebuild the import block body in the configured section order,
	// separating non-empty sections with a blank line
	for _, s := range cfg.Sections {
		if len(result[s.String()]) > 0 {
			if len(body) > 0 {
				body = append(body, utils.Linebreak)
			}
			for _, d := range result[s.String()] {
				AddIndent(&body, &firstWithIndex)
				body = append(body, src[d.Start:d.End]...)
			}
		}
	}
	// copy everything before and after the original import block
	head := make([]byte, headEnd)
	copy(head, src[:headEnd])
	tail := make([]byte, len(src)-tailStart)
	copy(tail, src[tailStart:])
	head = append(head, utils.Linebreak)
	// ensure the C import block (if present) comes before the regular block
	if cStart != 0 {
		head = append(head, src[cStart:cEnd]...)
		head = append(head, utils.Linebreak)
	}
	// add beginning of import block
	head = append(head, `import (`...)
	// add end of import block
	body = append(body, []byte{utils.RightParenthesis, utils.Linebreak}...)
	log.L().Debug(fmt.Sprintf("head:\n%s", head))
	log.L().Debug(fmt.Sprintf("body:\n%s", body))
	if len(tail) > 20 {
		log.L().Debug(fmt.Sprintf("tail:\n%s", tail[:20]))
	} else {
		log.L().Debug(fmt.Sprintf("tail:\n%s", tail))
	}
	// stitch head + body + tail into one pre-sized buffer
	var totalLen int
	slices := [][]byte{head, body, tail}
	for _, s := range slices {
		totalLen += len(s)
	}
	dist = make([]byte, totalLen)
	var i int
	for _, s := range slices {
		i += copy(dist[i:], s)
	}
	log.L().Debug(fmt.Sprintf("raw:\n%s", dist))
	dist, err = goFormat.Source(dist)
	if err != nil {
		return nil, nil, err
	}
	return src, dist, nil
}
// AddIndent appends one indent byte to in before every import line except
// the very first one (tracked via first), which needs no leading indent.
func AddIndent(in *[]byte, first *bool) {
	if !*first {
		*in = append(*in, utils.Indent)
		return
	}
	*first = false
}

59
vendor/github.com/daixiang0/gci/pkg/io/file.go generated vendored Normal file
View File

@@ -0,0 +1,59 @@
package io
import "io/ioutil"
// FileObj allows mocking the access to files
type FileObj interface {
	// Load returns the file contents.
	Load() ([]byte, error)
	// Path returns the identifier (file-system path or placeholder) of the file.
	Path() string
}
// File represents a file that can be loaded from the file system
type File struct {
	// FilePath is the file-system path the file is read from.
	FilePath string
}
// Path returns the configured file-system path.
func (f File) Path() string {
	return f.FilePath
}
// Load reads the whole file into memory.
// NOTE(review): io/ioutil is deprecated since Go 1.16; os.ReadFile is the
// drop-in replacement — left unchanged here because this is vendored code.
func (f File) Load() ([]byte, error) {
	return ioutil.ReadFile(f.FilePath)
}
// FileGeneratorFunc returns a list of files that can be loaded and processed
type FileGeneratorFunc func() ([]FileObj, error)
// Combine returns a generator that yields the files of a followed by the
// files of b; the first error from either generator aborts the combination.
func (a FileGeneratorFunc) Combine(b FileGeneratorFunc) FileGeneratorFunc {
	return func() ([]FileObj, error) {
		files, err := a()
		if err != nil {
			return nil, err
		}
		additionalFiles, err := b()
		if err != nil {
			return nil, err
		}
		files = append(files, additionalFiles...)
		// err is always nil here; returned for symmetry with the signature
		return files, err
	}
}
// GoFilesInPathsGenerator returns a generator yielding every Go file found
// under the given paths.
func GoFilesInPathsGenerator(paths []string) FileGeneratorFunc {
	return FilesInPathsGenerator(paths, isGoFile)
}
// FilesInPathsGenerator returns a generator yielding every file under the
// given paths (files or directories) accepted by fileCheckFun.
func FilesInPathsGenerator(paths []string, fileCheckFun fileCheckFunction) FileGeneratorFunc {
	return func() ([]FileObj, error) {
		var collected []FileObj
		for _, p := range paths {
			matches, err := FindFilesForPath(p, fileCheckFun)
			if err != nil {
				return nil, err
			}
			for _, match := range matches {
				collected = append(collected, File{match})
			}
		}
		return collected, nil
	}
}

47
vendor/github.com/daixiang0/gci/pkg/io/search.go generated vendored Normal file
View File

@@ -0,0 +1,47 @@
package io
import (
"io/fs"
"os"
"path/filepath"
)
type fileCheckFunction func(file os.FileInfo) bool
func FindFilesForPath(path string, fileCheckFun fileCheckFunction) ([]string, error) {
switch entry, err := os.Stat(path); {
case err != nil:
return nil, err
case entry.IsDir():
return findFilesForDirectory(path, fileCheckFun)
case fileCheckFun(entry):
return []string{filepath.Clean(path)}, nil
default:
return []string{}, nil
}
}
func findFilesForDirectory(dirPath string, fileCheckFun fileCheckFunction) ([]string, error) {
var filePaths []string
err := filepath.WalkDir(dirPath, func(path string, entry fs.DirEntry, err error) error {
if err != nil {
return err
}
file, err := entry.Info()
if err != nil {
return err
}
if !entry.IsDir() && fileCheckFun(file) {
filePaths = append(filePaths, filepath.Clean(path))
}
return nil
})
if err != nil {
return nil, err
}
return filePaths, nil
}
func isGoFile(file os.FileInfo) bool {
return !file.IsDir() && filepath.Ext(file.Name()) == ".go"
}

27
vendor/github.com/daixiang0/gci/pkg/io/stdin.go generated vendored Normal file
View File

@@ -0,0 +1,27 @@
package io
import (
"io/ioutil"
"os"
)
// stdInFile adapts standard input to the FileObj interface.
type stdInFile struct{}
// Load reads all of stdin into memory.
// NOTE(review): ioutil.ReadAll is deprecated since Go 1.16; io.ReadAll is
// the modern equivalent — left unchanged because this is vendored code.
func (s stdInFile) Load() ([]byte, error) {
	return ioutil.ReadAll(os.Stdin)
}
// Path returns a placeholder name, since stdin has no file-system path.
func (s stdInFile) Path() string {
	return "StdIn"
}
// StdInGenerator yields a single pseudo-file for stdin when stdin is piped
// (i.e. not a character device / interactive terminal) and nothing otherwise.
var StdInGenerator FileGeneratorFunc = func() ([]FileObj, error) {
	stat, err := os.Stdin.Stat()
	if err != nil {
		return nil, err
	}
	if (stat.Mode() & os.ModeCharDevice) == 0 {
		return []FileObj{stdInFile{}}, nil
	}
	return []FileObj{}, nil
}

50
vendor/github.com/daixiang0/gci/pkg/log/log.go generated vendored Normal file
View File

@@ -0,0 +1,50 @@
package log
import (
"sync"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
)
// Use L to log with Zap
var logger *zap.Logger
// Keep the config to reference the atomicLevel for changing levels
var logConfig zap.Config
// doOnce guards InitLogger so the logger is built exactly once.
var doOnce sync.Once
// InitLogger sets up the logger
func InitLogger() {
	doOnce.Do(func() {
		logConfig = zap.NewDevelopmentConfig()
		logConfig.EncoderConfig.TimeKey = "timestamp"
		logConfig.EncoderConfig.EncodeTime = zapcore.ISO8601TimeEncoder
		logConfig.Level.SetLevel(zapcore.InfoLevel)
		logConfig.OutputPaths = []string{"stderr"}
		var err error
		logger, err = logConfig.Build()
		if err != nil {
			// no usable logger to report through at this point; fail fast
			panic(err)
		}
	})
}
// SetLevel allows you to set the level of the default gci logger.
// This will not work if you replace the logger
func SetLevel(level zapcore.Level) {
	logConfig.Level.SetLevel(level)
}
// L returns the logger
func L() *zap.Logger {
	return logger
}
// SetLogger allows you to set the logger to whatever you want
func SetLogger(l *zap.Logger) {
	logger = l
}

196
vendor/github.com/daixiang0/gci/pkg/parse/parse.go generated vendored Normal file
View File

@@ -0,0 +1,196 @@
package parse
import (
"go/ast"
"go/parser"
"go/token"
"sort"
"strings"
)
// C is the quoted import path of cgo imports (`import "C"`), which are
// handled separately from regular imports.
const C = "\"C\""
// GciImports describes a single import together with its byte range in the
// original source.
type GciImports struct {
	// original index of import group, include doc, name, path and comment
	Start, End int
	Name, Path string
}
// ImportList implements sort.Interface, ordering imports by path first and
// by name for identical paths.
type ImportList []*GciImports
func (l ImportList) Len() int {
	return len(l)
}
func (l ImportList) Less(i, j int) bool {
	// Direct string operators replace strings.Compare; the strings package
	// documentation itself recommends ==/< as clearer and faster.
	if l[i].Path == l[j].Path {
		return l[i].Name < l[j].Name
	}
	return l[i].Path < l[j].Path
}
func (l ImportList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
/*
 * AST considers an import block as below:
 * ```
 * Doc
 * Name Path Comment
 * ```
 * An example is like below:
 * ```
 * // test
 * test "fmt" // test
 * ```
 * getImports returns an import block with name, start and end index.
 * start/end are byte offsets into the source: token.Pos is 1-based, hence
 * the -1 adjustments for start. end uses End() without a -1, so the slice
 * src[start:end] also carries the byte immediately after the spec (normally
 * the trailing newline) — NOTE(review): downstream code appears to rely on
 * that trailing byte when re-emitting imports; confirm before changing.
 */
func getImports(imp *ast.ImportSpec) (start, end int, name string) {
	if imp.Doc != nil {
		// doc poc need minus one to get the first index of comment
		start = int(imp.Doc.Pos()) - 1
	} else {
		if imp.Name != nil {
			// name pos need minus one too
			start = int(imp.Name.Pos()) - 1
		} else {
			// path pos start without quote, need minus one for it
			start = int(imp.Path.Pos()) - 1
		}
	}
	if imp.Name != nil {
		// aliased import: record the alias so sections can match on it
		name = imp.Name.Name
	}
	if imp.Comment != nil {
		end = int(imp.Comment.End())
	} else {
		end = int(imp.Path.End())
	}
	return
}
// ParseFile parses src (attributed to filename for diagnostics) and returns:
//   - the imports, sorted by path then name,
//   - headEnd: byte offset where the first import declaration starts,
//   - tailStart: byte offset just past the last import declaration,
//   - cStart/cEnd: byte range of the cgo ("C") import block (0,0 if absent).
// It returns NoImportError when the file declares no imports at all.
func ParseFile(src []byte, filename string) (ImportList, int, int, int, int, error) {
	fileSet := token.NewFileSet()
	f, err := parser.ParseFile(fileSet, filename, src, parser.ParseComments)
	if err != nil {
		return nil, 0, 0, 0, 0, err
	}
	if len(f.Imports) == 0 {
		return nil, 0, 0, 0, 0, NoImportError{}
	}
	var (
		// headEnd means the start of import block
		headEnd int
		// tailStart means the end + 1 of import block
		tailStart int
		// cStart means the start of C import block
		cStart int
		// cEnd means the end of C import block
		cEnd int
		data ImportList
	)
	for index, decl := range f.Decls {
		switch decl.(type) {
		// skip BadDecl and FuncDecl
		case *ast.GenDecl:
			genDecl := decl.(*ast.GenDecl)
			if genDecl.Tok == token.IMPORT {
				// there are two cases, both end with linebreak:
				// 1.
				// import (
				//   "xxxx"
				// )
				// 2.
				// import "xxx"
				// only the first import declaration fixes headEnd
				if headEnd == 0 {
					headEnd = int(decl.Pos()) - 1
				}
				// tailStart advances to just past the latest import declaration
				tailStart = int(decl.End())
				for _, spec := range genDecl.Specs {
					imp := spec.(*ast.ImportSpec)
					// there are only one C import block
					// ensure C import block is the first import block
					if imp.Path.Value == C {
						/*
							common case:
							// #include <png.h>
							import "C"
							notice that decl.Pos() == genDecl.Pos() > genDecl.Doc.Pos()
						*/
						if genDecl.Doc != nil {
							// include the cgo preamble comment in the C block
							cStart = int(genDecl.Doc.Pos()) - 1
							// if C import block is the first, update headEnd
							if index == 0 {
								headEnd = cStart
							}
						} else {
							/*
								special case:
								import "C"
							*/
							cStart = int(decl.Pos()) - 1
						}
						cEnd = int(decl.End())
						continue
					}
					start, end, name := getImports(imp)
					data = append(data, &GciImports{
						Start: start,
						End: end,
						Name: name,
						Path: strings.Trim(imp.Path.Value, `"`),
					})
				}
			}
		}
	}
	sort.Sort(data)
	return data, headEnd, tailStart, cStart, cEnd, nil
}
// IsGeneratedFileByComment reports whether the source file is generated code.
// The rules are slightly laxer than https://golang.org/s/generatedcode so
// that more generated code is matched.
// Taken from https://github.com/golangci/golangci-lint.
func IsGeneratedFileByComment(in string) bool {
	lowered := strings.ToLower(in)
	for _, marker := range []string{
		"code generated",     // standard Go convention
		"do not edit",        // common generator phrasing
		"autogenerated file", // easyjson
	} {
		if strings.Contains(lowered, marker) {
			return true
		}
	}
	return false
}
// NoImportError is returned by ParseFile when the file contains no import
// declarations at all; callers treat it as "nothing to do", not a failure.
type NoImportError struct{}
func (n NoImportError) Error() string {
	return "No imports"
}
// Is makes errors.Is(err, NoImportError{}) succeed for wrapped errors.
// Receiver renamed from i to n for consistency with Error above (Go
// convention: one receiver name per type).
func (n NoImportError) Is(err error) bool {
	_, ok := err.(NoImportError)
	return ok
}

25
vendor/github.com/daixiang0/gci/pkg/section/blank.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
package section
import (
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/specificity"
)
// Blank matches imports that are blank-imported (aliased to "_").
type Blank struct{}
const BlankType = "blank"
// MatchSpecificity reports a name match for blank imports and a mismatch
// for everything else.
func (b Blank) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity {
	if spec.Name != "_" {
		return specificity.MisMatch{}
	}
	return specificity.NameMatch{}
}
func (b Blank) String() string {
	return BlankType
}
func (b Blank) Type() string {
	return BlankType
}

View File

@@ -0,0 +1,24 @@
package section
import (
"fmt"
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/specificity"
)
// CommentLine is a section that renders a fixed comment line in the output;
// it never claims any import.
type CommentLine struct {
	// Comment is the literal comment text to emit.
	Comment string
}
// MatchSpecificity always reports a mismatch: comment lines are layout-only.
func (c CommentLine) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity {
	return specificity.MisMatch{}
}
func (c CommentLine) String() string {
	return fmt.Sprintf("commentline(%s)", c.Comment)
}
func (c CommentLine) Type() string {
	return "commentline"
}

22
vendor/github.com/daixiang0/gci/pkg/section/default.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
package section
import (
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/specificity"
)
const DefaultType = "default"
// Default is the catch-all section for imports no other section claims.
type Default struct{}
// MatchSpecificity always reports default specificity, so any concrete
// section match takes precedence over this section.
func (d Default) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity {
	return specificity.Default{}
}
func (d Default) String() string {
	return DefaultType
}
func (d Default) Type() string {
	return DefaultType
}

25
vendor/github.com/daixiang0/gci/pkg/section/dot.go generated vendored Normal file
View File

@@ -0,0 +1,25 @@
package section
import (
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/specificity"
)
// Dot matches dot-imports (imports aliased to "."), whose names are merged
// into the importing package's namespace.
type Dot struct{}
const DotType = "dot"
// MatchSpecificity reports a name match for dot imports and a mismatch
// for everything else.
func (d Dot) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity {
	if spec.Name != "." {
		return specificity.MisMatch{}
	}
	return specificity.NameMatch{}
}
func (d Dot) String() string {
	return DotType
}
func (d Dot) Type() string {
	return DotType
}

107
vendor/github.com/daixiang0/gci/pkg/section/errors.go generated vendored Normal file
View File

@@ -0,0 +1,107 @@
package section
import (
"errors"
"fmt"
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/utils"
)
// SectionParsingError wraps an error hit while parsing a single section
// definition string.
type SectionParsingError struct {
	error
}
func (s SectionParsingError) Unwrap() error {
	return s.error
}
// Wrap annotates the error with the section string that failed to parse.
func (s SectionParsingError) Wrap(sectionStr string) error {
	return fmt.Errorf("failed to parse section %q: %w", sectionStr, s)
}
func (s SectionParsingError) Is(err error) bool {
	_, ok := err.(SectionParsingError)
	return ok
}
var MissingParameterClosingBracketsError = fmt.Errorf("section parameter is missing closing %q", utils.RightParenthesis)
// NOTE(review): this message talks about a parameter *start* sequence but
// formats utils.RightParenthesis; utils.LeftParenthesis looks intended —
// TODO confirm against upstream before changing vendored code.
var MoreThanOneOpeningQuotesError = fmt.Errorf("found more than one %q parameter start sequences", utils.RightParenthesis)
var SectionTypeDoesNotAcceptParametersError = errors.New("section type does not accept a parameter")
var SectionTypeDoesNotAcceptPrefixError = errors.New("section may not contain a Prefix")
var SectionTypeDoesNotAcceptSuffixError = errors.New("section may not contain a Suffix")
// EqualSpecificityMatchError reports an import that matched two sections
// with identical specificity, making section assignment ambiguous.
type EqualSpecificityMatchError struct {
	Imports *parse.GciImports
	SectionA, SectionB Section
}
func (e EqualSpecificityMatchError) Error() string {
	return fmt.Sprintf("Import %v matched section %s and %s equally", e.Imports, e.SectionA, e.SectionB)
}
func (e EqualSpecificityMatchError) Is(err error) bool {
	_, ok := err.(EqualSpecificityMatchError)
	return ok
}
// NoMatchingSectionForImportError reports an import no configured section accepted.
type NoMatchingSectionForImportError struct {
	Imports *parse.GciImports
}
func (n NoMatchingSectionForImportError) Error() string {
	return fmt.Sprintf("No section found for Import: %v", n.Imports)
}
func (n NoMatchingSectionForImportError) Is(err error) bool {
	_, ok := err.(NoMatchingSectionForImportError)
	return ok
}
// InvalidImportSplitError reports that separating an import line from its
// inline comment produced an unexpected number of segments.
type InvalidImportSplitError struct {
	segments []string
}
func (i InvalidImportSplitError) Error() string {
	return fmt.Sprintf("separating the inline comment from the import yielded an invalid number of segments: %v", i.segments)
}
func (i InvalidImportSplitError) Is(err error) bool {
	_, ok := err.(InvalidImportSplitError)
	return ok
}
// InvalidAliasSplitError reports that separating an import alias from its
// path produced an unexpected number of segments.
type InvalidAliasSplitError struct {
	segments []string
}
func (i InvalidAliasSplitError) Error() string {
	return fmt.Sprintf("separating the alias from the path yielded an invalid number of segments: %v", i.segments)
}
func (i InvalidAliasSplitError) Is(err error) bool {
	_, ok := err.(InvalidAliasSplitError)
	return ok
}
// Sentinel whole-file parsing failures.
var (
	MissingImportStatementError = FileParsingError{errors.New("no import statement present in File")}
	ImportStatementNotClosedError = FileParsingError{errors.New("import statement not closed")}
)
// FileParsingError wraps an error hit while parsing a whole file.
type FileParsingError struct {
	error
}
func (f FileParsingError) Unwrap() error {
	return f.error
}
func (f FileParsingError) Is(err error) bool {
	_, ok := err.(FileParsingError)
	return ok
}

22
vendor/github.com/daixiang0/gci/pkg/section/newline.go generated vendored Normal file
View File

@@ -0,0 +1,22 @@
package section
import (
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/specificity"
)
const newLineName = "newline"
// NewLine is a separator section that renders a blank line between other
// sections; it never claims an import.
type NewLine struct{}
func (n NewLine) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity {
	return specificity.MisMatch{}
}
func (n NewLine) String() string {
	return newLineName
}
func (n NewLine) Type() string {
	return newLineName
}

44
vendor/github.com/daixiang0/gci/pkg/section/parser.go generated vendored Normal file
View File

@@ -0,0 +1,44 @@
package section
import (
"errors"
"fmt"
"strings"
)
// Parse converts the string representation of a section configuration
// (e.g. "standard", "prefix(github.com/foo)") into a SectionList.
// It returns (nil, nil) when data is empty or contains an empty entry, and
// an error listing every unknown section keyword otherwise.
// Matching is case-insensitive, but prefix/commentline parameters keep the
// original casing of d.
func Parse(data []string) (SectionList, error) {
	if len(data) == 0 {
		return nil, nil
	}
	var list SectionList
	var errString string
	for _, d := range data {
		s := strings.ToLower(d)
		if len(s) == 0 {
			return nil, nil
		}
		switch {
		case s == "default":
			list = append(list, Default{})
		case s == "standard":
			list = append(list, Standard{})
		case s == "newline":
			list = append(list, NewLine{})
		case strings.HasPrefix(s, "prefix(") && len(d) > 8:
			list = append(list, Custom{d[7 : len(d)-1]})
		case strings.HasPrefix(s, "commentline(") && len(d) > 13:
			// NOTE(review): upstream appends Custom here rather than
			// CommentLine — kept as-is to preserve vendored behavior.
			list = append(list, Custom{d[12 : len(d)-1]})
		case s == "dot":
			list = append(list, Dot{})
		case s == "blank":
			list = append(list, Blank{})
		default:
			errString += fmt.Sprintf(" %s", s)
		}
	}
	if errString != "" {
		// errors.New with plain concatenation replaces the former
		// errors.New(fmt.Sprintf(...)) (staticcheck S1028)
		return nil, errors.New("invalid params:" + errString)
	}
	return list, nil
}

30
vendor/github.com/daixiang0/gci/pkg/section/prefix.go generated vendored Normal file
View File

@@ -0,0 +1,30 @@
package section
import (
"fmt"
"strings"
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/specificity"
)
// Custom matches imports whose path starts with a configured prefix.
type Custom struct {
	// Prefix is the import-path prefix this section claims.
	Prefix string
}
const CustomType = "custom"
// MatchSpecificity reports a prefix match (weighted by prefix length, so
// longer prefixes win) when the import path starts with c.Prefix, and a
// mismatch otherwise.
func (c Custom) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity {
	if !strings.HasPrefix(spec.Path, c.Prefix) {
		return specificity.MisMatch{}
	}
	return specificity.Match{Length: len(c.Prefix)}
}
func (c Custom) String() string {
	return fmt.Sprintf("prefix(%s)", c.Prefix)
}
func (c Custom) Type() string {
	return CustomType
}

36
vendor/github.com/daixiang0/gci/pkg/section/section.go generated vendored Normal file
View File

@@ -0,0 +1,36 @@
package section
import (
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/specificity"
)
// Section defines a part of the formatted output.
type Section interface {
	// MatchSpecificity returns how well an Import matches to this Section
	MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity
	// String Implements the stringer interface
	String() string
	// return section type
	Type() string
}
// SectionList is an ordered list of sections; the order determines the
// order of the formatted output.
type SectionList []Section
// String returns the string form of every section, preserving order.
func (list SectionList) String() []string {
	var output []string
	for _, section := range list {
		output = append(output, section.String())
	}
	return output
}
// DefaultSections returns the sections used when none are configured:
// standard library first, then everything else.
func DefaultSections() SectionList {
	return SectionList{Standard{}, Default{}}
}
// DefaultSectionSeparators returns the separator placed between sections.
func DefaultSectionSeparators() SectionList {
	return SectionList{NewLine{}}
}

View File

@@ -0,0 +1,30 @@
package section
import (
"github.com/daixiang0/gci/pkg/parse"
"github.com/daixiang0/gci/pkg/specificity"
)
const StandardType = "standard"
// Standard matches imports from the Go standard library, as listed in the
// generated standardPackages table.
type Standard struct{}
// MatchSpecificity reports a standard-library match for stdlib paths and a
// mismatch otherwise.
func (s Standard) MatchSpecificity(spec *parse.GciImports) specificity.MatchSpecificity {
	if isStandard(spec.Path) {
		return specificity.StandardMatch{}
	}
	return specificity.MisMatch{}
}
func (s Standard) String() string {
	return StandardType
}
func (s Standard) Type() string {
	return StandardType
}
// isStandard reports whether pkg is a standard-library import path.
func isStandard(pkg string) bool {
	_, ok := standardPackages[pkg]
	return ok
}

View File

@@ -0,0 +1,160 @@
package section
// Code generated based on go1.19.2. DO NOT EDIT.
var standardPackages = map[string]struct{}{
"archive/tar": {},
"archive/zip": {},
"bufio": {},
"bytes": {},
"compress/bzip2": {},
"compress/flate": {},
"compress/gzip": {},
"compress/lzw": {},
"compress/zlib": {},
"container/heap": {},
"container/list": {},
"container/ring": {},
"context": {},
"crypto": {},
"crypto/aes": {},
"crypto/cipher": {},
"crypto/des": {},
"crypto/dsa": {},
"crypto/ecdsa": {},
"crypto/ed25519": {},
"crypto/elliptic": {},
"crypto/hmac": {},
"crypto/md5": {},
"crypto/rand": {},
"crypto/rc4": {},
"crypto/rsa": {},
"crypto/sha1": {},
"crypto/sha256": {},
"crypto/sha512": {},
"crypto/subtle": {},
"crypto/tls": {},
"crypto/x509": {},
"crypto/x509/pkix": {},
"database/sql": {},
"database/sql/driver": {},
"debug/buildinfo": {},
"debug/dwarf": {},
"debug/elf": {},
"debug/gosym": {},
"debug/macho": {},
"debug/pe": {},
"debug/plan9obj": {},
"embed": {},
"encoding": {},
"encoding/ascii85": {},
"encoding/asn1": {},
"encoding/base32": {},
"encoding/base64": {},
"encoding/binary": {},
"encoding/csv": {},
"encoding/gob": {},
"encoding/hex": {},
"encoding/json": {},
"encoding/pem": {},
"encoding/xml": {},
"errors": {},
"expvar": {},
"flag": {},
"fmt": {},
"go/ast": {},
"go/build": {},
"go/build/constraint": {},
"go/constant": {},
"go/doc": {},
"go/doc/comment": {},
"go/format": {},
"go/importer": {},
"go/parser": {},
"go/printer": {},
"go/scanner": {},
"go/token": {},
"go/types": {},
"hash": {},
"hash/adler32": {},
"hash/crc32": {},
"hash/crc64": {},
"hash/fnv": {},
"hash/maphash": {},
"html": {},
"html/template": {},
"image": {},
"image/color": {},
"image/color/palette": {},
"image/draw": {},
"image/gif": {},
"image/jpeg": {},
"image/png": {},
"index/suffixarray": {},
"io": {},
"io/fs": {},
"io/ioutil": {},
"log": {},
"log/syslog": {},
"math": {},
"math/big": {},
"math/bits": {},
"math/cmplx": {},
"math/rand": {},
"mime": {},
"mime/multipart": {},
"mime/quotedprintable": {},
"net": {},
"net/http": {},
"net/http/cgi": {},
"net/http/cookiejar": {},
"net/http/fcgi": {},
"net/http/httptest": {},
"net/http/httptrace": {},
"net/http/httputil": {},
"net/http/pprof": {},
"net/mail": {},
"net/netip": {},
"net/rpc": {},
"net/rpc/jsonrpc": {},
"net/smtp": {},
"net/textproto": {},
"net/url": {},
"os": {},
"os/exec": {},
"os/signal": {},
"os/user": {},
"path": {},
"path/filepath": {},
"plugin": {},
"reflect": {},
"regexp": {},
"regexp/syntax": {},
"runtime": {},
"runtime/cgo": {},
"runtime/debug": {},
"runtime/metrics": {},
"runtime/pprof": {},
"runtime/race": {},
"runtime/trace": {},
"sort": {},
"strconv": {},
"strings": {},
"sync": {},
"sync/atomic": {},
"syscall": {},
"testing": {},
"testing/fstest": {},
"testing/iotest": {},
"testing/quick": {},
"text/scanner": {},
"text/tabwriter": {},
"text/template": {},
"text/template/parse": {},
"time": {},
"time/tzdata": {},
"unicode": {},
"unicode/utf16": {},
"unicode/utf8": {},
"unsafe": {},
}

View File

@@ -0,0 +1,19 @@
package specificity
// Default is the specificity of the catch-all "default" section: stronger
// than a mismatch, weaker than any concrete match.
type Default struct{}
func (d Default) IsMoreSpecific(than MatchSpecificity) bool {
	return isMoreSpecific(d, than)
}
func (d Default) Equal(to MatchSpecificity) bool {
	return equalSpecificity(d, to)
}
func (d Default) class() specificityClass {
	return DefaultClass
}
func (d Default) String() string {
	return "Default"
}

View File

@@ -0,0 +1,24 @@
package specificity
import "fmt"
// Match is the specificity of a prefix-section hit; longer matched prefixes
// are more specific than shorter ones.
type Match struct {
	// Length is the length of the matched prefix.
	Length int
}
// IsMoreSpecific reports whether m beats than: a higher class always wins,
// and between two prefix Matches the longer prefix wins.
func (m Match) IsMoreSpecific(than MatchSpecificity) bool {
	otherMatch, isMatch := than.(Match)
	return isMoreSpecific(m, than) || (isMatch && m.Length > otherMatch.Length)
}
func (m Match) Equal(to MatchSpecificity) bool {
	return equalSpecificity(m, to)
}
func (m Match) class() specificityClass {
	return MatchClass
}
func (m Match) String() string {
	return fmt.Sprintf("Match(length: %d)", m.Length)
}

View File

@@ -0,0 +1,19 @@
package specificity
// MisMatch is the specificity of a section that does not match an import at
// all; it is the weakest class.
type MisMatch struct{}
func (m MisMatch) IsMoreSpecific(than MatchSpecificity) bool {
	return isMoreSpecific(m, than)
}
func (m MisMatch) Equal(to MatchSpecificity) bool {
	return equalSpecificity(m, to)
}
func (m MisMatch) class() specificityClass {
	return MisMatchClass
}
func (m MisMatch) String() string {
	return "Mismatch"
}

View File

@@ -0,0 +1,19 @@
package specificity
// NameMatch is the specificity of a match on the import's name (blank "_"
// or dot "." imports); it is the strongest class.
type NameMatch struct{}
func (n NameMatch) IsMoreSpecific(than MatchSpecificity) bool {
	return isMoreSpecific(n, than)
}
func (n NameMatch) Equal(to MatchSpecificity) bool {
	return equalSpecificity(n, to)
}
func (n NameMatch) class() specificityClass {
	return NameClass
}
func (n NameMatch) String() string {
	return "Name"
}

View File

@@ -0,0 +1,27 @@
package specificity
// specificityClass ranks match kinds; higher values win when picking the
// section for an import.
type specificityClass int
// Class values are spaced by 10, leaving room for intermediate levels.
const (
	MisMatchClass = 0
	DefaultClass = 10
	StandardClass = 20
	MatchClass = 30
	NameClass = 40
)
// MatchSpecificity is used to determine which section matches an import best
type MatchSpecificity interface {
	IsMoreSpecific(than MatchSpecificity) bool
	Equal(to MatchSpecificity) bool
	class() specificityClass
}
func isMoreSpecific(this, than MatchSpecificity) bool {
	return this.class() > than.class()
}
func equalSpecificity(base, to MatchSpecificity) bool {
	// m.class() == to.class() would not work for Match
	return !base.IsMoreSpecific(to) && !to.IsMoreSpecific(base)
}

View File

@@ -0,0 +1,19 @@
package specificity
// StandardMatch is the specificity of a standard-library section match:
// stronger than Default, weaker than a prefix or name match.
type StandardMatch struct{}
func (s StandardMatch) IsMoreSpecific(than MatchSpecificity) bool {
	return isMoreSpecific(s, than)
}
func (s StandardMatch) Equal(to MatchSpecificity) bool {
	return equalSpecificity(s, to)
}
func (s StandardMatch) class() specificityClass {
	return StandardClass
}
func (s StandardMatch) String() string {
	return "Standard"
}

11
vendor/github.com/daixiang0/gci/pkg/utils/constants.go generated vendored Normal file
View File

@@ -0,0 +1,11 @@
package utils
// Characters shared across gci's formatting code. Indent, Linebreak and the
// parentheses are untyped rune constants appended directly to byte buffers;
// Colon is a string.
const (
	Indent = '\t'
	Linebreak = '\n'
	Colon = ":"
	LeftParenthesis = '('
	RightParenthesis = ')'
)

27
vendor/github.com/hexops/gotextdiff/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

54
vendor/github.com/hexops/gotextdiff/README.md generated vendored Normal file
View File

@@ -0,0 +1,54 @@
# gotextdiff - unified text diffing in Go <a href="https://hexops.com"><img align="right" alt="Hexops logo" src="https://raw.githubusercontent.com/hexops/media/master/readme.svg"></img></a>
This is a copy of the Go text diffing packages that [the official Go language server gopls uses internally](https://github.com/golang/tools/tree/master/internal/lsp/diff) to generate unified diffs.
If you've previously tried to generate unified text diffs in Go (like the ones you see in Git and on GitHub), you may have found [github.com/sergi/go-diff](https://github.com/sergi/go-diff) which is a Go port of Neil Fraser's google-diff-match-patch code - however it [does not support unified diffs](https://github.com/sergi/go-diff/issues/57).
This is arguably one of the best (and most maintained) unified text diffing packages in Go as of at least 2020.
(All credit goes to [the Go authors](http://tip.golang.org/AUTHORS), I am merely re-publishing their work so others can use it.)
## Example usage
Import the packages:
```Go
import (
"github.com/hexops/gotextdiff"
"github.com/hexops/gotextdiff/myers"
)
```
Assuming you want to diff `a.txt` and `b.txt`, whose contents are stored in `aString` and `bString` then:
```Go
edits := myers.ComputeEdits(span.URIFromPath("a.txt"), aString, bString)
diff := fmt.Sprint(gotextdiff.ToUnified("a.txt", "b.txt", aString, edits))
```
`diff` will be a string like:
```diff
--- a.txt
+++ b.txt
@@ -1,13 +1,28 @@
-foo
+bar
```
## API compatibility
We will publish a new major version anytime the API changes in a backwards-incompatible way. Because the upstream is not being developed with this being a public package in mind, API breakages may occur more often than in other Go packages (but you can always continue using the old version thanks to Go modules.)
## Alternatives
- [github.com/andreyvit/diff](https://github.com/andreyvit/diff): Quick'n'easy string diffing functions for Golang based on github.com/sergi/go-diff.
- [github.com/kylelemons/godebug/diff](https://github.com/kylelemons/godebug/tree/master/diff): implements a linewise diff algorithm ([inactive](https://github.com/kylelemons/godebug/issues/22#issuecomment-524573477)).
## Contributing
We will only accept changes made [upstream](https://github.com/golang/tools/tree/master/internal/lsp/diff), please send any contributions to the upstream instead! Compared to the upstream, only import paths will be modified (to be non-`internal` so they are importable.) The only thing we add here is this README.
## License
See https://github.com/golang/tools/blob/master/LICENSE

159
vendor/github.com/hexops/gotextdiff/diff.go generated vendored Normal file
View File

@@ -0,0 +1,159 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// package gotextdiff supports a pluggable diff algorithm.
package gotextdiff
import (
"sort"
"strings"
"github.com/hexops/gotextdiff/span"
)
// TextEdit represents a change to a section of a document.
// The text within the specified span should be replaced by the supplied new text.
type TextEdit struct {
	// Span is the region of the original document being replaced.
	Span span.Span
	// NewText is the replacement text; empty for a pure deletion.
	NewText string
}
// ComputeEdits is the type for a function that produces a set of edits that
// convert from the before content to the after content.
type ComputeEdits func(uri span.URI, before, after string) []TextEdit
// SortTextEdits attempts to order all edits by their starting points.
// The sort is stable so that edits with the same starting point will not
// be reordered.
func SortTextEdits(d []TextEdit) {
	// Use a stable sort to maintain the order of edits inserted at the same position.
	sort.SliceStable(d, func(i int, j int) bool {
		return span.Compare(d[i].Span, d[j].Span) < 0
	})
}
// ApplyEdits applies the set of edits to the before and returns the resulting
// content.
// It may panic or produce garbage if the edits are not valid for the provided
// before content.
func ApplyEdits(before string, edits []TextEdit) string {
	// Preconditions:
	// - all of the edits apply to before
	// - and all the spans for each TextEdit have the same URI
	if len(edits) == 0 {
		return before
	}
	// sort the edits and resolve all span offsets against before
	_, edits, _ = prepareEdits(before, edits)
	after := strings.Builder{}
	last := 0
	for _, edit := range edits {
		start := edit.Span.Start().Offset()
		if start > last {
			// copy the unchanged text between the previous edit and this one
			after.WriteString(before[last:start])
			last = start
		}
		after.WriteString(edit.NewText)
		last = edit.Span.End().Offset()
	}
	if last < len(before) {
		// copy the unchanged tail after the final edit
		after.WriteString(before[last:])
	}
	return after.String()
}
// LineEdits takes a set of edits and expands and merges them as necessary
// to ensure that there are only full line edits left when it is done.
func LineEdits(before string, edits []TextEdit) []TextEdit {
	if len(edits) == 0 {
		return nil
	}
	c, edits, partial := prepareEdits(before, edits)
	if partial {
		// at least one edit does not start and end on a line boundary
		edits = lineEdits(before, c, edits)
	}
	return edits
}
// prepareEdits returns a sorted copy of the edits
// The boolean result reports whether any edit is "partial", i.e. does not
// both start and end at column 1 inside the content.
func prepareEdits(before string, edits []TextEdit) (*span.TokenConverter, []TextEdit, bool) {
	partial := false
	c := span.NewContentConverter("", []byte(before))
	copied := make([]TextEdit, len(edits))
	for i, edit := range edits {
		// resolve line/column/offset for both endpoints against before
		edit.Span, _ = edit.Span.WithAll(c)
		copied[i] = edit
		partial = partial ||
			edit.Span.Start().Offset() >= len(before) ||
			edit.Span.Start().Column() > 1 || edit.Span.End().Column() > 1
	}
	SortTextEdits(copied)
	return c, copied, partial
}
// lineEdits rewrites the edits to always be full line edits
// Edits touching the same line are first merged into one run (including the
// unchanged gap text between them), then each run is expanded by addEdit.
func lineEdits(before string, c *span.TokenConverter, edits []TextEdit) []TextEdit {
	adjusted := make([]TextEdit, 0, len(edits))
	current := TextEdit{Span: span.Invalid}
	for _, edit := range edits {
		if current.Span.IsValid() && edit.Span.Start().Line() <= current.Span.End().Line() {
			// overlaps with the current edit, need to combine
			// first get the gap from the previous edit
			gap := before[current.Span.End().Offset():edit.Span.Start().Offset()]
			// now add the text of this edit
			current.NewText += gap + edit.NewText
			// and then adjust the end position
			current.Span = span.New(current.Span.URI(), current.Span.Start(), edit.Span.End())
		} else {
			// does not overlap, add previous run (if there is one)
			adjusted = addEdit(before, adjusted, current)
			// and then remember this edit as the start of the next run
			current = edit
		}
	}
	// add the current pending run if there is one
	return addEdit(before, adjusted, current)
}
// addEdit appends edit to edits after widening it to whole lines: a partial
// start is pulled back to column 1, an edit past a newline-less EOF is
// joined onto the file's last line, and a partial end is pushed forward to
// the start of the next line. Invalid (sentinel) edits are dropped.
func addEdit(before string, edits []TextEdit, edit TextEdit) []TextEdit {
	if !edit.Span.IsValid() {
		return edits
	}
	// if edit is partial, expand it to full line now
	start := edit.Span.Start()
	end := edit.Span.End()
	if start.Column() > 1 {
		// prepend the text and adjust to start of line
		delta := start.Column() - 1
		start = span.NewPoint(start.Line(), 1, start.Offset()-delta)
		edit.Span = span.New(edit.Span.URI(), start, end)
		edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText
	}
	if start.Offset() >= len(before) && start.Line() > 1 && before[len(before)-1] != '\n' {
		// after end of file that does not end in eol, so join to last line of file
		// to do this we need to know where the start of the last line was
		eol := strings.LastIndex(before, "\n")
		if eol < 0 {
			// file is one non terminated line
			eol = 0
		}
		delta := len(before) - eol
		start = span.NewPoint(start.Line()-1, 1, start.Offset()-delta)
		edit.Span = span.New(edit.Span.URI(), start, end)
		edit.NewText = before[start.Offset():start.Offset()+delta] + edit.NewText
	}
	if end.Column() > 1 {
		// extend the end through the rest of its line (including the newline)
		remains := before[end.Offset():]
		eol := strings.IndexRune(remains, '\n')
		if eol < 0 {
			eol = len(remains)
		} else {
			eol++
		}
		end = span.NewPoint(end.Line()+1, 1, end.Offset()+eol)
		edit.Span = span.New(edit.Span.URI(), start, end)
		edit.NewText = edit.NewText + remains[:eol]
	}
	edits = append(edits, edit)
	return edits
}

205
vendor/github.com/hexops/gotextdiff/myers/diff.go generated vendored Normal file
View File

@@ -0,0 +1,205 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package myers implements the Myers diff algorithm.
package myers
import (
"strings"
diff "github.com/hexops/gotextdiff"
"github.com/hexops/gotextdiff/span"
)
// Sources:
// https://blog.jcoglan.com/2017/02/17/the-myers-diff-algorithm-part-3/
// https://www.codeproject.com/Articles/42279/%2FArticles%2F42279%2FInvestigating-Myers-diff-algorithm-Part-1-of-2
// ComputeEdits returns the line-granular edits that transform before into
// after, expressed as spans within the file identified by uri.
func ComputeEdits(uri span.URI, before, after string) []diff.TextEdit {
	script := operations(splitLines(before), splitLines(after))
	result := make([]diff.TextEdit, 0, len(script))
	for _, step := range script {
		at := span.New(uri, span.NewPoint(step.I1+1, 1, 0), span.NewPoint(step.I2+1, 1, 0))
		if step.Kind == diff.Delete {
			// Delete: unformatted[i1:i2] is deleted.
			result = append(result, diff.TextEdit{Span: at})
			continue
		}
		if step.Kind != diff.Insert {
			continue
		}
		// Insert: formatted[j1:j2] is inserted at unformatted[i1:i1].
		content := strings.Join(step.Content, "")
		if content != "" {
			result = append(result, diff.TextEdit{Span: at, NewText: content})
		}
	}
	return result
}
type operation struct {
Kind diff.OpKind
Content []string // content from b
I1, I2 int // indices of the line in a
J1 int // indices of the line in b, J2 implied by len(Content)
}
// operations returns the list of operations to convert a into b, consolidating
// operations for multiple lines and not including equal lines.
func operations(a, b []string) []*operation {
	if len(a) == 0 && len(b) == 0 {
		return nil
	}
	// Run the forward pass of the Myers algorithm, then walk the trace
	// backwards to recover the individual "snakes" (see backtrack).
	trace, offset := shortestEditSequence(a, b)
	snakes := backtrack(trace, len(a), len(b), offset)
	M, N := len(a), len(b)
	var i int
	// worst case: every line of a is deleted and every line of b is inserted
	solution := make([]*operation, len(a)+len(b))
	// add finalizes op (if non-nil) as ending at a-index i2 / b-index j2 and
	// stores it in the next solution slot.
	add := func(op *operation, i2, j2 int) {
		if op == nil {
			return
		}
		op.I2 = i2
		if op.Kind == diff.Insert {
			op.Content = b[op.J1:j2]
		}
		solution[i] = op
		i++
	}
	// (x, y) tracks the current position in a and b respectively
	x, y := 0, 0
	for _, snake := range snakes {
		if len(snake) < 2 {
			continue
		}
		var op *operation
		// delete (horizontal)
		for snake[0]-snake[1] > x-y {
			if op == nil {
				op = &operation{
					Kind: diff.Delete,
					I1:   x,
					J1:   y,
				}
			}
			x++
			if x == M {
				break
			}
		}
		add(op, x, y)
		op = nil
		// insert (vertical)
		for snake[0]-snake[1] < x-y {
			if op == nil {
				op = &operation{
					Kind: diff.Insert,
					I1:   x,
					J1:   y,
				}
			}
			y++
		}
		add(op, x, y)
		op = nil
		// equal (diagonal) lines produce no operation; just advance
		for x < snake[0] {
			x++
			y++
		}
		if x >= M && y >= N {
			break
		}
	}
	// i counts the operations actually emitted
	return solution[:i]
}
// backtrack uses the trace for the edit sequence computation and returns the
// "snakes" that make up the solution. A "snake" is a single deletion or
// insertion followed by zero or more diagonals.
func backtrack(trace [][]int, x, y, offset int) [][]int {
	snakes := make([][]int, len(trace))
	d := len(trace) - 1
	for ; x > 0 && y > 0 && d > 0; d-- {
		V := trace[d]
		if len(V) == 0 {
			continue
		}
		// record the end point of the snake for this edit distance d
		snakes[d] = []int{x, y}
		k := x - y
		// pick the predecessor diagonal using the same tie-break rule as the
		// forward pass in shortestEditSequence
		var kPrev int
		if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
			kPrev = k + 1
		} else {
			kPrev = k - 1
		}
		// jump to the end point of the previous snake
		x = V[kPrev+offset]
		y = x - kPrev
	}
	if x < 0 || y < 0 {
		return snakes
	}
	snakes[d] = []int{x, y}
	return snakes
}
// shortestEditSequence returns the shortest edit sequence that converts a into b.
// The first result is the trace: for each edit distance d, a copy of the V
// array (furthest-reaching x per diagonal k). The second result is the offset
// to add to a diagonal index k when indexing into V, since k may be negative.
func shortestEditSequence(a, b []string) ([][]int, int) {
	M, N := len(a), len(b)
	V := make([]int, 2*(N+M)+1)
	offset := N + M
	trace := make([][]int, N+M+1)
	// Iterate through the maximum possible length of the SES (N+M).
	for d := 0; d <= N+M; d++ {
		copyV := make([]int, len(V))
		// k lines are represented by the equation y = x - k. We move in
		// increments of 2 because end points for even d are on even k lines.
		for k := -d; k <= d; k += 2 {
			// At each point, we either go down or to the right. We go down if
			// k == -d, and we go to the right if k == d. We also prioritize
			// the maximum x value, because we prefer deletions to insertions.
			var x int
			if k == -d || (k != d && V[k-1+offset] < V[k+1+offset]) {
				x = V[k+1+offset] // down
			} else {
				x = V[k-1+offset] + 1 // right
			}
			y := x - k
			// Diagonal moves while we have equal contents.
			for x < M && y < N && a[x] == b[y] {
				x++
				y++
			}
			V[k+offset] = x
			// Return if we've exceeded the maximum values.
			if x == M && y == N {
				// Makes sure to save the state of the array before returning.
				copy(copyV, V)
				trace[d] = copyV
				return trace, offset
			}
		}
		// Save the state of the array.
		copy(copyV, V)
		trace[d] = copyV
	}
	return nil, 0
}
// splitLines divides text into lines, each retaining its trailing newline.
// The empty tail element that SplitAfter produces for newline-terminated
// input is discarded; a final line without a newline is kept as-is.
func splitLines(text string) []string {
	parts := strings.SplitAfter(text, "\n")
	if n := len(parts); parts[n-1] == "" {
		parts = parts[:n-1]
	}
	return parts
}

100
vendor/github.com/hexops/gotextdiff/span/parse.go generated vendored Normal file
View File

@@ -0,0 +1,100 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package span
import (
"strconv"
"strings"
"unicode/utf8"
)
// Parse returns the location represented by the input.
// Only file paths are accepted, not URIs.
// The returned span will be normalized, and thus if printed may produce a
// different string.
//
// Parsing proceeds right to left, repeatedly stripping a trailing
// separator+number pair with rstripSuffix. Informally the accepted shape is
// file[:line[:col][#offset]][-[line[:col][#offset]]] — see the template
// comment below.
func Parse(input string) Span {
	// :0:0#0-0:0#0
	// valid tracks the longest prefix known to be a bare path if parsing of
	// the trailing location components has to be abandoned ("rewind" below).
	valid := input
	var hold, offset int
	hadCol := false
	suf := rstripSuffix(input)
	if suf.sep == "#" {
		offset = suf.num
		suf = rstripSuffix(suf.remains)
	}
	if suf.sep == ":" {
		valid = suf.remains
		hold = suf.num
		hadCol = true
		suf = rstripSuffix(suf.remains)
	}
	switch {
	case suf.sep == ":":
		// file:line:col[#offset] — a single point with line and column
		return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), Point{})
	case suf.sep == "-":
		// we have a span, fall out of the case to continue
	default:
		// separator not valid, rewind to either the : or the start
		return New(URIFromPath(valid), NewPoint(hold, 0, offset), Point{})
	}
	// only the span form can get here
	// at this point we still don't know what the numbers we have mean
	// if have not yet seen a : then we might have either a line or a column depending
	// on whether start has a column or not
	// we build an end point and will fix it later if needed
	end := NewPoint(suf.num, hold, offset)
	hold, offset = 0, 0
	suf = rstripSuffix(suf.remains)
	if suf.sep == "#" {
		offset = suf.num
		suf = rstripSuffix(suf.remains)
	}
	if suf.sep != ":" {
		// turns out we don't have a span after all, rewind
		return New(URIFromPath(valid), end, Point{})
	}
	valid = suf.remains
	hold = suf.num
	suf = rstripSuffix(suf.remains)
	if suf.sep != ":" {
		// line#offset only
		return New(URIFromPath(valid), NewPoint(hold, 0, offset), end)
	}
	// we have a column, so if end only had one number, it is also the column
	if !hadCol {
		end = NewPoint(suf.num, end.v.Line, end.v.Offset)
	}
	return New(URIFromPath(suf.remains), NewPoint(suf.num, hold, offset), end)
}
// suffix holds the result of stripping one trailing separator+number
// component from a location string.
type suffix struct {
	remains string // the input with the parsed component removed
	sep     string // the separator preceding the number (":", "#" or "-"), or "" if none
	num     int    // the parsed number, or -1 if none was present
}

// rstripSuffix strips one trailing number (if any) and its preceding
// separator from input. If the input does not end in one of the separators
// recognized by Parse (':', '#' or '-'), optionally followed by digits, the
// input is returned unchanged with an empty separator.
func rstripSuffix(input string) suffix {
	if len(input) == 0 {
		return suffix{"", "", -1}
	}
	remains := input
	num := -1
	// first see if we have a number at the end
	last := strings.LastIndexFunc(remains, func(r rune) bool { return r < '0' || r > '9' })
	if last >= 0 && last < len(remains)-1 {
		number, err := strconv.ParseInt(remains[last+1:], 10, 64)
		if err == nil {
			num = int(number)
			remains = remains[:last+1]
		}
	}
	// now see if we have a trailing separator
	r, w := utf8.DecodeLastRuneInString(remains)
	// BUG FIX: upstream wrote `r != ':' && r != '#' && r == '#'`, which is
	// always false (r cannot both equal and differ from '#'), so this guard
	// never fired and any trailing rune was stripped as a "separator". The
	// intended third clause is `r != '-'`: ':' , '#' and '-' are exactly the
	// separators Parse recognizes.
	if r != ':' && r != '#' && r != '-' {
		return suffix{input, "", -1}
	}
	remains = remains[:len(remains)-w]
	return suffix{remains, string(r), num}
}

285
vendor/github.com/hexops/gotextdiff/span/span.go generated vendored Normal file
View File

@@ -0,0 +1,285 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package span contains support for representing with positions and ranges in
// text files.
package span
import (
"encoding/json"
"fmt"
"path"
)
// Span represents a source code range in standardized form.
type Span struct {
v span
}
// Point represents a single point within a file.
// In general this should only be used as part of a Span, as on its own it
// does not carry enough information.
type Point struct {
v point
}
type span struct {
URI URI `json:"uri"`
Start point `json:"start"`
End point `json:"end"`
}
type point struct {
Line int `json:"line"`
Column int `json:"column"`
Offset int `json:"offset"`
}
// Invalid is a span that reports false from IsValid
var Invalid = Span{v: span{Start: invalidPoint.v, End: invalidPoint.v}}
var invalidPoint = Point{v: point{Line: 0, Column: 0, Offset: -1}}
// Converter is the interface to an object that can convert between line:column
// and offset forms for a single file.
type Converter interface {
//ToPosition converts from an offset to a line:column pair.
ToPosition(offset int) (int, int, error)
//ToOffset converts from a line:column pair to an offset.
ToOffset(line, col int) (int, error)
}
func New(uri URI, start Point, end Point) Span {
s := Span{v: span{URI: uri, Start: start.v, End: end.v}}
s.v.clean()
return s
}
func NewPoint(line, col, offset int) Point {
p := Point{v: point{Line: line, Column: col, Offset: offset}}
p.v.clean()
return p
}
func Compare(a, b Span) int {
if r := CompareURI(a.URI(), b.URI()); r != 0 {
return r
}
if r := comparePoint(a.v.Start, b.v.Start); r != 0 {
return r
}
return comparePoint(a.v.End, b.v.End)
}
func ComparePoint(a, b Point) int {
return comparePoint(a.v, b.v)
}
func comparePoint(a, b point) int {
if !a.hasPosition() {
if a.Offset < b.Offset {
return -1
}
if a.Offset > b.Offset {
return 1
}
return 0
}
if a.Line < b.Line {
return -1
}
if a.Line > b.Line {
return 1
}
if a.Column < b.Column {
return -1
}
if a.Column > b.Column {
return 1
}
return 0
}
func (s Span) HasPosition() bool { return s.v.Start.hasPosition() }
func (s Span) HasOffset() bool { return s.v.Start.hasOffset() }
func (s Span) IsValid() bool { return s.v.Start.isValid() }
func (s Span) IsPoint() bool { return s.v.Start == s.v.End }
func (s Span) URI() URI { return s.v.URI }
func (s Span) Start() Point { return Point{s.v.Start} }
func (s Span) End() Point { return Point{s.v.End} }
func (s *Span) MarshalJSON() ([]byte, error) { return json.Marshal(&s.v) }
func (s *Span) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &s.v) }
func (p Point) HasPosition() bool { return p.v.hasPosition() }
func (p Point) HasOffset() bool { return p.v.hasOffset() }
func (p Point) IsValid() bool { return p.v.isValid() }
func (p *Point) MarshalJSON() ([]byte, error) { return json.Marshal(&p.v) }
func (p *Point) UnmarshalJSON(b []byte) error { return json.Unmarshal(b, &p.v) }
func (p Point) Line() int {
if !p.v.hasPosition() {
panic(fmt.Errorf("position not set in %v", p.v))
}
return p.v.Line
}
func (p Point) Column() int {
if !p.v.hasPosition() {
panic(fmt.Errorf("position not set in %v", p.v))
}
return p.v.Column
}
func (p Point) Offset() int {
if !p.v.hasOffset() {
panic(fmt.Errorf("offset not set in %v", p.v))
}
return p.v.Offset
}
func (p point) hasPosition() bool { return p.Line > 0 }
func (p point) hasOffset() bool { return p.Offset >= 0 }
func (p point) isValid() bool { return p.hasPosition() || p.hasOffset() }
func (p point) isZero() bool {
return (p.Line == 1 && p.Column == 1) || (!p.hasPosition() && p.Offset == 0)
}
func (s *span) clean() {
//this presumes the points are already clean
if !s.End.isValid() || (s.End == point{}) {
s.End = s.Start
}
}
func (p *point) clean() {
if p.Line < 0 {
p.Line = 0
}
if p.Column <= 0 {
if p.Line > 0 {
p.Column = 1
} else {
p.Column = 0
}
}
if p.Offset == 0 && (p.Line > 1 || p.Column > 1) {
p.Offset = -1
}
}
// Format implements fmt.Formatter to print the Location in a standard form.
// The format produced is one that can be read back in using Parse.
func (s Span) Format(f fmt.State, c rune) {
fullForm := f.Flag('+')
preferOffset := f.Flag('#')
// we should always have a uri, simplify if it is file format
//TODO: make sure the end of the uri is unambiguous
uri := string(s.v.URI)
if c == 'f' {
uri = path.Base(uri)
} else if !fullForm {
uri = s.v.URI.Filename()
}
fmt.Fprint(f, uri)
if !s.IsValid() || (!fullForm && s.v.Start.isZero() && s.v.End.isZero()) {
return
}
// see which bits of start to write
printOffset := s.HasOffset() && (fullForm || preferOffset || !s.HasPosition())
printLine := s.HasPosition() && (fullForm || !printOffset)
printColumn := printLine && (fullForm || (s.v.Start.Column > 1 || s.v.End.Column > 1))
fmt.Fprint(f, ":")
if printLine {
fmt.Fprintf(f, "%d", s.v.Start.Line)
}
if printColumn {
fmt.Fprintf(f, ":%d", s.v.Start.Column)
}
if printOffset {
fmt.Fprintf(f, "#%d", s.v.Start.Offset)
}
// start is written, do we need end?
if s.IsPoint() {
return
}
// we don't print the line if it did not change
printLine = fullForm || (printLine && s.v.End.Line > s.v.Start.Line)
fmt.Fprint(f, "-")
if printLine {
fmt.Fprintf(f, "%d", s.v.End.Line)
}
if printColumn {
if printLine {
fmt.Fprint(f, ":")
}
fmt.Fprintf(f, "%d", s.v.End.Column)
}
if printOffset {
fmt.Fprintf(f, "#%d", s.v.End.Offset)
}
}
func (s Span) WithPosition(c Converter) (Span, error) {
if err := s.update(c, true, false); err != nil {
return Span{}, err
}
return s, nil
}
func (s Span) WithOffset(c Converter) (Span, error) {
if err := s.update(c, false, true); err != nil {
return Span{}, err
}
return s, nil
}
func (s Span) WithAll(c Converter) (Span, error) {
if err := s.update(c, true, true); err != nil {
return Span{}, err
}
return s, nil
}
func (s *Span) update(c Converter, withPos, withOffset bool) error {
if !s.IsValid() {
return fmt.Errorf("cannot add information to an invalid span")
}
if withPos && !s.HasPosition() {
if err := s.v.Start.updatePosition(c); err != nil {
return err
}
if s.v.End.Offset == s.v.Start.Offset {
s.v.End = s.v.Start
} else if err := s.v.End.updatePosition(c); err != nil {
return err
}
}
if withOffset && (!s.HasOffset() || (s.v.End.hasPosition() && !s.v.End.hasOffset())) {
if err := s.v.Start.updateOffset(c); err != nil {
return err
}
if s.v.End.Line == s.v.Start.Line && s.v.End.Column == s.v.Start.Column {
s.v.End.Offset = s.v.Start.Offset
} else if err := s.v.End.updateOffset(c); err != nil {
return err
}
}
return nil
}
func (p *point) updatePosition(c Converter) error {
line, col, err := c.ToPosition(p.Offset)
if err != nil {
return err
}
p.Line = line
p.Column = col
return nil
}
func (p *point) updateOffset(c Converter) error {
offset, err := c.ToOffset(p.Line, p.Column)
if err != nil {
return err
}
p.Offset = offset
return nil
}

194
vendor/github.com/hexops/gotextdiff/span/token.go generated vendored Normal file
View File

@@ -0,0 +1,194 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package span
import (
"fmt"
"go/token"
)
// Range represents a source code range in token.Pos form.
// It also carries the FileSet that produced the positions, so that it is
// self contained.
type Range struct {
FileSet *token.FileSet
Start token.Pos
End token.Pos
Converter Converter
}
type FileConverter struct {
file *token.File
}
// TokenConverter is a Converter backed by a token file set and file.
// It uses the file set methods to work out the conversions, which
// makes it fast and does not require the file contents.
type TokenConverter struct {
FileConverter
fset *token.FileSet
}
// NewRange creates a new Range from a FileSet and two positions.
// To represent a point pass a 0 as the end pos.
func NewRange(fset *token.FileSet, start, end token.Pos) Range {
return Range{
FileSet: fset,
Start: start,
End: end,
}
}
// NewTokenConverter returns an implementation of Converter backed by a
// token.File.
func NewTokenConverter(fset *token.FileSet, f *token.File) *TokenConverter {
return &TokenConverter{fset: fset, FileConverter: FileConverter{file: f}}
}
// NewContentConverter returns an implementation of Converter for the
// given file content.
func NewContentConverter(filename string, content []byte) *TokenConverter {
fset := token.NewFileSet()
f := fset.AddFile(filename, -1, len(content))
f.SetLinesForContent(content)
return NewTokenConverter(fset, f)
}
// IsPoint returns true if the range represents a single point.
func (r Range) IsPoint() bool {
return r.Start == r.End
}
// Span converts a Range to a Span that represents the Range.
// It will fill in all the members of the Span, calculating the line and column
// information.
func (r Range) Span() (Span, error) {
if !r.Start.IsValid() {
return Span{}, fmt.Errorf("start pos is not valid")
}
f := r.FileSet.File(r.Start)
if f == nil {
return Span{}, fmt.Errorf("file not found in FileSet")
}
return FileSpan(f, r.Converter, r.Start, r.End)
}
// FileSpan returns a span within tok, using converter to translate between
// offsets and positions.
func FileSpan(tok *token.File, converter Converter, start, end token.Pos) (Span, error) {
var s Span
var err error
var startFilename string
startFilename, s.v.Start.Line, s.v.Start.Column, err = position(tok, start)
if err != nil {
return Span{}, err
}
s.v.URI = URIFromPath(startFilename)
if end.IsValid() {
var endFilename string
endFilename, s.v.End.Line, s.v.End.Column, err = position(tok, end)
if err != nil {
return Span{}, err
}
// In the presence of line directives, a single File can have sections from
// multiple file names.
if endFilename != startFilename {
return Span{}, fmt.Errorf("span begins in file %q but ends in %q", startFilename, endFilename)
}
}
s.v.Start.clean()
s.v.End.clean()
s.v.clean()
if converter != nil {
return s.WithOffset(converter)
}
if startFilename != tok.Name() {
return Span{}, fmt.Errorf("must supply Converter for file %q containing lines from %q", tok.Name(), startFilename)
}
return s.WithOffset(&FileConverter{tok})
}
func position(f *token.File, pos token.Pos) (string, int, int, error) {
off, err := offset(f, pos)
if err != nil {
return "", 0, 0, err
}
return positionFromOffset(f, off)
}
func positionFromOffset(f *token.File, offset int) (string, int, int, error) {
if offset > f.Size() {
return "", 0, 0, fmt.Errorf("offset %v is past the end of the file %v", offset, f.Size())
}
pos := f.Pos(offset)
p := f.Position(pos)
// TODO(golang/go#41029): Consider returning line, column instead of line+1, 1 if
// the file's last character is not a newline.
if offset == f.Size() {
return p.Filename, p.Line + 1, 1, nil
}
return p.Filename, p.Line, p.Column, nil
}
// offset is a copy of the Offset function in go/token, but with the adjustment
// that it does not panic on invalid positions.
func offset(f *token.File, pos token.Pos) (int, error) {
if int(pos) < f.Base() || int(pos) > f.Base()+f.Size() {
return 0, fmt.Errorf("invalid pos")
}
return int(pos) - f.Base(), nil
}
// Range converts a Span to a Range that represents the Span for the supplied
// File.
func (s Span) Range(converter *TokenConverter) (Range, error) {
s, err := s.WithOffset(converter)
if err != nil {
return Range{}, err
}
// go/token will panic if the offset is larger than the file's size,
// so check here to avoid panicking.
if s.Start().Offset() > converter.file.Size() {
return Range{}, fmt.Errorf("start offset %v is past the end of the file %v", s.Start(), converter.file.Size())
}
if s.End().Offset() > converter.file.Size() {
return Range{}, fmt.Errorf("end offset %v is past the end of the file %v", s.End(), converter.file.Size())
}
return Range{
FileSet: converter.fset,
Start: converter.file.Pos(s.Start().Offset()),
End: converter.file.Pos(s.End().Offset()),
Converter: converter,
}, nil
}
// ToPosition converts a byte offset within the file to a 1-based line and
// byte column. The filename reported by the underlying token.File is
// discarded; only the position is returned.
func (l *FileConverter) ToPosition(offset int) (int, int, error) {
	_, line, col, err := positionFromOffset(l.file, offset)
	return line, col, err
}
// ToOffset converts a 1-based line and byte column to a byte offset within
// the file. line may be one past the last line, addressing the end of the
// file (allowing for a trailing eol); in that case col must be 1.
func (l *FileConverter) ToOffset(line, col int) (int, error) {
	// BUG FIX: this previously checked `line < 0`, letting line == 0 reach
	// lineStart, where token.File.LineStart panics on line numbers < 1.
	// Lines are 1-based, so reject anything below 1 with an error instead.
	if line < 1 {
		return -1, fmt.Errorf("line is not valid")
	}
	lineMax := l.file.LineCount() + 1
	if line > lineMax {
		return -1, fmt.Errorf("line is beyond end of file %v", lineMax)
	} else if line == lineMax {
		if col > 1 {
			return -1, fmt.Errorf("column is beyond end of file")
		}
		// at the end of the file, allowing for a trailing eol
		return l.file.Size(), nil
	}
	pos := lineStart(l.file, line)
	if !pos.IsValid() {
		return -1, fmt.Errorf("line is not in file")
	}
	// we assume that column is in bytes here, and that the first byte of a
	// line is at column 1
	pos += token.Pos(col - 1)
	return offset(l.file, pos)
}

39
vendor/github.com/hexops/gotextdiff/span/token111.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !go1.12
package span
import (
"go/token"
)
// lineStart is the pre-Go 1.12 version of (*token.File).LineStart. For Go
// versions <= 1.11, we borrow logic from the analysisutil package.
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
// Use binary search to find the start offset of this line.
min := 0 // inclusive
max := f.Size() // exclusive
for {
offset := (min + max) / 2
pos := f.Pos(offset)
posn := f.Position(pos)
if posn.Line == line {
return pos - (token.Pos(posn.Column) - 1)
}
if min+1 >= max {
return token.NoPos
}
if posn.Line < line {
min = offset
} else {
max = offset
}
}
}

16
vendor/github.com/hexops/gotextdiff/span/token112.go generated vendored Normal file
View File

@@ -0,0 +1,16 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.12
package span
import (
"go/token"
)
// TODO(rstambler): Delete this file when we no longer support Go 1.11.
func lineStart(f *token.File, line int) token.Pos {
return f.LineStart(line)
}

169
vendor/github.com/hexops/gotextdiff/span/uri.go generated vendored Normal file
View File

@@ -0,0 +1,169 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package span
import (
"fmt"
"net/url"
"os"
"path"
"path/filepath"
"runtime"
"strings"
"unicode"
)
const fileScheme = "file"
// URI represents the full URI for a file.
type URI string
func (uri URI) IsFile() bool {
return strings.HasPrefix(string(uri), "file://")
}
// Filename returns the file path for the given URI.
// It is an error to call this on a URI that is not a valid filename.
func (uri URI) Filename() string {
filename, err := filename(uri)
if err != nil {
panic(err)
}
return filepath.FromSlash(filename)
}
func filename(uri URI) (string, error) {
if uri == "" {
return "", nil
}
u, err := url.ParseRequestURI(string(uri))
if err != nil {
return "", err
}
if u.Scheme != fileScheme {
return "", fmt.Errorf("only file URIs are supported, got %q from %q", u.Scheme, uri)
}
// If the URI is a Windows URI, we trim the leading "/" and lowercase
// the drive letter, which will never be case sensitive.
if isWindowsDriveURIPath(u.Path) {
u.Path = strings.ToUpper(string(u.Path[1])) + u.Path[2:]
}
return u.Path, nil
}
func URIFromURI(s string) URI {
if !strings.HasPrefix(s, "file://") {
return URI(s)
}
if !strings.HasPrefix(s, "file:///") {
// VS Code sends URLs with only two slashes, which are invalid. golang/go#39789.
s = "file:///" + s[len("file://"):]
}
// Even though the input is a URI, it may not be in canonical form. VS Code
// in particular over-escapes :, @, etc. Unescape and re-encode to canonicalize.
path, err := url.PathUnescape(s[len("file://"):])
if err != nil {
panic(err)
}
// File URIs from Windows may have lowercase drive letters.
// Since drive letters are guaranteed to be case insensitive,
// we change them to uppercase to remain consistent.
// For example, file:///c:/x/y/z becomes file:///C:/x/y/z.
if isWindowsDriveURIPath(path) {
path = path[:1] + strings.ToUpper(string(path[1])) + path[2:]
}
u := url.URL{Scheme: fileScheme, Path: path}
return URI(u.String())
}
func CompareURI(a, b URI) int {
if equalURI(a, b) {
return 0
}
if a < b {
return -1
}
return 1
}
func equalURI(a, b URI) bool {
if a == b {
return true
}
// If we have the same URI basename, we may still have the same file URIs.
if !strings.EqualFold(path.Base(string(a)), path.Base(string(b))) {
return false
}
fa, err := filename(a)
if err != nil {
return false
}
fb, err := filename(b)
if err != nil {
return false
}
// Stat the files to check if they are equal.
infoa, err := os.Stat(filepath.FromSlash(fa))
if err != nil {
return false
}
infob, err := os.Stat(filepath.FromSlash(fb))
if err != nil {
return false
}
return os.SameFile(infoa, infob)
}
// URIFromPath returns a span URI for the supplied file path.
// It will always have the file scheme.
func URIFromPath(path string) URI {
if path == "" {
return ""
}
// Handle standard library paths that contain the literal "$GOROOT".
// TODO(rstambler): The go/packages API should allow one to determine a user's $GOROOT.
const prefix = "$GOROOT"
if len(path) >= len(prefix) && strings.EqualFold(prefix, path[:len(prefix)]) {
suffix := path[len(prefix):]
path = runtime.GOROOT() + suffix
}
if !isWindowsDrivePath(path) {
if abs, err := filepath.Abs(path); err == nil {
path = abs
}
}
// Check the file path again, in case it became absolute.
if isWindowsDrivePath(path) {
path = "/" + strings.ToUpper(string(path[0])) + path[1:]
}
path = filepath.ToSlash(path)
u := url.URL{
Scheme: fileScheme,
Path: path,
}
return URI(u.String())
}
// isWindowsDrivePath reports whether path looks like an absolute Windows
// file path: a drive letter immediately followed by a ":", e.g. C:/x/y/z.
func isWindowsDrivePath(path string) bool {
	if len(path) < 3 {
		return false
	}
	drive := rune(path[0])
	return path[1] == ':' && unicode.IsLetter(drive)
}
// isWindowsDriveURIPath returns true if the file URI is of the format used by
// Windows URIs. The url.Parse package does not specially handle Windows paths
// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:").
func isWindowsDriveURIPath(uri string) bool {
	if len(uri) < 4 {
		return false
	}
	leadingSlash := uri[0] == '/'
	colonAfterDrive := uri[2] == ':'
	return leadingSlash && colonAfterDrive && unicode.IsLetter(rune(uri[1]))
}

91
vendor/github.com/hexops/gotextdiff/span/utf16.go generated vendored Normal file
View File

@@ -0,0 +1,91 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package span
import (
"fmt"
"unicode/utf16"
"unicode/utf8"
)
// ToUTF16Column calculates the utf16 column expressed by the point given the
// supplied file contents.
// This is used to convert from the native (always in bytes) column
// representation and the utf16 counts used by some editors.
func ToUTF16Column(p Point, content []byte) (int, error) {
if !p.HasPosition() {
return -1, fmt.Errorf("ToUTF16Column: point is missing position")
}
if !p.HasOffset() {
return -1, fmt.Errorf("ToUTF16Column: point is missing offset")
}
offset := p.Offset() // 0-based
colZero := p.Column() - 1 // 0-based
if colZero == 0 {
// 0-based column 0, so it must be chr 1
return 1, nil
} else if colZero < 0 {
return -1, fmt.Errorf("ToUTF16Column: column is invalid (%v)", colZero)
}
// work out the offset at the start of the line using the column
lineOffset := offset - colZero
if lineOffset < 0 || offset > len(content) {
return -1, fmt.Errorf("ToUTF16Column: offsets %v-%v outside file contents (%v)", lineOffset, offset, len(content))
}
// Use the offset to pick out the line start.
// This cannot panic: offset > len(content) and lineOffset < offset.
start := content[lineOffset:]
// Now, truncate down to the supplied column.
start = start[:colZero]
// and count the number of utf16 characters
// in theory we could do this by hand more efficiently...
return len(utf16.Encode([]rune(string(start)))) + 1, nil
}
// FromUTF16Column advances the point by the utf16 character offset given the
// supplied line contents.
// This is used to convert from the utf16 counts used by some editors to the
// native (always in bytes) column representation.
func FromUTF16Column(p Point, chr int, content []byte) (Point, error) {
if !p.HasOffset() {
return Point{}, fmt.Errorf("FromUTF16Column: point is missing offset")
}
// if chr is 1 then no adjustment needed
if chr <= 1 {
return p, nil
}
if p.Offset() >= len(content) {
return p, fmt.Errorf("FromUTF16Column: offset (%v) greater than length of content (%v)", p.Offset(), len(content))
}
remains := content[p.Offset():]
// scan forward the specified number of characters
for count := 1; count < chr; count++ {
if len(remains) <= 0 {
return Point{}, fmt.Errorf("FromUTF16Column: chr goes beyond the content")
}
r, w := utf8.DecodeRune(remains)
if r == '\n' {
// Per the LSP spec:
//
// > If the character value is greater than the line length it
// > defaults back to the line length.
break
}
remains = remains[w:]
if r >= 0x10000 {
// a two point rune
count++
// if we finished in a two point rune, do not advance past the first
if count >= chr {
break
}
}
p.v.Column += w
p.v.Offset += w
}
return p, nil
}

210
vendor/github.com/hexops/gotextdiff/unified.go generated vendored Normal file
View File

@@ -0,0 +1,210 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gotextdiff
import (
"fmt"
"strings"
)
// Unified represents a set of edits as a unified diff.
type Unified struct {
// From is the name of the original file.
From string
// To is the name of the modified file.
To string
// Hunks is the set of edit hunks needed to transform the file content.
Hunks []*Hunk
}
// Hunk represents a contiguous set of line edits to apply.
type Hunk struct {
// The line in the original source where the hunk starts.
FromLine int
// The line in the original source where the hunk finishes.
ToLine int
// The set of line based edits to apply.
Lines []Line
}
// Line represents a single line operation to apply as part of a Hunk.
type Line struct {
// Kind is the type of line this represents, deletion, insertion or copy.
Kind OpKind
// Content is the content of this line.
// For deletion it is the line being removed, for all others it is the line
// to put in the output.
Content string
}
// OpKind is used to denote the type of operation a line represents.
type OpKind int

const (
	// Delete is the operation kind for a line that is present in the input
	// but not in the output.
	Delete OpKind = iota
	// Insert is the operation kind for a line that is new in the output.
	Insert
	// Equal is the operation kind for a line that is the same in the input and
	// output, often used to provide context around edited lines.
	Equal
)

// String returns a human readable representation of an OpKind. It is not
// intended for machine processing.
func (k OpKind) String() string {
	if k == Delete {
		return "delete"
	}
	if k == Insert {
		return "insert"
	}
	if k == Equal {
		return "equal"
	}
	panic("unknown operation kind")
}
const (
edge = 3
gap = edge * 2
)
// ToUnified takes a file contents and a sequence of edits, and calculates
// a unified diff that represents those edits.
func ToUnified(from, to string, content string, edits []TextEdit) Unified {
	u := Unified{
		From: from,
		To: to,
	}
	// No edits means an empty diff with no hunks.
	if len(edits) == 0 {
		return u
	}
	// NOTE(review): prepareEdits/lineEdits are defined elsewhere; presumably
	// they sort the edits and expand any partial-line edit to whole-line
	// boundaries — confirm against their definitions.
	c, edits, partial := prepareEdits(content, edits)
	if partial {
		edits = lineEdits(content, c, edits)
	}
	lines := splitLines(content)
	var h *Hunk   // hunk currently being built; nil until the first edit
	last := 0     // first 0-based input line not yet consumed
	toLine := 0   // line position reached in the output file
	for _, edit := range edits {
		// Convert the edit's 1-based span to 0-based line indices.
		start := edit.Span.Start().Line() - 1
		end := edit.Span.End().Line() - 1
		switch {
		case h != nil && start == last:
			//direct extension
		case h != nil && start <= last+gap:
			//within range of previous lines, add the joiners
			addEqualLines(h, lines, last, start)
		default:
			//need to start a new hunk
			if h != nil {
				// add the edge to the previous hunk
				addEqualLines(h, lines, last, last+edge)
				u.Hunks = append(u.Hunks, h)
			}
			toLine += start - last
			h = &Hunk{
				FromLine: start + 1,
				ToLine: toLine + 1,
			}
			// add the edge to the new hunk
			delta := addEqualLines(h, lines, start-edge, start)
			// Leading context shifts the hunk's starting line numbers back.
			h.FromLine -= delta
			h.ToLine -= delta
		}
		last = start
		// Emit the input lines covered by this edit as deletions.
		for i := start; i < end; i++ {
			h.Lines = append(h.Lines, Line{Kind: Delete, Content: lines[i]})
			last++
		}
		// Emit the replacement text, if any, as insertions.
		if edit.NewText != "" {
			for _, line := range splitLines(edit.NewText) {
				h.Lines = append(h.Lines, Line{Kind: Insert, Content: line})
				toLine++
			}
		}
	}
	if h != nil {
		// add the edge to the final hunk
		addEqualLines(h, lines, last, last+edge)
		u.Hunks = append(u.Hunks, h)
	}
	return u
}
// splitLines breaks text into lines, keeping the trailing "\n" attached to
// each line. The empty final element produced by a trailing newline (or by
// empty input) is dropped.
func splitLines(text string) []string {
	parts := strings.SplitAfter(text, "\n")
	if n := len(parts); parts[n-1] == "" {
		parts = parts[:n-1]
	}
	return parts
}
// addEqualLines appends lines[start:end] to h as Equal (context) lines,
// silently clamping the range to the valid indices of lines. It returns
// how many lines were actually appended.
func addEqualLines(h *Hunk, lines []string, start, end int) int {
	added := 0
	for i := start; i < end; i++ {
		if i < 0 {
			continue // requested context before the start of the file
		}
		if i >= len(lines) {
			break // requested context past the end of the file
		}
		h.Lines = append(h.Lines, Line{Kind: Equal, Content: lines[i]})
		added++
	}
	return added
}
// Format converts a unified diff to the standard textual form for that diff.
// The output of this function can be passed to tools like patch.
func (u Unified) Format(f fmt.State, r rune) {
	// An empty diff produces no output at all, not even the file headers.
	if len(u.Hunks) == 0 {
		return
	}
	fmt.Fprintf(f, "--- %s\n", u.From)
	fmt.Fprintf(f, "+++ %s\n", u.To)
	for _, hunk := range u.Hunks {
		// Count the hunk's span in each file: deletions exist only on the
		// "from" side, insertions only on the "to" side, and equal
		// (context) lines on both.
		fromCount, toCount := 0, 0
		for _, l := range hunk.Lines {
			switch l.Kind {
			case Delete:
				fromCount++
			case Insert:
				toCount++
			default:
				fromCount++
				toCount++
			}
		}
		// Write the "@@ -from,count +to,count @@" header; by unified-diff
		// convention the ",count" part is omitted when the count is 1.
		fmt.Fprint(f, "@@")
		if fromCount > 1 {
			fmt.Fprintf(f, " -%d,%d", hunk.FromLine, fromCount)
		} else {
			fmt.Fprintf(f, " -%d", hunk.FromLine)
		}
		if toCount > 1 {
			fmt.Fprintf(f, " +%d,%d", hunk.ToLine, toCount)
		} else {
			fmt.Fprintf(f, " +%d", hunk.ToLine)
		}
		fmt.Fprint(f, " @@\n")
		// Write each hunk line with its "-", "+", or " " marker. Content
		// already carries its own trailing newline (see splitLines).
		for _, l := range hunk.Lines {
			switch l.Kind {
			case Delete:
				fmt.Fprintf(f, "-%s", l.Content)
			case Insert:
				fmt.Fprintf(f, "+%s", l.Content)
			default:
				fmt.Fprintf(f, " %s", l.Content)
			}
			if !strings.HasSuffix(l.Content, "\n") {
				// A line with no trailing newline can only be the last line
				// of a file; emit the standard patch marker for it.
				fmt.Fprintf(f, "\n\\ No newline at end of file\n")
			}
		}
	}
}

132
vendor/golang.org/x/sync/errgroup/errgroup.go generated vendored Normal file
View File

@@ -0,0 +1,132 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package errgroup provides synchronization, error propagation, and Context
// cancelation for groups of goroutines working on subtasks of a common task.
package errgroup
import (
"context"
"fmt"
"sync"
)
type token struct{}
// A Group is a collection of goroutines working on subtasks that are part of
// the same overall task.
//
// A zero Group is valid, has no limit on the number of active goroutines,
// and does not cancel on error.
type Group struct {
	cancel func() // cancels the Context from WithContext; nil for a zero Group
	wg sync.WaitGroup // tracks in-flight subtask goroutines for Wait
	sem chan token // counting semaphore set by SetLimit; nil means no limit
	errOnce sync.Once // ensures only the first subtask error is recorded
	err error // first non-nil error returned by a subtask
}
// done releases this goroutine's semaphore slot, if a limit is in effect,
// and then signals the WaitGroup that the subtask has finished.
func (g *Group) done() {
	if g.sem != nil {
		<-g.sem // free the slot acquired in Go or TryGo
	}
	g.wg.Done()
}
// WithContext returns a new Group and an associated Context derived from ctx.
//
// The derived Context is canceled the first time a function passed to Go
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
	derived, cancel := context.WithCancel(ctx)
	g := &Group{cancel: cancel}
	return g, derived
}
// Wait blocks until all function calls from the Go method have returned, then
// returns the first non-nil error (if any) from them.
func (g *Group) Wait() error {
	g.wg.Wait()
	// Cancel the derived context, if any, now that every subtask is done.
	if cancel := g.cancel; cancel != nil {
		cancel()
	}
	return g.err
}
// Go calls the given function in a new goroutine.
// It blocks until the new goroutine can be added without the number of
// active goroutines in the group exceeding the configured limit.
//
// The first call to return a non-nil error cancels the group; its error will be
// returned by Wait.
func (g *Group) Go(f func() error) {
	if g.sem != nil {
		g.sem <- token{} // blocks until a slot is free under the limit
	}
	g.wg.Add(1)
	go func() {
		defer g.done()
		if err := f(); err != nil {
			// Only the first error is kept; it also cancels the derived
			// context so sibling subtasks can stop early.
			g.errOnce.Do(func() {
				g.err = err
				if g.cancel != nil {
					g.cancel()
				}
			})
		}
	}()
}
// TryGo calls the given function in a new goroutine only if the number of
// active goroutines in the group is currently below the configured limit.
//
// The return value reports whether the goroutine was started.
func (g *Group) TryGo(f func() error) bool {
	if g.sem != nil {
		// Non-blocking semaphore acquire: if no slot is free, report
		// failure instead of waiting.
		select {
		case g.sem <- token{}:
			// Note: this allows barging iff channels in general allow barging.
		default:
			return false
		}
	}
	g.wg.Add(1)
	go func() {
		defer g.done()
		if err := f(); err != nil {
			// Only the first error is kept; it also cancels the derived
			// context so sibling subtasks can stop early.
			g.errOnce.Do(func() {
				g.err = err
				if g.cancel != nil {
					g.cancel()
				}
			})
		}
	}()
	return true
}
// SetLimit limits the number of active goroutines in this group to at most n.
// A negative value indicates no limit.
//
// Any subsequent call to the Go method will block until it can add an active
// goroutine without exceeding the configured limit.
//
// The limit must not be modified while any goroutines in the group are active.
func (g *Group) SetLimit(n int) {
	if n < 0 {
		// Negative means unlimited: drop the semaphore entirely.
		g.sem = nil
		return
	}
	if active := len(g.sem); active != 0 {
		panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active))
	}
	g.sem = make(chan token, n)
}

19
vendor/modules.txt vendored
View File

@@ -357,6 +357,19 @@ github.com/coreos/stream-metadata-go/stream/rhcos
github.com/coreos/vcontext/path
github.com/coreos/vcontext/report
github.com/coreos/vcontext/tree
# github.com/daixiang0/gci v0.9.0
## explicit; go 1.18
github.com/daixiang0/gci
github.com/daixiang0/gci/cmd/gci
github.com/daixiang0/gci/pkg/config
github.com/daixiang0/gci/pkg/format
github.com/daixiang0/gci/pkg/gci
github.com/daixiang0/gci/pkg/io
github.com/daixiang0/gci/pkg/log
github.com/daixiang0/gci/pkg/parse
github.com/daixiang0/gci/pkg/section
github.com/daixiang0/gci/pkg/specificity
github.com/daixiang0/gci/pkg/utils
# github.com/davecgh/go-spew v1.1.1
## explicit
github.com/davecgh/go-spew/spew
@@ -613,6 +626,11 @@ github.com/hashicorp/terraform-exec/tfexec
# github.com/hashicorp/terraform-json v0.14.0
## explicit; go 1.13
github.com/hashicorp/terraform-json
# github.com/hexops/gotextdiff v1.0.3
## explicit; go 1.16
github.com/hexops/gotextdiff
github.com/hexops/gotextdiff/myers
github.com/hexops/gotextdiff/span
# github.com/imdario/mergo v0.3.12
## explicit; go 1.13
github.com/imdario/mergo
@@ -9000,6 +9018,7 @@ golang.org/x/oauth2/jws
golang.org/x/oauth2/jwt
# golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4
## explicit
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
# golang.org/x/sys v0.0.0-20220928140112-f11e5e49a4ec
## explicit; go 1.17