unit tests

Signed-off-by: Casey Lee <cplee@nektos.com>
Casey Lee 2020-02-10 15:27:05 -08:00
parent be75ee20b1
commit 0582306861
GPG key ID: 1899120ECD0A1784
466 changed files with 94683 additions and 62526 deletions

@@ -1,14 +0,0 @@
FROM golangci/golangci-lint:v1.12.5
RUN apt-get install git
LABEL "com.github.actions.name"="Check"
LABEL "com.github.actions.description"="Run static analysis and unit tests"
LABEL "com.github.actions.icon"="check-circle"
LABEL "com.github.actions.color"="green"
COPY "entrypoint.sh" "/entrypoint.sh"
RUN chmod +x /entrypoint.sh
ENV GOFLAGS -mod=vendor
ENTRYPOINT ["/entrypoint.sh"]

@@ -1,12 +0,0 @@
FROM golangci/golangci-lint:v1.12.5
LABEL "com.github.actions.name"="Check"
LABEL "com.github.actions.description"="Run integration tests"
LABEL "com.github.actions.icon"="check-circle"
LABEL "com.github.actions.color"="green"
COPY "entrypoint.sh" "/entrypoint.sh"
RUN chmod +x /entrypoint.sh
ENV GOFLAGS -mod=vendor
ENTRYPOINT ["/entrypoint.sh"]

@@ -1,3 +0,0 @@
#!/bin/sh
set -e
go test -cover ./actions

.github/main.workflow vendored
@@ -1,48 +0,0 @@
workflow "check-and-release" {
on = "push"
resolves = ["release"]
}
action "check" {
uses = "./.github/actions/check"
}
action "release-filter" {
needs = ["check"]
uses = "actions/bin/filter@master"
args = "tag 'v*'"
}
# only release on `v*` tags
action "release" {
needs = ["release-filter"]
uses = "docker://goreleaser/goreleaser:v0.98"
args = "release"
secrets = ["GITHUB_TOKEN"]
}
# local action for `make build`
action "build" {
uses = "docker://goreleaser/goreleaser:v0.98"
args = "--snapshot --rm-dist"
secrets = ["SNAPSHOT_VERSION"]
}
# local action for `make vendor`
action "vendor" {
uses = "docker://golang:1.11.4"
args = "go mod vendor"
}
action "integration-tests" {
needs = ["check"]
uses = "./.github/actions/integration"
}
# local release
action "local-release" {
needs = ["integration-tests"]
uses = "docker://goreleaser/goreleaser:v0.98"
args = "release"
secrets = ["GITHUB_TOKEN"]
}

@@ -1,31 +0,0 @@
name: basic
on: push
jobs:
action:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
build:
runs-on: ubuntu-latest
container:
image: node:10.16-jessie
env:
NODE_ENV: development
steps:
- run: env
test:
runs-on: ubuntu-latest
needs: [build]
steps:
- run: cp $GITHUB_EVENT_PATH $HOME/foo.json
- run: ls $HOME
- run: cat $HOME/foo.json
- uses: docker://alpine:3.8
with:
args: uname -a
local-action:
runs-on: ubuntu-latest
steps:
- uses: ./.github/workflows/docker-url

.github/workflows/check/Dockerfile vendored Normal file
@@ -0,0 +1,9 @@
FROM golangci/golangci-lint:v1.23.6
RUN apt-get update && apt-get install -y git
COPY "entrypoint.sh" "/entrypoint.sh"
RUN chmod +x /entrypoint.sh
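# GOFLAGS=-mod=vendor makes the go commands run by entrypoint.sh resolve
# dependencies from the vendored module tree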
ENV GOFLAGS -mod=vendor
ENTRYPOINT ["/entrypoint.sh"]

.github/workflows/check/action.yml vendored Normal file
@@ -0,0 +1,8 @@
name: Check
description: Run static analysis and unit tests
branding:
icon: check-circle
color: green
runs:
using: 'docker'
image: 'Dockerfile'

@@ -0,0 +1,7 @@
FROM golangci/golangci-lint:v1.23.6
COPY "entrypoint.sh" "/entrypoint.sh"
RUN chmod +x /entrypoint.sh
ENV GOFLAGS -mod=vendor
ENTRYPOINT ["/entrypoint.sh"]

@@ -0,0 +1,8 @@
name: Check
description: Run integration tests
branding:
icon: check-circle
color: green
runs:
using: 'docker'
image: 'Dockerfile'

@@ -0,0 +1,3 @@
#!/bin/sh
set -e
go test -cover ./runner

.github/workflows/push.yml vendored Normal file
@@ -0,0 +1,9 @@
name: push
on: push
jobs:
ci:
runs-on: ubuntu-latest
steps:
- uses: ./.github/workflows/check
- uses: ./.github/workflows/integration

.github/workflows/tag.yml vendored Normal file
@@ -0,0 +1,20 @@
name: tag
on:
push:
tags:
- 'v*'
jobs:
release:
runs-on: ubuntu-latest
steps:
- uses: ./.github/workflows/check
- uses: ./.github/workflows/integration
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v1
with:
version: latest
args: release --rm-dist
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

@@ -21,7 +21,7 @@ test:
go test -cover -short ./...
check:
$(ACT) -ra check
$(ACT) -rj ci
build: check
$(eval export SNAPSHOT_VERSION=$(VERSION))
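The `check` target above now drives the YAML-based runner: `-r` reuses containers between runs and `-j ci` selects the `ci` job from `.github/workflows/push.yml`. A minimal sketch of the equivalent direct invocation, assuming a locally built `act` binary on PATH:

    act -rj ci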

@@ -1,48 +0,0 @@
package actions
import (
"fmt"
"log"
"os"
"github.com/howeyc/gopass"
)
var secretCache map[string]string
type actionEnvironmentApplier struct {
*Action
}
type Action struct{}
func newActionEnvironmentApplier(action *Action) environmentApplier {
return &actionEnvironmentApplier{action}
}
func (action *actionEnvironmentApplier) applyEnvironment(env map[string]string) {
for envKey, envValue := range action.Env {
env[envKey] = envValue
}
for _, secret := range action.Secrets {
if secretVal, ok := os.LookupEnv(secret); ok {
env[secret] = secretVal
} else {
if secretCache == nil {
secretCache = make(map[string]string)
}
if _, ok := secretCache[secret]; !ok {
fmt.Printf("Provide value for '%s': ", secret)
val, err := gopass.GetPasswdMasked()
if err != nil {
log.Fatal("abort")
}
secretCache[secret] = string(val)
}
env[secret] = secretCache[secret]
}
}
}

@@ -1,255 +0,0 @@
package runner
import (
"archive/tar"
"bytes"
"fmt"
"io"
"path/filepath"
"regexp"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
log "github.com/sirupsen/logrus"
)
func (runner *runnerImpl) newRunExecutor(run *model.Run) common.Executor {
action := runner.workflowConfig.GetAction(actionName)
if action == nil {
return common.NewErrorExecutor(fmt.Errorf("Unable to find action named '%s'", actionName))
}
executors := make([]common.Executor, 0)
image, err := runner.addImageExecutor(action, &executors)
if err != nil {
return common.NewErrorExecutor(err)
}
err = runner.addRunExecutor(action, image, &executors)
if err != nil {
return common.NewErrorExecutor(err)
}
return common.NewPipelineExecutor(executors...)
}
func (runner *runnerImpl) addImageExecutor(action *Action, executors *[]common.Executor) (string, error) {
var image string
logger := newActionLogger(action.Identifier, runner.config.Dryrun)
log.Debugf("Using '%s' for action '%s'", action.Uses, action.Identifier)
in := container.DockerExecutorInput{
Ctx: runner.config.Ctx,
Logger: logger,
Dryrun: runner.config.Dryrun,
}
switch uses := action.Uses.(type) {
case *model.UsesDockerImage:
image = uses.Image
pull := runner.config.ForcePull
if !pull {
imageExists, err := container.ImageExistsLocally(runner.config.Ctx, image)
log.Debugf("Image exists? %v", imageExists)
if err != nil {
return "", fmt.Errorf("unable to determine if image already exists for image %q", image)
}
if !imageExists {
pull = true
}
}
if pull {
*executors = append(*executors, container.NewDockerPullExecutor(container.NewDockerPullExecutorInput{
DockerExecutorInput: in,
Image: image,
}))
}
case *model.UsesPath:
contextDir := filepath.Join(runner.config.WorkingDir, uses.String())
sha, _, err := common.FindGitRevision(contextDir)
if err != nil {
log.Warnf("Unable to determine git revision: %v", err)
sha = "latest"
}
image = fmt.Sprintf("%s:%s", filepath.Base(contextDir), sha)
*executors = append(*executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
DockerExecutorInput: in,
ContextDir: contextDir,
ImageTag: image,
}))
case *model.UsesRepository:
image = fmt.Sprintf("%s:%s", filepath.Base(uses.Repository), uses.Ref)
cloneURL := fmt.Sprintf("https://github.com/%s", uses.Repository)
cloneDir := filepath.Join(os.TempDir(), "act", action.Uses.String())
*executors = append(*executors, common.NewGitCloneExecutor(common.NewGitCloneExecutorInput{
URL: cloneURL,
Ref: uses.Ref,
Dir: cloneDir,
Logger: logger,
Dryrun: runner.config.Dryrun,
}))
contextDir := filepath.Join(cloneDir, uses.Path)
*executors = append(*executors, container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
DockerExecutorInput: in,
ContextDir: contextDir,
ImageTag: image,
}))
default:
return "", fmt.Errorf("unable to determine executor type for image '%s'", action.Uses)
}
return image, nil
}
func (runner *runnerImpl) addRunExecutor(action *Action, image string, executors *[]common.Executor) error {
logger := newActionLogger(action.Identifier, runner.config.Dryrun)
log.Debugf("Using '%s' for action '%s'", action.Uses, action.Identifier)
in := container.DockerExecutorInput{
Ctx: runner.config.Ctx,
Logger: logger,
Dryrun: runner.config.Dryrun,
}
env := make(map[string]string)
for _, applier := range []environmentApplier{newActionEnvironmentApplier(action), runner} {
applier.applyEnvironment(env)
}
env["GITHUB_ACTION"] = action.Identifier
ghReader, err := runner.createGithubTarball()
if err != nil {
return err
}
envList := make([]string, 0)
for k, v := range env {
envList = append(envList, fmt.Sprintf("%s=%s", k, v))
}
var cmd, entrypoint []string
if action.Args != nil {
cmd = []string{
"/bin/sh",
"-c",
action.Args,
}
}
if action.Runs != nil {
entrypoint = action.Runs.Split()
}
*executors = append(*executors, container.NewDockerRunExecutor(container.NewDockerRunExecutorInput{
DockerExecutorInput: in,
Cmd: cmd,
Entrypoint: entrypoint,
Image: image,
WorkingDir: "/github/workspace",
Env: envList,
Name: runner.createContainerName(action.Identifier),
Binds: []string{
fmt.Sprintf("%s:%s", runner.config.WorkingDir, "/github/workspace"),
fmt.Sprintf("%s:%s", runner.tempDir, "/github/home"),
fmt.Sprintf("%s:%s", "/var/run/docker.sock", "/var/run/docker.sock"),
},
Content: map[string]io.Reader{"/github": ghReader},
ReuseContainers: runner.config.ReuseContainers,
}))
return nil
}
func (runner *runnerImpl) applyEnvironment(env map[string]string) {
repoPath := runner.config.WorkingDir
workflows := runner.workflowConfig.GetWorkflows(runner.config.EventName)
if len(workflows) == 0 {
return
}
workflowName := workflows[0].Identifier
env["HOME"] = "/github/home"
env["GITHUB_ACTOR"] = "nektos/act"
env["GITHUB_EVENT_PATH"] = "/github/workflow/event.json"
env["GITHUB_WORKSPACE"] = "/github/workspace"
env["GITHUB_WORKFLOW"] = workflowName
env["GITHUB_EVENT_NAME"] = runner.config.EventName
_, rev, err := common.FindGitRevision(repoPath)
if err != nil {
log.Warningf("unable to get git revision: %v", err)
} else {
env["GITHUB_SHA"] = rev
}
repo, err := common.FindGithubRepo(repoPath)
if err != nil {
log.Warningf("unable to get git repo: %v", err)
} else {
env["GITHUB_REPOSITORY"] = repo
}
ref, err := common.FindGitRef(repoPath)
if err != nil {
log.Warningf("unable to get git ref: %v", err)
} else {
log.Infof("using github ref: %s", ref)
env["GITHUB_REF"] = ref
}
}
func (runner *runnerImpl) createGithubTarball() (io.Reader, error) {
var buf bytes.Buffer
tw := tar.NewWriter(&buf)
var files = []struct {
Name string
Mode int64
Body string
}{
{"workflow/event.json", 0644, runner.eventJSON},
}
for _, file := range files {
log.Debugf("Writing entry to tarball %s len:%d", file.Name, len(runner.eventJSON))
hdr := &tar.Header{
Name: file.Name,
Mode: file.Mode,
Size: int64(len(runner.eventJSON)),
}
if err := tw.WriteHeader(hdr); err != nil {
return nil, err
}
if _, err := tw.Write([]byte(runner.eventJSON)); err != nil {
return nil, err
}
}
if err := tw.Close(); err != nil {
return nil, err
}
return &buf, nil
}
func (runner *runnerImpl) createContainerName(actionName string) string {
containerName := regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(actionName, "-")
prefix := fmt.Sprintf("%s-", trimToLen(filepath.Base(runner.config.WorkingDir), 10))
suffix := ""
containerName = trimToLen(containerName, 30-(len(prefix)+len(suffix)))
return fmt.Sprintf("%s%s%s", prefix, containerName, suffix)
}
func trimToLen(s string, l int) string {
if len(s) > l {
return s[:l]
}
return s
}

@@ -1 +0,0 @@
FROM alpine:3.8

@@ -1,21 +0,0 @@
workflow "build-and-deploy" {
on = "push"
resolves = ["deploy"]
}
action "build" {
uses = "./action1"
args = "echo 'build'"
}
action "test" {
uses = "docker://ubuntu:18.04"
args = "env"
needs = ["build"]
}
action "deploy" {
uses = "./action2"
runs = ["/bin/sh", "-c", "cat $GITHUB_EVENT_PATH"]
needs = ["test"]
}

@@ -1,2 +0,0 @@
FROM alpine:3.8
COPY foobar /foo/bar

@@ -1,8 +0,0 @@
workflow "test" {
on = "push"
resolves = ["test-action"]
}
action "test-action" {
uses = "./buildfail-action"
}

@@ -1,9 +0,0 @@
workflow "detect-event" {
on = "pull_request"
resolves = ["build"]
}
action "build" {
uses = "./action1"
args = "echo 'build'"
}

@@ -1,17 +0,0 @@
workflow "test" {
on = "push"
resolves = [
"test-action-repo",
"test-action-ref",
]
}
action "test-action-repo" {
uses = "docker://alpine:3.9"
runs = ["sh", "-c", "echo $GITHUB_REPOSITORY | grep '^nektos/act$'"]
}
action "test-action-ref" {
uses = "docker://alpine:3.9"
runs = ["sh", "-c", "echo $GITHUB_REF | grep '^refs/'"]
}

@@ -1,13 +0,0 @@
workflow "test" {
on = "push"
resolves = ["test-action"]
}
action "test-action" {
uses = "docker://alpine:3.9"
runs = ["sh", "-c", "echo $IN | grep $OUT"]
env = {
IN = "foo"
OUT = "bar"
}
}

@@ -1,14 +0,0 @@
workflow "New workflow" {
on = "push"
resolves = ["branch-ref","commit-ref"]
}
action "branch-ref" {
uses = "actions/docker/cli@master"
args = "version"
}
action "commit-ref" {
uses = "actions/docker/cli@c08a5fc9e0286844156fefff2c141072048141f6"
args = "version"
}

@@ -1,19 +0,0 @@
workflow "buildwf" {
on = "push"
resolves = ["build"]
}
action "build" {
uses = "./action1"
args = "echo 'build'"
}
workflow "deploywf" {
on = "release"
resolves = ["deploy"]
}
action "deploy" {
uses = "./action2"
runs = ["/bin/sh", "-c", "cat $GITHUB_EVENT_PATH"]
}

@@ -1,13 +0,0 @@
workflow "test" {
on = "push"
resolves = ["test-action"]
}
action "test-action" {
uses = "docker://alpine:3.9"
runs = ["sh", "-c", "echo $IN | grep $OUT"]
env = {
IN = "foo"
OUT = "foo"
}
}

@@ -1,9 +0,0 @@
workflow "New workflow" {
on = "push"
resolves = ["filter-version-before-deploy"]
}
action "filter-version-before-deploy" {
uses = "actions/bin/filter@master"
args = "tag z?[0-9]+\\.[0-9]+\\.[0-9]+"
}

go.sum
@@ -21,7 +21,6 @@ github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BU
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb h1:PyjxRdW1mqCmSoxy/6uP01P7CGbsD+woX+oOWbaUPwQ=
github.com/docker/engine v0.0.0-20181106193140-f5749085e9cb/go.mod h1:3CPr2caMgTHxxIAZgEMd3uLYPDlRvPqCpyeRf6ncPcY=
github.com/docker/engine v1.13.1 h1:Cks33UT9YBW5Xyc3MtGDq2IPgqfJtJ+qkFaxc2b0Euc=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk=

@@ -4,6 +4,7 @@ import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
@@ -185,17 +186,16 @@ type NewGitCloneExecutorInput struct {
URL string
Ref string
Dir string
Logger *log.Entry
Dryrun bool
}
// NewGitCloneExecutor creates an executor to clone git repos
func NewGitCloneExecutor(input NewGitCloneExecutorInput) Executor {
return func(ctx context.Context) error {
input.Logger.Infof("git clone '%s' # ref=%s", input.URL, input.Ref)
input.Logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
logger := Logger(ctx)
logger.Infof("git clone '%s' # ref=%s", input.URL, input.Ref)
logger.Debugf(" cloning %s to %s", input.URL, input.Dir)
if input.Dryrun {
if Dryrun(ctx) {
return nil
}
@@ -206,15 +206,26 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) Executor {
r, err := git.PlainOpen(input.Dir)
if err != nil {
var progressWriter io.Writer
if entry, ok := logger.(*log.Entry); ok {
progressWriter = entry.WriterLevel(log.DebugLevel)
} else if lgr, ok := logger.(*log.Logger); ok {
progressWriter = lgr.WriterLevel(log.DebugLevel)
} else {
log.Errorf("Unable to get writer from logger (type=%T)", logger)
progressWriter = os.Stdout
}
r, err = git.PlainClone(input.Dir, false, &git.CloneOptions{
URL: input.URL,
Progress: input.Logger.WriterLevel(log.DebugLevel),
Progress: progressWriter,
//ReferenceName: refName,
})
if err != nil {
input.Logger.Errorf("Unable to clone %v %s: %v", input.URL, refName, err)
logger.Errorf("Unable to clone %v %s: %v", input.URL, refName, err)
return err
}
os.Chmod(input.Dir, 0755)
}
w, err := r.Worktree()
@@ -227,13 +238,13 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) Executor {
Force: true,
})
if err != nil && err.Error() != "already up-to-date" {
input.Logger.Errorf("Unable to pull %s: %v", refName, err)
logger.Errorf("Unable to pull %s: %v", refName, err)
}
input.Logger.Debugf("Cloned %s to %s", input.URL, input.Dir)
logger.Debugf("Cloned %s to %s", input.URL, input.Dir)
hash, err := r.ResolveRevision(plumbing.Revision(input.Ref))
if err != nil {
input.Logger.Errorf("Unable to resolve %s: %v", input.Ref, err)
logger.Errorf("Unable to resolve %s: %v", input.Ref, err)
return err
}
@@ -243,11 +254,11 @@ func NewGitCloneExecutor(input NewGitCloneExecutorInput) Executor {
Force: true,
})
if err != nil {
input.Logger.Errorf("Unable to checkout %s: %v", refName, err)
logger.Errorf("Unable to checkout %s: %v", refName, err)
return err
}
input.Logger.Debugf("Checked out %s", input.Ref)
logger.Debugf("Checked out %s", input.Ref)
return nil
}
}

@@ -41,6 +41,7 @@ func NewDockerBuildExecutor(input NewDockerBuildExecutorInput) common.Executor {
tags := []string{input.ImageTag}
options := types.ImageBuildOptions{
Tags: tags,
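// remove intermediate containers after a successful build (docker build --rm)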
Remove: true,
}
buildContext, err := createBuildContext(input.ContextDir, "Dockerfile")

@@ -19,27 +19,28 @@ func TestImageExistsLocally(t *testing.T) {
if testing.Short() {
t.Skip("skipping integration test")
}
ctx := context.Background()
// to help make this test reliable and not flaky, we need to have
// an image that will exist, and one that won't
exists, err := ImageExistsLocally(context.TODO(), "library/alpine:this-random-tag-will-never-exist")
exists, err := ImageExistsLocally(ctx, "library/alpine:this-random-tag-will-never-exist")
assert.Nil(t, err)
assert.Equal(t, false, exists)
// pull an image
cli, err := client.NewClientWithOpts(client.FromEnv)
assert.Nil(t, err)
cli.NegotiateAPIVersion(context.TODO())
cli.NegotiateAPIVersion(context.Background())
// Chose alpine:latest because it's so small
// maybe we should build an image instead so that tests aren't reliant on Docker Hub
reader, err := cli.ImagePull(context.TODO(), "alpine:latest", types.ImagePullOptions{})
reader, err := cli.ImagePull(ctx, "alpine:latest", types.ImagePullOptions{})
assert.Nil(t, err)
defer reader.Close()
_, err = ioutil.ReadAll(reader)
assert.Nil(t, err)
exists, err = ImageExistsLocally(context.TODO(), "alpine:latest")
exists, err = ImageExistsLocally(ctx, "alpine:latest")
assert.Nil(t, err)
assert.Equal(t, true, exists)
}

@@ -39,6 +39,7 @@ type ContainerSpec struct {
Options string `yaml:"options"`
Entrypoint string
Args string
Name string
}
// Step is the structure of one step in a job
@@ -63,8 +64,8 @@ func (s *Step) GetEnv() map[string]string {
rtnEnv[k] = v
}
for k, v := range s.With {
envKey := fmt.Sprintf("INPUT_%s", strings.ToUpper(k))
envKey = regexp.MustCompile("[^A-Z0-9]").ReplaceAllString(envKey, "_")
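// '-' is deliberately kept when sanitizing, so an input like `who-to-greet`
// becomes INPUT_WHO-TO-GREET, mirroring how GitHub exposes action inputs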
envKey := regexp.MustCompile("[^A-Z0-9-]").ReplaceAllString(strings.ToUpper(k), "_")
envKey = fmt.Sprintf("INPUT_%s", strings.ToUpper(envKey))
rtnEnv[envKey] = v
}
return rtnEnv

@@ -1,5 +0,0 @@
package runner
type environmentApplier interface {
applyEnvironment(map[string]string)
}

@@ -45,10 +45,13 @@ func (rc *RunContext) Close(ctx context.Context) error {
// Executor returns a pipeline executor for all the steps in the job
func (rc *RunContext) Executor() common.Executor {
rc.setupTempDir()
steps := make([]common.Executor, 0)
steps = append(steps, rc.setupTempDir())
for _, step := range rc.Run.Job().Steps {
for i, step := range rc.Run.Job().Steps {
if step.ID == "" {
step.ID = fmt.Sprintf("%d", i)
}
steps = append(steps, rc.newStepExecutor(step))
}
return common.NewPipelineExecutor(steps...).Finally(rc.Close)
@@ -64,17 +67,16 @@ func mergeMaps(maps ...map[string]string) map[string]string {
return rtnMap
}
func (rc *RunContext) setupTempDir() common.Executor {
return func(ctx context.Context) error {
func (rc *RunContext) setupTempDir() error {
var err error
tempBase := ""
if runtime.GOOS == "darwin" {
tempBase = "/tmp"
}
rc.Tempdir, err = ioutil.TempDir(tempBase, "act-")
os.Chmod(rc.Tempdir, 0755)
log.Debugf("Setup tempdir %s", rc.Tempdir)
return err
}
}
func (rc *RunContext) pullImage(containerSpec *model.ContainerSpec) common.Executor {
@@ -111,7 +113,7 @@ func (rc *RunContext) runContainer(containerSpec *model.ContainerSpec) common.Ex
Image: containerSpec.Image,
WorkingDir: "/github/workspace",
Env: envList,
Name: rc.createContainerName(),
Name: containerSpec.Name,
Binds: []string{
fmt.Sprintf("%s:%s", rc.Config.Workdir, "/github/workspace"),
fmt.Sprintf("%s:%s", rc.Tempdir, "/github/home"),
@@ -155,8 +157,9 @@ func (rc *RunContext) createGithubTarball() (io.Reader, error) {
}
func (rc *RunContext) createContainerName() string {
containerName := regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(rc.Run.String(), "-")
func (rc *RunContext) createContainerName(stepID string) string {
containerName := fmt.Sprintf("%s-%s", stepID, rc.Tempdir)
containerName = regexp.MustCompile("[^a-zA-Z0-9]").ReplaceAllString(containerName, "-")
prefix := fmt.Sprintf("%s-", trimToLen(filepath.Base(rc.Config.Workdir), 10))
suffix := ""

@@ -2,28 +2,29 @@ package runner
import (
"context"
"fmt"
"testing"
"github.com/nektos/act/pkg/model"
log "github.com/sirupsen/logrus"
"gotest.tools/assert"
)
func TestGraphEvent(t *testing.T) {
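// testdata/basic/push.yml defines jobs `build` and `test` (needs: build),
// so planning a push event should yield two single-run stages; no workflow
// there handles `release`, hence zero stages for that event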
runnerConfig := &Config{
WorkflowPath: "multi.workflow",
WorkingDir: "testdata",
EventName: "push",
}
runner, err := NewRunner(runnerConfig)
planner, err := model.NewWorkflowPlanner("testdata/basic")
assert.NilError(t, err)
graph, err := runner.GraphEvent("push")
plan := planner.PlanEvent("push")
assert.NilError(t, err)
assert.DeepEqual(t, graph, [][]string{{"build"}})
assert.Equal(t, len(plan.Stages), 2, "stages")
assert.Equal(t, len(plan.Stages[0].Runs), 1, "stage0.runs")
assert.Equal(t, len(plan.Stages[1].Runs), 1, "stage1.runs")
assert.Equal(t, plan.Stages[0].Runs[0].JobID, "build", "jobid")
assert.Equal(t, plan.Stages[1].Runs[0].JobID, "test", "jobid")
graph, err = runner.GraphEvent("release")
assert.NilError(t, err)
assert.DeepEqual(t, graph, [][]string{{"deploy"}})
plan = planner.PlanEvent("release")
assert.Equal(t, len(plan.Stages), 0, "stages")
}
func TestRunEvent(t *testing.T) {
@@ -36,30 +37,36 @@ func TestRunEvent(t *testing.T) {
eventName string
errorMessage string
}{
{"basic.workflow", "push", ""},
{"pipe.workflow", "push", ""},
{"fail.workflow", "push", "exit with `FAILURE`: 1"},
{"buildfail.workflow", "push", "COPY failed"},
{"regex.workflow", "push", "exit with `NEUTRAL`: 78"},
{"gitref.workflow", "push", ""},
{"env.workflow", "push", ""},
{"detect_event.workflow", "", ""},
{"basic", "push", ""},
{"fail", "push", "exit with `FAILURE`: 1"},
{"runs-on", "push", ""},
{"job-container", "push", ""},
{"uses-docker-url", "push", ""},
{"remote-action-docker", "push", ""},
{"remote-action-js", "push", ""},
{"local-action-docker-url", "push", ""},
{"local-action-dockerfile", "push", ""},
}
log.SetLevel(log.DebugLevel)
ctx := context.Background()
for _, table := range tables {
table := table
t.Run(table.workflowPath, func(t *testing.T) {
runnerConfig := &RunnerConfig{
Ctx: context.Background(),
WorkflowPath: table.workflowPath,
WorkingDir: "testdata",
runnerConfig := &Config{
Workdir: "testdata",
EventName: table.eventName,
}
runner, err := NewRunner(runnerConfig)
runner, err := New(runnerConfig)
assert.NilError(t, err, table.workflowPath)
err = runner.RunEvent()
planner, err := model.NewWorkflowPlanner(fmt.Sprintf("testdata/%s", table.workflowPath))
assert.NilError(t, err, table.workflowPath)
plan := planner.PlanEvent(table.eventName)
err = runner.NewPlanExecutor(plan)(ctx)
if table.errorMessage == "" {
assert.NilError(t, err, table.workflowPath)
} else {

@@ -10,6 +10,7 @@ import (
"strings"
"github.com/nektos/act/pkg/common"
"github.com/nektos/act/pkg/container"
"github.com/nektos/act/pkg/model"
log "github.com/sirupsen/logrus"
)
@@ -18,6 +19,7 @@ func (rc *RunContext) newStepExecutor(step *model.Step) common.Executor {
job := rc.Run.Job()
containerSpec := new(model.ContainerSpec)
containerSpec.Env = rc.StepEnv(step)
containerSpec.Name = rc.createContainerName(step.ID)
switch step.Type() {
case model.StepTypeRun:
@@ -45,15 +47,26 @@ func (rc *RunContext) newStepExecutor(step *model.Step) common.Executor {
)
case model.StepTypeUsesActionLocal:
containerSpec.Image = fmt.Sprintf("%s:%s", containerSpec.Name, "latest")
return common.NewPipelineExecutor(
rc.setupAction(containerSpec, filepath.Join(rc.Config.Workdir, step.Uses)),
rc.pullImage(containerSpec),
rc.runContainer(containerSpec),
)
case model.StepTypeUsesActionRemote:
remoteAction := newRemoteAction(step.Uses)
cloneDir, err := ioutil.TempDir(rc.Tempdir, remoteAction.Repo)
if err != nil {
return common.NewErrorExecutor(err)
}
containerSpec.Image = fmt.Sprintf("%s:%s", remoteAction.Repo, remoteAction.Ref)
return common.NewPipelineExecutor(
rc.cloneAction(step.Uses),
rc.setupAction(containerSpec, step.Uses),
common.NewGitCloneExecutor(common.NewGitCloneExecutorInput{
URL: remoteAction.CloneURL(),
Ref: remoteAction.Ref,
Dir: cloneDir,
}),
rc.setupAction(containerSpec, filepath.Join(cloneDir, remoteAction.Path)),
rc.pullImage(containerSpec),
rc.runContainer(containerSpec),
)
@@ -174,8 +187,8 @@ func (rc *RunContext) setupAction(containerSpec *model.ContainerSpec, actionDir
}
for inputID, input := range action.Inputs {
envKey := fmt.Sprintf("INPUT_%s", strings.ToUpper(inputID))
envKey = regexp.MustCompile("[^A-Z0-9]").ReplaceAllString(envKey, "_")
envKey := regexp.MustCompile("[^A-Z0-9-]").ReplaceAllString(strings.ToUpper(inputID), "_")
envKey = fmt.Sprintf("INPUT_%s", envKey)
if _, ok := containerSpec.Env[envKey]; !ok {
containerSpec.Env[envKey] = input.Default
}
@@ -183,23 +196,54 @@ func (rc *RunContext) setupAction(containerSpec *model.ContainerSpec, actionDir
switch action.Runs.Using {
case model.ActionRunsUsingNode12:
containerSpec.Image = "node:12"
containerSpec.Args = action.Runs.Main
containerSpec.Image = "node:12-alpine"
if strings.HasPrefix(actionDir, rc.Config.Workdir) {
containerSpec.Args = fmt.Sprintf("node /github/workspace/%s/%s", strings.TrimPrefix(actionDir, rc.Config.Workdir), action.Runs.Main)
} else if strings.HasPrefix(actionDir, rc.Tempdir) {
containerSpec.Args = fmt.Sprintf("node /github/home/%s/%s", strings.TrimPrefix(actionDir, rc.Tempdir), action.Runs.Main)
}
case model.ActionRunsUsingDocker:
if strings.HasPrefix(action.Runs.Image, "docker://") {
containerSpec.Image = strings.TrimPrefix(action.Runs.Image, "docker://")
containerSpec.Entrypoint = strings.Join(action.Runs.Entrypoint, " ")
containerSpec.Args = strings.Join(action.Runs.Args, " ")
} else {
// TODO: docker build
contextDir := filepath.Join(actionDir, action.Runs.Main)
return container.NewDockerBuildExecutor(container.NewDockerBuildExecutorInput{
ContextDir: contextDir,
ImageTag: containerSpec.Image,
})(ctx)
}
}
return nil
}
}
func (rc *RunContext) cloneAction(action string) common.Executor {
return func(ctx context.Context) error {
return nil
}
type remoteAction struct {
Org string
Repo string
Path string
Ref string
}
func (ra *remoteAction) CloneURL() string {
return fmt.Sprintf("https://github.com/%s/%s", ra.Org, ra.Repo)
}
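// newRemoteAction parses a `uses:` reference of the form
// <org>/<repo>[/<path>][@<ref>], defaulting Path to "" and Ref to "master";
// e.g. "actions/hello-world-docker-action@master" yields Org "actions",
// Repo "hello-world-docker-action", Ref "master", and
// "nektos/act/pkg/runner@v1" yields Org "nektos", Repo "act",
// Path "pkg/runner", Ref "v1".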
func newRemoteAction(action string) *remoteAction {
r := regexp.MustCompile(`^([^/@]+)/([^/@]+)(/([^@]*))?(@(.*))?$`)
matches := r.FindStringSubmatch(action)
ra := new(remoteAction)
ra.Org = matches[1]
ra.Repo = matches[2]
ra.Path = ""
ra.Ref = "master"
// FindStringSubmatch returns all seven entries whenever the pattern
// matches, so test the captured text rather than the slice length;
// otherwise an omitted @<ref> would overwrite the "master" default with ""
if matches[4] != "" {
ra.Path = matches[4]
}
if matches[6] != "" {
ra.Ref = matches[6]
}
return ra
}

@@ -0,0 +1,4 @@
name: 'action1'
runs:
using: 'docker'
image: 'Dockerfile'

@@ -0,0 +1,8 @@
# Container image that runs your code
FROM alpine:3.10
# Copies your code file from your action repository to the filesystem path `/` of the container
COPY entrypoint.sh /entrypoint.sh
# Code file to execute when the docker container starts up (`entrypoint.sh`)
ENTRYPOINT ["/entrypoint.sh"]

@@ -0,0 +1,15 @@
name: 'Hello World'
description: 'Greet someone and record the time'
inputs:
who-to-greet: # id of input
description: 'Who to greet'
required: true
default: 'World'
outputs:
time: # id of output
description: 'The time we greeted you'
runs:
using: 'docker'
image: 'Dockerfile'
args:
- ${{ inputs.who-to-greet }}

@@ -0,0 +1,5 @@
#!/bin/sh -l
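# $1 is the action's first `args:` entry (${{ inputs.who-to-greet }}); the
# ::set-output workflow command below publishes $time as step output `time`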
echo "Hello $1"
time=$(date)
echo ::set-output name=time::$time

@@ -8,9 +8,9 @@ inputs:
default: World
runs:
using: docker
image: docker://alpine:3.8
#image: docker://alpine:3.8
image: docker://node:12-alpine
env:
TEST: enabled
args:
- echo
- ${INPUT_WHO_TO_GREET}
- env

pkg/runner/testdata/basic/push.yml vendored Normal file
@@ -0,0 +1,17 @@
name: basic
on: push
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: ./actions/action1
with:
args: echo 'build'
test:
runs-on: ubuntu-latest
needs: [build]
steps:
- uses: docker://ubuntu:18.04
with:
args: echo ${GITHUB_REF} | grep nektos/act

pkg/runner/testdata/fail/push.yml vendored Normal file
@@ -0,0 +1,12 @@
name: fail
on: push
jobs:
build:
runs-on: ubuntu-latest
container:
image: node:10.16-jessie
env:
TEST_ENV: test-value
steps:
- run: echo ${TEST_ENV} | grep bazooka

@@ -0,0 +1,12 @@
name: job-container
on: push
jobs:
test:
runs-on: ubuntu-latest
container:
image: node:10.16-jessie
env:
TEST_ENV: test-value
steps:
- run: echo ${TEST_ENV} | grep test-value

@@ -0,0 +1,8 @@
name: local-action-docker-url
on: push
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: ./actions/docker-url

@@ -0,0 +1,10 @@
name: local-action-dockerfile
on: push
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: ./actions/docker-local
with:
who-to-greet: 'Mona the Octocat'

@@ -0,0 +1,10 @@
name: remote-action-docker
on: push
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/hello-world-docker-action@master
with:
who-to-greet: 'Mona the Octocat'

@@ -0,0 +1,10 @@
name: remote-action-js
on: push
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: actions/hello-world-javascript-action@master
with:
who-to-greet: 'Mona the Octocat'

pkg/runner/testdata/runs-on/push.yml vendored Normal file
@@ -0,0 +1,8 @@
name: runs-on
on: push
jobs:
test:
runs-on: ubuntu-latest
steps:
- run: echo ${GITHUB_ACTOR} | grep nektos/act

@@ -0,0 +1,11 @@
name: uses-docker-url
on: push
jobs:
test:
runs-on: ubuntu-latest
steps:
- uses: docker://alpine:3.8
with:
somekey: somevalue
args: echo ${INPUT_SOMEKEY} | grep somevalue

@@ -1,21 +0,0 @@
MIT License
Copyright (c) 2019 GitHub
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,38 +0,0 @@
package model
import (
"strings"
)
// Command represents the optional "runs" and "args" attributes.
// Each one takes one of two forms:
// - runs="entrypoint arg1 arg2 ..."
// - runs=[ "entrypoint", "arg1", "arg2", ... ]
type Command interface {
isCommand()
Split() []string
}
// StringCommand represents the string based form of the "runs" or "args"
// attribute.
// - runs="entrypoint arg1 arg2 ..."
type StringCommand struct {
Value string
}
// ListCommand represents the list based form of the "runs" or "args" attribute.
// - runs=[ "entrypoint", "arg1", "arg2", ... ]
type ListCommand struct {
Values []string
}
func (s *StringCommand) isCommand() {}
func (l *ListCommand) isCommand() {}
func (s *StringCommand) Split() []string {
return strings.Fields(s.Value)
}
func (l *ListCommand) Split() []string {
return l.Values
}

@@ -1,64 +0,0 @@
package model
import (
"strings"
)
// Configuration is a parsed main.workflow file
type Configuration struct {
Actions []*Action
Workflows []*Workflow
}
// Action represents a single "action" stanza in a .workflow file.
type Action struct {
Identifier string
Uses Uses
Runs, Args Command
Needs []string
Env map[string]string
Secrets []string
}
// Workflow represents a single "workflow" stanza in a .workflow file.
type Workflow struct {
Identifier string
On string
Resolves []string
}
// GetAction looks up action by identifier.
//
// If the action is not found, nil is returned.
func (c *Configuration) GetAction(id string) *Action {
for _, action := range c.Actions {
if action.Identifier == id {
return action
}
}
return nil
}
// GetWorkflow looks up a workflow by identifier.
//
// If the workflow is not found, nil is returned.
func (c *Configuration) GetWorkflow(id string) *Workflow {
for _, workflow := range c.Workflows {
if workflow.Identifier == id {
return workflow
}
}
return nil
}
// GetWorkflows gets all Workflow structures that match a given type of event.
// e.g., GetWorkflows("push")
func (c *Configuration) GetWorkflows(eventType string) []*Workflow {
var ret []*Workflow
for _, workflow := range c.Workflows {
if strings.EqualFold(workflow.On, eventType) {
ret = append(ret, workflow)
}
}
return ret
}

@@ -1,57 +0,0 @@
package model
import (
"fmt"
)
type Uses interface {
fmt.Stringer
isUses()
}
// UsesDockerImage represents `uses = "docker://<image>"`
type UsesDockerImage struct {
Image string
}
// UsesRepository represents `uses = "<owner>/<repo>[/<path>]@<ref>"`
type UsesRepository struct {
Repository string
Path string
Ref string
}
// UsesPath represents `uses = "./<path>"`
type UsesPath struct {
Path string
}
// UsesInvalid represents any invalid `uses = "<raw>"` value
type UsesInvalid struct {
Raw string
}
func (u *UsesDockerImage) isUses() {}
func (u *UsesRepository) isUses() {}
func (u *UsesPath) isUses() {}
func (u *UsesInvalid) isUses() {}
func (u *UsesDockerImage) String() string {
return fmt.Sprintf("docker://%s", u.Image)
}
func (u *UsesRepository) String() string {
if u.Path == "" {
return fmt.Sprintf("%s@%s", u.Repository, u.Ref)
}
return fmt.Sprintf("%s/%s@%s", u.Repository, u.Path, u.Ref)
}
func (u *UsesPath) String() string {
return fmt.Sprintf("./%s", u.Path)
}
func (u *UsesInvalid) String() string {
return u.Raw
}

@@ -1,136 +0,0 @@
package parser
import (
"bytes"
"fmt"
"sort"
"strconv"
"strings"
"github.com/actions/workflow-parser/model"
)
type Error struct {
message string
Errors []*ParseError
Actions []*model.Action
Workflows []*model.Workflow
}
func (e *Error) Error() string {
buffer := bytes.NewBuffer(nil)
buffer.WriteString(e.message)
for _, pe := range e.Errors {
buffer.WriteString("\n ")
buffer.WriteString(pe.Error())
}
return buffer.String()
}
// FirstError searches a Configuration for the first error at or above a
// given severity level. Checking the return value against nil is a good
// way to see if the file has any errors at or above the given severity.
// A caller intending to execute the file might check for
// `errors.FirstError(parser.WARNING)`, while a caller intending to
// display the file might check for `errors.FirstError(parser.FATAL)`.
func (e *Error) FirstError(severity Severity) error {
for _, pe := range e.Errors {
if pe.Severity >= severity {
return pe
}
}
return nil
}
// ParseError represents an error identified by the parser, either syntactic
// (HCL) or semantic (.workflow) in nature. There are fields for location
// (File, Line, Column), severity, and base error string. The `Error()`
// function on this type concatenates whatever bits of the location are
// available with the message. The severity is only used for filtering.
type ParseError struct {
message string
Pos ErrorPos
Severity Severity
}
// ErrorPos represents the location of an error in a user's workflow
// file(s).
type ErrorPos struct {
File string
Line int
Column int
}
// newFatal creates a new error at the FATAL level, indicating that the
// file is so broken it should not be displayed.
func newFatal(pos ErrorPos, format string, a ...interface{}) *ParseError {
return &ParseError{
message: fmt.Sprintf(format, a...),
Pos: pos,
Severity: FATAL,
}
}
// newError creates a new error at the ERROR level, indicating that the
// file can be displayed but cannot be run.
func newError(pos ErrorPos, format string, a ...interface{}) *ParseError {
return &ParseError{
message: fmt.Sprintf(format, a...),
Pos: pos,
Severity: ERROR,
}
}
// newWarning creates a new error at the WARNING level, indicating that
// the file might be runnable but might not execute as intended.
func newWarning(pos ErrorPos, format string, a ...interface{}) *ParseError {
return &ParseError{
message: fmt.Sprintf(format, a...),
Pos: pos,
Severity: WARNING,
}
}
func (e *ParseError) Error() string {
var sb strings.Builder
if e.Pos.Line != 0 {
sb.WriteString("Line ") // nolint: errcheck
sb.WriteString(strconv.Itoa(e.Pos.Line)) // nolint: errcheck
sb.WriteString(": ") // nolint: errcheck
}
if sb.Len() > 0 {
sb.WriteString(e.message) // nolint: errcheck
return sb.String()
}
return e.message
}
const (
_ = iota
// WARNING indicates a mistake that might affect correctness
WARNING
// ERROR indicates a mistake that prevents execution of any workflows in the file
ERROR
// FATAL indicates a mistake that prevents even drawing the file
FATAL
)
// Severity represents the level of an error encountered while parsing a
// workflow file. See the comments for WARNING, ERROR, and FATAL, above.
type Severity int
type errorList []*ParseError
func (a errorList) Len() int { return len(a) }
func (a errorList) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a errorList) Less(i, j int) bool { return a[i].Pos.Line < a[j].Pos.Line }
// sortErrors sorts the errors reported by the parser. Do this after
// parsing is complete. The sort is stable, so order is preserved within
// a single line: left to right, syntax errors before validation errors.
func (errors errorList) sort() {
sort.Stable(errors)
}

@@ -1,42 +0,0 @@
package parser
import (
"strings"
)
// isAllowedEventType returns true if the event type is supported.
func isAllowedEventType(eventType string) bool {
_, ok := eventTypeWhitelist[strings.ToLower(eventType)]
return ok
}
// https://developer.github.com/actions/creating-workflows/workflow-configuration-options/#events-supported-in-workflow-files
var eventTypeWhitelist = map[string]struct{}{
"check_run": {},
"check_suite": {},
"commit_comment": {},
"create": {},
"delete": {},
"deployment": {},
"deployment_status": {},
"fork": {},
"gollum": {},
"issue_comment": {},
"issues": {},
"label": {},
"member": {},
"milestone": {},
"page_build": {},
"project_card": {},
"project_column": {},
"project": {},
"public": {},
"pull_request_review_comment": {},
"pull_request_review": {},
"pull_request": {},
"push": {},
"release": {},
"repository_dispatch": {},
"status": {},
"watch": {},
}

@@ -1,15 +0,0 @@
package parser
type OptionFunc func(*Parser)
func WithSuppressWarnings() OptionFunc {
return func(ps *Parser) {
ps.suppressSeverity = WARNING
}
}
func WithSuppressErrors() OptionFunc {
return func(ps *Parser) {
ps.suppressSeverity = ERROR
}
}

@@ -1,807 +0,0 @@
package parser
import (
"fmt"
"io"
"io/ioutil"
"regexp"
"strings"
"github.com/actions/workflow-parser/model"
"github.com/hashicorp/hcl"
"github.com/hashicorp/hcl/hcl/ast"
hclparser "github.com/hashicorp/hcl/hcl/parser"
"github.com/hashicorp/hcl/hcl/token"
"github.com/soniakeys/graph"
)
const minVersion = 0
const maxVersion = 0
const maxSecrets = 100
type Parser struct {
version int
actions []*model.Action
workflows []*model.Workflow
errors errorList
posMap map[interface{}]ast.Node
suppressSeverity Severity
}
// Parse parses a .workflow file and return the actions and global variables found within.
func Parse(reader io.Reader, options ...OptionFunc) (*model.Configuration, error) {
// FIXME - check context for deadline?
b, err := ioutil.ReadAll(reader)
if err != nil {
return nil, err
}
root, err := hcl.ParseBytes(b)
if err != nil {
if pe, ok := err.(*hclparser.PosError); ok {
pos := ErrorPos{File: pe.Pos.Filename, Line: pe.Pos.Line, Column: pe.Pos.Column}
errors := errorList{newFatal(pos, pe.Err.Error())}
return nil, &Error{
message: "unable to parse",
Errors: errors,
}
}
return nil, err
}
p := parseAndValidate(root.Node, options...)
if len(p.errors) > 0 {
return nil, &Error{
message: "unable to parse and validate",
Errors: p.errors,
Actions: p.actions,
Workflows: p.workflows,
}
}
return &model.Configuration{
Actions: p.actions,
Workflows: p.workflows,
}, nil
}
// parseAndValidate converts a HCL AST into a Parser and validates
// high-level structure.
// Parameters:
// - root - the contents of a .workflow file, as AST
// Returns:
// - a Parser structure containing actions and workflow definitions
func parseAndValidate(root ast.Node, options ...OptionFunc) *Parser {
p := &Parser{
posMap: make(map[interface{}]ast.Node),
}
for _, option := range options {
option(p)
}
p.parseRoot(root)
p.validate()
p.errors.sort()
return p
}
func (p *Parser) validate() {
p.analyzeDependencies()
p.checkCircularDependencies()
p.checkActions()
p.checkFlows()
}
func uniqStrings(items []string) []string {
seen := make(map[string]bool)
ret := make([]string, 0, len(items))
for _, item := range items {
if !seen[item] {
seen[item] = true
ret = append(ret, item)
}
}
return ret
}
// checkCircularDependencies finds loops in the action graph.
// It emits a fatal error for each cycle it finds, in the order (top to
// bottom, left to right) they appear in the .workflow file.
func (p *Parser) checkCircularDependencies() {
// make a map from action name to node ID, which is the index in the p.actions array
// That is, p.actions[actionmap[X]].Identifier == X
actionmap := make(map[string]graph.NI)
for i, action := range p.actions {
actionmap[action.Identifier] = graph.NI(i)
}
// make an adjacency list representation of the action dependency graph
adjList := make(graph.AdjacencyList, len(p.actions))
for i, action := range p.actions {
adjList[i] = make([]graph.NI, 0, len(action.Needs))
for _, depName := range action.Needs {
if depIdx, ok := actionmap[depName]; ok {
adjList[i] = append(adjList[i], depIdx)
}
}
}
// find cycles, and print a fatal error for each one
g := graph.Directed{AdjacencyList: adjList}
g.Cycles(func(cycle []graph.NI) bool {
node := p.posMap[&p.actions[cycle[len(cycle)-1]].Needs]
p.addFatal(node, "Circular dependency on `%s'", p.actions[cycle[0]].Identifier)
return true
})
}
// checkActions returns error if any actions are syntactically correct but
// have structural errors
func (p *Parser) checkActions() {
secrets := make(map[string]bool)
for _, t := range p.actions {
// Ensure the Action has a `uses` attribute
if t.Uses == nil {
p.addError(p.posMap[t], "Action `%s' must have a `uses' attribute", t.Identifier)
// continue, checking other actions
}
// Ensure there aren't too many secrets
for _, str := range t.Secrets {
if !secrets[str] {
secrets[str] = true
if len(secrets) == maxSecrets+1 {
p.addError(p.posMap[&t.Secrets], "All actions combined must not have more than %d unique secrets", maxSecrets)
}
}
}
// Ensure that no environment variable or secret begins with
// "GITHUB_", unless it's "GITHUB_TOKEN".
// Also ensure that all environment variable names come from the legal
// form for environment variable names.
// Finally, ensure that the same key name isn't used more than once
// between env and secrets, combined.
for k := range t.Env {
p.checkEnvironmentVariable(k, p.posMap[&t.Env])
}
secretVars := make(map[string]bool)
for _, k := range t.Secrets {
p.checkEnvironmentVariable(k, p.posMap[&t.Secrets])
if _, found := t.Env[k]; found {
p.addError(p.posMap[&t.Secrets], "Secret `%s' conflicts with an environment variable with the same name", k)
}
if secretVars[k] {
p.addWarning(p.posMap[&t.Secrets], "Secret `%s' redefined", k)
}
secretVars[k] = true
}
}
}
var envVarChecker = regexp.MustCompile(`\A[A-Za-z_][A-Za-z_0-9]*\z`)
func (p *Parser) checkEnvironmentVariable(key string, node ast.Node) {
if key != "GITHUB_TOKEN" && strings.HasPrefix(key, "GITHUB_") {
p.addWarning(node, "Environment variables and secrets beginning with `GITHUB_' are reserved")
}
if !envVarChecker.MatchString(key) {
p.addWarning(node, "Environment variables and secrets must contain only A-Z, a-z, 0-9, and _ characters, got `%s'", key)
}
}
// checkFlows appends an error if any workflows are syntactically correct but
// have structural errors
func (p *Parser) checkFlows() {
actionmap := makeActionMap(p.actions)
for _, f := range p.workflows {
// make sure there's an `on` attribute
if f.On == "" {
p.addError(p.posMap[f], "Workflow `%s' must have an `on' attribute", f.Identifier)
// continue, checking other workflows
} else if !isAllowedEventType(f.On) {
p.addError(p.posMap[&f.On], "Workflow `%s' has unknown `on' value `%s'", f.Identifier, f.On)
// continue, checking other workflows
}
// make sure that the actions that are resolved all exist
for _, actionID := range f.Resolves {
_, ok := actionmap[actionID]
if !ok {
p.addError(p.posMap[&f.Resolves], "Workflow `%s' resolves unknown action `%s'", f.Identifier, actionID)
// continue, checking other workflows
}
}
}
}
func makeActionMap(actions []*model.Action) map[string]*model.Action {
actionmap := make(map[string]*model.Action)
for _, action := range actions {
actionmap[action.Identifier] = action
}
return actionmap
}
// Fill in Action dependencies for all actions based on explicit dependencies
// declarations.
//
// p.actions is an array of Action objects, as parsed. The Action objects in
// this array are mutated, by setting Action.dependencies for each.
func (p *Parser) analyzeDependencies() {
actionmap := makeActionMap(p.actions)
for _, action := range p.actions {
// analyze explicit dependencies for each "needs" keyword
p.analyzeNeeds(action, actionmap)
}
// uniq all the dependencies lists
for _, action := range p.actions {
if len(action.Needs) >= 2 {
action.Needs = uniqStrings(action.Needs)
}
}
}
func (p *Parser) analyzeNeeds(action *model.Action, actionmap map[string]*model.Action) {
for _, need := range action.Needs {
_, ok := actionmap[need]
if !ok {
p.addError(p.posMap[&action.Needs], "Action `%s' needs nonexistent action `%s'", action.Identifier, need)
// continue, checking other actions
}
}
}
// literalToStringMap converts a object value from the AST to a
// map[string]string. For example, the HCL `{ a="b" c="d" }` becomes the
// Go expression map[string]string{ "a": "b", "c": "d" }.
// If the value doesn't adhere to that format -- e.g.,
// if it's not an object, or it has non-assignment attributes, or if any
// of its values are anything other than a string, the function appends an
// appropriate error.
func (p *Parser) literalToStringMap(node ast.Node) map[string]string {
obj, ok := node.(*ast.ObjectType)
if !ok {
p.addError(node, "Expected object, got %s", typename(node))
return nil
}
p.checkAssignmentsOnly(obj.List, "")
ret := make(map[string]string)
for _, item := range obj.List.Items {
if !isAssignment(item) {
continue
}
str, ok := p.literalToString(item.Val)
if ok {
key := p.identString(item.Keys[0].Token)
if key != "" {
if _, found := ret[key]; found {
p.addWarning(node, "Environment variable `%s' redefined", key)
}
ret[key] = str
}
}
}
return ret
}
func (p *Parser) identString(t token.Token) string {
switch t.Type {
case token.STRING:
return t.Value().(string)
case token.IDENT:
return t.Text
default:
p.addErrorFromToken(t,
"Each identifier should be a string, got %s",
strings.ToLower(t.Type.String()))
return ""
}
}
// literalToStringArray converts a list value from the AST to a []string.
// For example, the HCL `[ "a", "b", "c" ]` becomes the Go expression
// []string{ "a", "b", "c" }.
// If the value doesn't adhere to that format -- it's not a list, or it
// contains anything other than strings, the function appends an
// appropriate error.
// If promoteScalars is true, then values that are scalar strings are
// promoted to a single-entry string array. E.g., "foo" becomes the Go
// expression []string{ "foo" }.
func (p *Parser) literalToStringArray(node ast.Node, promoteScalars bool) ([]string, bool) {
literal, ok := node.(*ast.LiteralType)
if ok {
if promoteScalars && literal.Token.Type == token.STRING {
return []string{literal.Token.Value().(string)}, true
}
p.addError(node, "Expected list, got %s", typename(node))
return nil, false
}
list, ok := node.(*ast.ListType)
if !ok {
p.addError(node, "Expected list, got %s", typename(node))
return nil, false
}
ret := make([]string, 0, len(list.List))
for _, literal := range list.List {
str, ok := p.literalToString(literal)
if ok {
ret = append(ret, str)
}
}
return ret, true
}
// literalToString converts a literal value from the AST into a string.
// If the value isn't a scalar or isn't a string, the function appends an
// appropriate error and returns "", false.
func (p *Parser) literalToString(node ast.Node) (string, bool) {
val := p.literalCast(node, token.STRING)
if val == nil {
return "", false
}
return val.(string), true
}
// literalToInt converts a literal value from the AST into an int64.
// Supported number formats are: 123, 0x123, and 0123.
// Exponents (1e6) and floats (123.456) generate errors.
// If the value isn't a scalar or isn't a number, the function appends an
// appropriate error and returns 0, false.
func (p *Parser) literalToInt(node ast.Node) (int64, bool) {
val := p.literalCast(node, token.NUMBER)
if val == nil {
return 0, false
}
return val.(int64), true
}
func (p *Parser) literalCast(node ast.Node, t token.Type) interface{} {
literal, ok := node.(*ast.LiteralType)
if !ok {
p.addError(node, "Expected %s, got %s", strings.ToLower(t.String()), typename(node))
return nil
}
if literal.Token.Type != t {
p.addError(node, "Expected %s, got %s", strings.ToLower(t.String()), typename(node))
return nil
}
return literal.Token.Value()
}
// parseRoot parses the root of the AST, filling in p.version, p.actions,
// and p.workflows.
func (p *Parser) parseRoot(node ast.Node) {
objectList, ok := node.(*ast.ObjectList)
if !ok {
// It should be impossible for HCL to return anything other than an
// ObjectList as the root node. This error should never happen.
p.addError(node, "Internal error: root node must be an ObjectList")
return
}
p.actions = make([]*model.Action, 0, len(objectList.Items))
p.workflows = make([]*model.Workflow, 0, len(objectList.Items))
identifiers := make(map[string]bool)
for idx, item := range objectList.Items {
if item.Assign.IsValid() {
p.parseVersion(idx, item)
continue
}
p.parseBlock(item, identifiers)
}
}
// parseBlock parses a single, top-level "action" or "workflow" block,
// appending it to p.actions or p.workflows as appropriate.
func (p *Parser) parseBlock(item *ast.ObjectItem, identifiers map[string]bool) {
if len(item.Keys) != 2 {
p.addError(item, "Invalid toplevel declaration")
return
}
cmd := p.identString(item.Keys[0].Token)
var id string
switch cmd {
case "action":
action := p.actionifyItem(item)
if action != nil {
id = action.Identifier
p.actions = append(p.actions, action)
}
case "workflow":
workflow := p.workflowifyItem(item)
if workflow != nil {
id = workflow.Identifier
p.workflows = append(p.workflows, workflow)
}
default:
p.addError(item, "Invalid toplevel keyword, `%s'", cmd)
return
}
if identifiers[id] {
p.addError(item, "Identifier `%s' redefined", id)
}
identifiers[id] = true
}
// parseVersion parses a top-level `version=N` statement, filling in
// p.version.
func (p *Parser) parseVersion(idx int, item *ast.ObjectItem) {
if len(item.Keys) != 1 || p.identString(item.Keys[0].Token) != "version" {
// not a valid `version` declaration
p.addError(item.Val, "Toplevel declarations cannot be assignments")
return
}
if idx != 0 {
p.addError(item.Val, "`version` must be the first declaration")
return
}
version, ok := p.literalToInt(item.Val)
if !ok {
return
}
if version < minVersion || version > maxVersion {
p.addError(item.Val, "`version = %d` is not supported", version)
return
}
p.version = int(version)
}
// parseIdentifier parses the double-quoted identifier (name) for a
// "workflow" or "action" block.
func (p *Parser) parseIdentifier(key *ast.ObjectKey) string {
id := key.Token.Text
if len(id) < 3 || id[0] != '"' || id[len(id)-1] != '"' {
p.addError(key, "Invalid format for identifier `%s'", id)
return ""
}
return id[1 : len(id)-1]
}
// parseRequiredString parses a string value, setting its value into the
// out-parameter `value` and returning true if successful.
func (p *Parser) parseRequiredString(value *string, val ast.Node, nodeType, name, id string) bool {
if *value != "" {
p.addWarning(val, "`%s' redefined in %s `%s'", name, nodeType, id)
// continue, allowing the redefinition
}
newVal, ok := p.literalToString(val)
if !ok {
p.addError(val, "Invalid format for `%s' in %s `%s', expected string", name, nodeType, id)
return false
}
if newVal == "" {
p.addError(val, "`%s' value in %s `%s' cannot be blank", name, nodeType, id)
return false
}
*value = newVal
return true
}
// parseBlockPreamble parses the beginning of a "workflow" or "action"
// block.
func (p *Parser) parseBlockPreamble(item *ast.ObjectItem, nodeType string) (string, *ast.ObjectType) {
id := p.parseIdentifier(item.Keys[1])
if id == "" {
return "", nil
}
node := item.Val
obj, ok := node.(*ast.ObjectType)
if !ok {
p.addError(node, "Each %s must have an { ... } block", nodeType)
return "", nil
}
p.checkAssignmentsOnly(obj.List, id)
return id, obj
}
// actionifyItem converts an AST block to an Action object.
func (p *Parser) actionifyItem(item *ast.ObjectItem) *model.Action {
id, obj := p.parseBlockPreamble(item, "action")
if obj == nil {
return nil
}
action := &model.Action{
Identifier: id,
}
p.posMap[action] = item
for _, item := range obj.List.Items {
p.parseActionAttribute(p.identString(item.Keys[0].Token), action, item.Val)
}
return action
}
// parseActionAttribute parses a single key-value pair from an "action"
// block. This function rejects any unknown keys and enforces formatting
// requirements on all values.
// It also has higher-than-normal cyclomatic complexity, so we ask the
// gocyclo linter to ignore it.
// nolint: gocyclo
func (p *Parser) parseActionAttribute(name string, action *model.Action, val ast.Node) {
switch name {
case "uses":
p.parseUses(action, val)
case "needs":
if needs, ok := p.literalToStringArray(val, true); ok {
action.Needs = needs
p.posMap[&action.Needs] = val
}
case "runs":
if runs := p.parseCommand(action, action.Runs, name, val, false); runs != nil {
action.Runs = runs
}
case "args":
if args := p.parseCommand(action, action.Args, name, val, true); args != nil {
action.Args = args
}
case "env":
if env := p.literalToStringMap(val); env != nil {
action.Env = env
}
p.posMap[&action.Env] = val
case "secrets":
if secrets, ok := p.literalToStringArray(val, false); ok {
action.Secrets = secrets
p.posMap[&action.Secrets] = val
}
default:
p.addWarning(val, "Unknown action attribute `%s'", name)
}
}
// parseUses sets the action.Uses value based on the contents of the AST
// node. This function enforces formatting requirements on the value.
func (p *Parser) parseUses(action *model.Action, node ast.Node) {
if action.Uses != nil {
p.addWarning(node, "`uses' redefined in action `%s'", action.Identifier)
// continue, allowing the redefinition
}
strVal, ok := p.literalToString(node)
if !ok {
return
}
if strVal == "" {
action.Uses = &model.UsesInvalid{}
p.addError(node, "`uses' value in action `%s' cannot be blank", action.Identifier)
return
}
if strings.HasPrefix(strVal, "./") {
action.Uses = &model.UsesPath{Path: strings.TrimPrefix(strVal, "./")}
return
}
if strings.HasPrefix(strVal, "docker://") {
action.Uses = &model.UsesDockerImage{Image: strings.TrimPrefix(strVal, "docker://")}
return
}
tok := strings.Split(strVal, "@")
if len(tok) != 2 {
action.Uses = &model.UsesInvalid{Raw: strVal}
p.addError(node, "The `uses' attribute must be a path, a Docker image, or owner/repo@ref")
return
}
ref := tok[1]
tok = strings.SplitN(tok[0], "/", 3)
if len(tok) < 2 {
action.Uses = &model.UsesInvalid{Raw: strVal}
p.addError(node, "The `uses' attribute must be a path, a Docker image, or owner/repo@ref")
return
}
usesRepo := &model.UsesRepository{Repository: tok[0] + "/" + tok[1], Ref: ref}
action.Uses = usesRepo
if len(tok) == 3 {
usesRepo.Path = tok[2]
}
}
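
Taken together, the branches above accept exactly three shapes for `uses`: a local path beginning with `./`, a Docker image beginning with `docker://`, and `owner/repo[/path]@ref`. Below is a minimal standalone sketch of the same classification rules; the `classify` helper and the sample values are hypothetical, written only to illustrate the branching.

```go
package main

import (
	"fmt"
	"strings"
)

// classify mirrors the branching in parseUses (hypothetical helper).
func classify(uses string) string {
	switch {
	case strings.HasPrefix(uses, "./"):
		return "local path: " + strings.TrimPrefix(uses, "./")
	case strings.HasPrefix(uses, "docker://"):
		return "docker image: " + strings.TrimPrefix(uses, "docker://")
	}
	tok := strings.Split(uses, "@")
	if len(tok) != 2 {
		return "invalid"
	}
	parts := strings.SplitN(tok[0], "/", 3)
	if len(parts) < 2 {
		return "invalid"
	}
	out := fmt.Sprintf("repo %s/%s, ref %s", parts[0], parts[1], tok[1])
	if len(parts) == 3 {
		out += ", path " + parts[2]
	}
	return out
}

func main() {
	for _, u := range []string{"./actions/check", "docker://alpine:3.8", "actions/bin/filter@master"} {
		fmt.Printf("%-28s -> %s\n", u, classify(u))
	}
}
```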
// parseCommand sets the action.Runs or action.Args value based on the
// contents of the AST node. This function enforces formatting
// requirements on the value.
func (p *Parser) parseCommand(action *model.Action, cmd model.Command, name string, node ast.Node, allowBlank bool) model.Command {
if cmd != nil {
p.addWarning(node, "`%s' redefined in action `%s'", name, action.Identifier)
// continue, allowing the redefinition
}
// Is it a list?
if _, ok := node.(*ast.ListType); ok {
if parsed, ok := p.literalToStringArray(node, false); ok {
return &model.ListCommand{Values: parsed}
}
return nil
}
// If not, parse a whitespace-separated string into a list.
var raw string
var ok bool
if raw, ok = p.literalToString(node); !ok {
p.addError(node, "The `%s' attribute must be a string or a list", name)
return nil
}
if raw == "" && !allowBlank {
p.addError(node, "`%s' value in action `%s' cannot be blank", name, action.Identifier)
return nil
}
return &model.StringCommand{Value: raw}
}
func typename(val interface{}) string {
switch cast := val.(type) {
case *ast.ListType:
return "list"
case *ast.LiteralType:
return strings.ToLower(cast.Token.Type.String())
case *ast.ObjectType:
return "object"
default:
return fmt.Sprintf("%T", val)
}
}
// workflowifyItem converts an AST block to a Workflow object.
func (p *Parser) workflowifyItem(item *ast.ObjectItem) *model.Workflow {
id, obj := p.parseBlockPreamble(item, "workflow")
if obj == nil {
return nil
}
var ok bool
workflow := &model.Workflow{Identifier: id}
for _, item := range obj.List.Items {
name := p.identString(item.Keys[0].Token)
switch name {
case "on":
ok = p.parseRequiredString(&workflow.On, item.Val, "workflow", name, id)
if ok {
p.posMap[&workflow.On] = item
}
case "resolves":
if workflow.Resolves != nil {
p.addWarning(item.Val, "`resolves' redefined in workflow `%s'", id)
// continue, allowing the redefinition
}
workflow.Resolves, ok = p.literalToStringArray(item.Val, true)
p.posMap[&workflow.Resolves] = item
if !ok {
p.addError(item.Val, "Invalid format for `resolves' in workflow `%s', expected list of strings", id)
// continue, allowing workflow with no `resolves`
}
default:
p.addWarning(item.Val, "Unknown workflow attribute `%s'", name)
// continue, treat as no-op
}
}
p.posMap[workflow] = item
return workflow
}
func isAssignment(item *ast.ObjectItem) bool {
return len(item.Keys) == 1 && item.Assign.IsValid()
}
// checkAssignmentsOnly ensures that all elements in the object are "key =
// value" pairs.
func (p *Parser) checkAssignmentsOnly(objectList *ast.ObjectList, actionID string) {
for _, item := range objectList.Items {
if !isAssignment(item) {
var desc string
if actionID == "" {
desc = "the object"
} else {
desc = fmt.Sprintf("action `%s'", actionID)
}
p.addErrorFromObjectItem(item, "Each attribute of %s must be an assignment", desc)
continue
}
child, ok := item.Val.(*ast.ObjectType)
if ok {
p.checkAssignmentsOnly(child.List, actionID)
}
}
}
func (p *Parser) addWarning(node ast.Node, format string, a ...interface{}) {
if p.suppressSeverity < WARNING {
p.errors = append(p.errors, newWarning(posFromNode(node), format, a...))
}
}
func (p *Parser) addError(node ast.Node, format string, a ...interface{}) {
if p.suppressSeverity < ERROR {
p.errors = append(p.errors, newError(posFromNode(node), format, a...))
}
}
func (p *Parser) addErrorFromToken(t token.Token, format string, a ...interface{}) {
if p.suppressSeverity < ERROR {
p.errors = append(p.errors, newError(posFromToken(t), format, a...))
}
}
func (p *Parser) addErrorFromObjectItem(objectItem *ast.ObjectItem, format string, a ...interface{}) {
if p.suppressSeverity < ERROR {
p.errors = append(p.errors, newError(posFromObjectItem(objectItem), format, a...))
}
}
func (p *Parser) addFatal(node ast.Node, format string, a ...interface{}) {
if p.suppressSeverity < FATAL {
p.errors = append(p.errors, newFatal(posFromNode(node), format, a...))
}
}
// posFromNode returns an ErrorPos (file, line, and column) from an AST
// node, so we can report specific locations for each parse error.
func posFromNode(node ast.Node) ErrorPos {
var pos *token.Pos
switch cast := node.(type) {
case *ast.ObjectList:
if len(cast.Items) > 0 {
if len(cast.Items[0].Keys) > 0 {
pos = &cast.Items[0].Keys[0].Token.Pos
}
}
case *ast.ObjectItem:
return posFromNode(cast.Val)
case *ast.ObjectType:
pos = &cast.Lbrace
case *ast.LiteralType:
pos = &cast.Token.Pos
case *ast.ListType:
pos = &cast.Lbrack
case *ast.ObjectKey:
pos = &cast.Token.Pos
}
if pos == nil {
return ErrorPos{}
}
return ErrorPos{File: pos.Filename, Line: pos.Line, Column: pos.Column}
}
// posFromObjectItem returns an ErrorPos from an ObjectItem. This is for
// cases where posFromNode(item) would fail because the item has no Val
// set.
func posFromObjectItem(item *ast.ObjectItem) ErrorPos {
if len(item.Keys) > 0 {
return posFromNode(item.Keys[0])
}
return ErrorPos{}
}
// posFromToken returns an ErrorPos from a Token. We can't use
// posFromNode here because Tokens aren't Nodes.
func posFromToken(token token.Token) ErrorPos {
return ErrorPos{File: token.Pos.Filename, Line: token.Pos.Line, Column: token.Pos.Column}
}

View file

@ -1,97 +0,0 @@
// +build ignore
// Simple tool to create an archive stream from an old and new directory
//
// By default it will stream the comparison of two temporary directories with junk files
package main
import (
"flag"
"fmt"
"io"
"io/ioutil"
"os"
"path"
"github.com/docker/docker/pkg/archive"
"github.com/sirupsen/logrus"
)
var (
flDebug = flag.Bool("D", false, "debugging output")
flNewDir = flag.String("newdir", "", "")
flOldDir = flag.String("olddir", "", "")
log = logrus.New()
)
func main() {
flag.Usage = func() {
fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)")
fmt.Printf("%s [OPTIONS]\n", os.Args[0])
flag.PrintDefaults()
}
flag.Parse()
log.Out = os.Stderr
if (len(os.Getenv("DEBUG")) > 0) || *flDebug {
logrus.SetLevel(logrus.DebugLevel)
}
var newDir, oldDir string
if len(*flNewDir) == 0 {
var err error
newDir, err = ioutil.TempDir("", "docker-test-newDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(newDir)
if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil {
log.Fatal(err)
}
} else {
newDir = *flNewDir
}
if len(*flOldDir) == 0 {
var err error
// assign to the outer oldDir; using := here would shadow it and leave it empty
oldDir, err = ioutil.TempDir("", "docker-test-oldDir")
if err != nil {
log.Fatal(err)
}
defer os.RemoveAll(oldDir)
} else {
oldDir = *flOldDir
}
changes, err := archive.ChangesDirs(newDir, oldDir)
if err != nil {
log.Fatal(err)
}
a, err := archive.ExportChanges(newDir, changes)
if err != nil {
log.Fatal(err)
}
defer a.Close()
i, err := io.Copy(os.Stdout, a)
if err != nil && err != io.EOF {
log.Fatal(err)
}
fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i)
}
func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) {
fileData := []byte("fooo")
for n := 0; n < numberOfFiles; n++ {
fileName := fmt.Sprintf("file-%d", n)
if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil {
return 0, err
}
if makeLinks {
if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil {
return 0, err
}
}
}
totalSize := numberOfFiles * len(fileData)
return totalSize, nil
}

View file

@ -1,9 +0,0 @@
y.output
# ignore intellij files
.idea
*.iml
*.ipr
*.iws
*.test

View file

@ -1,13 +0,0 @@
sudo: false
language: go
go:
- 1.x
- tip
branches:
only:
- master
script: make test

View file

@ -1,354 +0,0 @@
Mozilla Public License, version 2.0
1. Definitions
1.1. “Contributor”
means each individual or legal entity that creates, contributes to the
creation of, or owns Covered Software.
1.2. “Contributor Version”
means the combination of the Contributions of others (if any) used by a
Contributor and that particular Contributor's Contribution.
1.3. “Contribution”
means Covered Software of a particular Contributor.
1.4. “Covered Software”
means Source Code Form to which the initial Contributor has attached the
notice in Exhibit A, the Executable Form of such Source Code Form, and
Modifications of such Source Code Form, in each case including portions
thereof.
1.5. “Incompatible With Secondary Licenses”
means
a. that the initial Contributor has attached the notice described in
Exhibit B to the Covered Software; or
b. that the Covered Software was made available under the terms of version
1.1 or earlier of the License, but not also under the terms of a
Secondary License.
1.6. “Executable Form”
means any form of the work other than Source Code Form.
1.7. “Larger Work”
means a work that combines Covered Software with other material, in a separate
file or files, that is not Covered Software.
1.8. “License”
means this document.
1.9. “Licensable”
means having the right to grant, to the maximum extent possible, whether at the
time of the initial grant or subsequently, any and all of the rights conveyed by
this License.
1.10. “Modifications”
means any of the following:
a. any file in Source Code Form that results from an addition to, deletion
from, or modification of the contents of Covered Software; or
b. any new file in Source Code Form that contains any Covered Software.
1.11. “Patent Claims” of a Contributor
means any patent claim(s), including without limitation, method, process,
and apparatus claims, in any patent Licensable by such Contributor that
would be infringed, but for the grant of the License, by the making,
using, selling, offering for sale, having made, import, or transfer of
either its Contributions or its Contributor Version.
1.12. “Secondary License”
means either the GNU General Public License, Version 2.0, the GNU Lesser
General Public License, Version 2.1, the GNU Affero General Public
License, Version 3.0, or any later versions of those licenses.
1.13. “Source Code Form”
means the form of the work preferred for making modifications.
1.14. “You” (or “Your”)
means an individual or a legal entity exercising rights under this
License. For legal entities, “You” includes any entity that controls, is
controlled by, or is under common control with You. For purposes of this
definition, “control” means (a) the power, direct or indirect, to cause
the direction or management of such entity, whether by contract or
otherwise, or (b) ownership of more than fifty percent (50%) of the
outstanding shares or beneficial ownership of such entity.
2. License Grants and Conditions
2.1. Grants
Each Contributor hereby grants You a world-wide, royalty-free,
non-exclusive license:
a. under intellectual property rights (other than patent or trademark)
Licensable by such Contributor to use, reproduce, make available,
modify, display, perform, distribute, and otherwise exploit its
Contributions, either on an unmodified basis, with Modifications, or as
part of a Larger Work; and
b. under Patent Claims of such Contributor to make, use, sell, offer for
sale, have made, import, and otherwise transfer either its Contributions
or its Contributor Version.
2.2. Effective Date
The licenses granted in Section 2.1 with respect to any Contribution become
effective for each Contribution on the date the Contributor first distributes
such Contribution.
2.3. Limitations on Grant Scope
The licenses granted in this Section 2 are the only rights granted under this
License. No additional rights or licenses will be implied from the distribution
or licensing of Covered Software under this License. Notwithstanding Section
2.1(b) above, no patent license is granted by a Contributor:
a. for any code that a Contributor has removed from Covered Software; or
b. for infringements caused by: (i) Your and any other third party's
modifications of Covered Software, or (ii) the combination of its
Contributions with other software (except as part of its Contributor
Version); or
c. under Patent Claims infringed by Covered Software in the absence of its
Contributions.
This License does not grant any rights in the trademarks, service marks, or
logos of any Contributor (except as may be necessary to comply with the
notice requirements in Section 3.4).
2.4. Subsequent Licenses
No Contributor makes additional grants as a result of Your choice to
distribute the Covered Software under a subsequent version of this License
(see Section 10.2) or under the terms of a Secondary License (if permitted
under the terms of Section 3.3).
2.5. Representation
Each Contributor represents that the Contributor believes its Contributions
are its original creation(s) or it has sufficient rights to grant the
rights to its Contributions conveyed by this License.
2.6. Fair Use
This License is not intended to limit any rights You have under applicable
copyright doctrines of fair use, fair dealing, or other equivalents.
2.7. Conditions
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
Section 2.1.
3. Responsibilities
3.1. Distribution of Source Form
All distribution of Covered Software in Source Code Form, including any
Modifications that You create or to which You contribute, must be under the
terms of this License. You must inform recipients that the Source Code Form
of the Covered Software is governed by the terms of this License, and how
they can obtain a copy of this License. You may not attempt to alter or
restrict the recipients' rights in the Source Code Form.
3.2. Distribution of Executable Form
If You distribute Covered Software in Executable Form then:
a. such Covered Software must also be made available in Source Code Form,
as described in Section 3.1, and You must inform recipients of the
Executable Form how they can obtain a copy of such Source Code Form by
reasonable means in a timely manner, at a charge no more than the cost
of distribution to the recipient; and
b. You may distribute such Executable Form under the terms of this License,
or sublicense it under different terms, provided that the license for
the Executable Form does not attempt to limit or alter the recipients'
rights in the Source Code Form under this License.
3.3. Distribution of a Larger Work
You may create and distribute a Larger Work under terms of Your choice,
provided that You also comply with the requirements of this License for the
Covered Software. If the Larger Work is a combination of Covered Software
with a work governed by one or more Secondary Licenses, and the Covered
Software is not Incompatible With Secondary Licenses, this License permits
You to additionally distribute such Covered Software under the terms of
such Secondary License(s), so that the recipient of the Larger Work may, at
their option, further distribute the Covered Software under the terms of
either this License or such Secondary License(s).
3.4. Notices
You may not remove or alter the substance of any license notices (including
copyright notices, patent notices, disclaimers of warranty, or limitations
of liability) contained within the Source Code Form of the Covered
Software, except that You may alter any license notices to the extent
required to remedy known factual inaccuracies.
3.5. Application of Additional Terms
You may choose to offer, and to charge a fee for, warranty, support,
indemnity or liability obligations to one or more recipients of Covered
Software. However, You may do so only on Your own behalf, and not on behalf
of any Contributor. You must make it absolutely clear that any such
warranty, support, indemnity, or liability obligation is offered by You
alone, and You hereby agree to indemnify every Contributor for any
liability incurred by such Contributor as a result of warranty, support,
indemnity or liability terms You offer. You may include additional
disclaimers of warranty and limitations of liability specific to any
jurisdiction.
4. Inability to Comply Due to Statute or Regulation
If it is impossible for You to comply with any of the terms of this License
with respect to some or all of the Covered Software due to statute, judicial
order, or regulation then You must: (a) comply with the terms of this License
to the maximum extent possible; and (b) describe the limitations and the code
they affect. Such description must be placed in a text file included with all
distributions of the Covered Software under this License. Except to the
extent prohibited by statute or regulation, such description must be
sufficiently detailed for a recipient of ordinary skill to be able to
understand it.
5. Termination
5.1. The rights granted under this License will terminate automatically if You
fail to comply with any of its terms. However, if You become compliant,
then the rights granted under this License from a particular Contributor
are reinstated (a) provisionally, unless and until such Contributor
explicitly and finally terminates Your grants, and (b) on an ongoing basis,
if such Contributor fails to notify You of the non-compliance by some
reasonable means prior to 60 days after You have come back into compliance.
Moreover, Your grants from a particular Contributor are reinstated on an
ongoing basis if such Contributor notifies You of the non-compliance by
some reasonable means, this is the first time You have received notice of
non-compliance with this License from such Contributor, and You become
compliant prior to 30 days after Your receipt of the notice.
5.2. If You initiate litigation against any entity by asserting a patent
infringement claim (excluding declaratory judgment actions, counter-claims,
and cross-claims) alleging that a Contributor Version directly or
indirectly infringes any patent, then the rights granted to You by any and
all Contributors for the Covered Software under Section 2.1 of this License
shall terminate.
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
license agreements (excluding distributors and resellers) which have been
validly granted by You or Your distributors under this License prior to
termination shall survive termination.
6. Disclaimer of Warranty
Covered Software is provided under this License on an “as is” basis, without
warranty of any kind, either expressed, implied, or statutory, including,
without limitation, warranties that the Covered Software is free of defects,
merchantable, fit for a particular purpose or non-infringing. The entire
risk as to the quality and performance of the Covered Software is with You.
Should any Covered Software prove defective in any respect, You (not any
Contributor) assume the cost of any necessary servicing, repair, or
correction. This disclaimer of warranty constitutes an essential part of this
License. No use of any Covered Software is authorized under this License
except under this disclaimer.
7. Limitation of Liability
Under no circumstances and under no legal theory, whether tort (including
negligence), contract, or otherwise, shall any Contributor, or anyone who
distributes Covered Software as permitted above, be liable to You for any
direct, indirect, special, incidental, or consequential damages of any
character including, without limitation, damages for lost profits, loss of
goodwill, work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses, even if such party shall have been
informed of the possibility of such damages. This limitation of liability
shall not apply to liability for death or personal injury resulting from such
party's negligence to the extent applicable law prohibits such limitation.
Some jurisdictions do not allow the exclusion or limitation of incidental or
consequential damages, so this exclusion and limitation may not apply to You.
8. Litigation
Any litigation relating to this License may be brought only in the courts of
a jurisdiction where the defendant maintains its principal place of business
and such litigation shall be governed by laws of that jurisdiction, without
reference to its conflict-of-law provisions. Nothing in this Section shall
prevent a party's ability to bring cross-claims or counter-claims.
9. Miscellaneous
This License represents the complete agreement concerning the subject matter
hereof. If any provision of this License is held to be unenforceable, such
provision shall be reformed only to the extent necessary to make it
enforceable. Any law or regulation which provides that the language of a
contract shall be construed against the drafter shall not be used to construe
this License against a Contributor.
10. Versions of the License
10.1. New Versions
Mozilla Foundation is the license steward. Except as provided in Section
10.3, no one other than the license steward has the right to modify or
publish new versions of this License. Each version will be given a
distinguishing version number.
10.2. Effect of New Versions
You may distribute the Covered Software under the terms of the version of
the License under which You originally received the Covered Software, or
under the terms of any subsequent version published by the license
steward.
10.3. Modified Versions
If you create software not governed by this License, and you want to
create a new license for such software, you may create and use a modified
version of this License if you rename the license and remove any
references to the name of the license steward (except to note that such
modified license differs from this License).
10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses
If You choose to distribute Source Code Form that is Incompatible With
Secondary Licenses under the terms of this version of the License, the
notice described in Exhibit B of this License must be attached.
Exhibit A - Source Code Form License Notice
This Source Code Form is subject to the
terms of the Mozilla Public License, v.
2.0. If a copy of the MPL was not
distributed with this file, You can
obtain one at
http://mozilla.org/MPL/2.0/.
If it is not possible or desirable to put the notice in a particular file, then
You may include the notice in a location (such as a LICENSE file in a relevant
directory) where a recipient would be likely to look for such a notice.
You may add additional accurate notices of copyright ownership.
Exhibit B - “Incompatible With Secondary Licenses” Notice
This Source Code Form is “Incompatible
With Secondary Licenses”, as defined by
the Mozilla Public License, v. 2.0.

View file

@ -1,18 +0,0 @@
TEST?=./...
default: test
fmt: generate
go fmt ./...
test: generate
go get -t ./...
go test $(TEST) $(TESTARGS)
generate:
go generate ./...
updatedeps:
go get -u golang.org/x/tools/cmd/stringer
.PHONY: default generate test updatedeps

View file

@ -1,125 +0,0 @@
# HCL
[![GoDoc](https://godoc.org/github.com/hashicorp/hcl?status.png)](https://godoc.org/github.com/hashicorp/hcl) [![Build Status](https://travis-ci.org/hashicorp/hcl.svg?branch=master)](https://travis-ci.org/hashicorp/hcl)
HCL (HashiCorp Configuration Language) is a configuration language built
by HashiCorp. The goal of HCL is to build a structured configuration language
that is both human and machine friendly for use with command-line tools, but
specifically targeted towards DevOps tools, servers, etc.
HCL is also fully JSON compatible. That is, JSON can be used as completely
valid input to a system expecting HCL. This helps make systems
interoperable with other systems.
HCL is heavily inspired by
[libucl](https://github.com/vstakhov/libucl),
nginx configuration, and other similar languages.
## Why?
A common question when viewing HCL is to ask the question: why not
JSON, YAML, etc.?
Prior to HCL, the tools we built at [HashiCorp](http://www.hashicorp.com)
used a variety of configuration languages from full programming languages
such as Ruby to complete data structure languages such as JSON. What we
learned is that some people wanted human-friendly configuration languages
and some people wanted machine-friendly languages.
JSON fits a nice balance in this, but is fairly verbose and most
importantly doesn't support comments. With YAML, we found that beginners
had a really hard time determining what the actual structure was, and
ended up guessing more often than not whether to use a hyphen, colon, etc.
in order to represent some configuration key.
Full programming languages such as Ruby enable complex behavior
a configuration language shouldn't usually allow, and they also force
people to learn some Ruby.
Because of this, we decided to create our own configuration language
that is JSON-compatible. Our configuration language (HCL) is designed
to be written and modified by humans. The API for HCL allows JSON
as an input so that it is also machine-friendly (machines can generate
JSON instead of trying to generate HCL).
Our goal with HCL is not to alienate other configuration languages.
It is instead to provide HCL as a specialized language for our tools,
and JSON as the interoperability layer.
## Syntax
For a complete grammar, please see the parser itself. A high-level overview
of the syntax and grammar is listed here.
* Single line comments start with `#` or `//`
* Multi-line comments are wrapped in `/*` and `*/`. Nested block comments
are not allowed. A multi-line comment (also known as a block comment)
terminates at the first `*/` found.
* Values are assigned with the syntax `key = value` (whitespace doesn't
matter). The value can be any primitive: a string, number, boolean,
object, or list.
* Strings are double-quoted and can contain any UTF-8 characters.
Example: `"Hello, World"`
* Multi-line strings start with `<<EOF` at the end of a line, and end
with `EOF` on its own line ([here documents](https://en.wikipedia.org/wiki/Here_document)).
Any text may be used in place of `EOF`. Example:
```
<<FOO
hello
world
FOO
```
* Numbers are assumed to be base 10. If you prefix a number with 0x,
it is treated as a hexadecimal. If it is prefixed with 0, it is
treated as an octal. Numbers can be in scientific notation: "1e10".
* Boolean values: `true`, `false`
* Arrays can be made by wrapping values in `[]`. Example:
`["foo", "bar", 42]`. Arrays can contain primitives,
other arrays, and objects. As an alternative, lists
of objects can be created with repeated blocks, using
this structure:
```hcl
service {
key = "value"
}
service {
key = "value"
}
```
Objects and nested objects are created using the structure shown below:
```
variable "ami" {
description = "the AMI to use"
}
```
This would be equivalent to the following JSON:
``` json
{
"variable": {
"ami": {
"description": "the AMI to use"
}
}
}
```
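
Because the two forms are interchangeable, the same Go types can decode either one. Here is a minimal sketch using this library's `Decode` entry point (its decoder appears later in this commit); the `Config` and `Variable` types are invented for illustration:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Variable struct {
	Description string `hcl:"description"`
}

type Config struct {
	Variable map[string]Variable `hcl:"variable"`
}

func main() {
	hclSrc := `variable "ami" { description = "the AMI to use" }`
	jsonSrc := `{"variable": {"ami": {"description": "the AMI to use"}}}`
	// Both inputs decode to the same structure.
	for _, src := range []string{hclSrc, jsonSrc} {
		var c Config
		if err := hcl.Decode(&c, src); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%+v\n", c.Variable["ami"])
	}
}
```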
## Thanks
Thanks to:
* [@vstakhov](https://github.com/vstakhov) - The original libucl parser
and syntax that HCL was based off of.
* [@fatih](https://github.com/fatih) - The rewritten HCL parser
in pure Go (no goyacc) and support for a printer.

View file

@ -1,19 +0,0 @@
version: "build-{branch}-{build}"
image: Visual Studio 2015
clone_folder: c:\gopath\src\github.com\hashicorp\hcl
environment:
GOPATH: c:\gopath
init:
- git config --global core.autocrlf false
install:
- cmd: >-
echo %Path%
go version
go env
go get -t ./...
build_script:
- cmd: go test -v ./...

View file

@ -1,729 +0,0 @@
package hcl
import (
"errors"
"fmt"
"reflect"
"sort"
"strconv"
"strings"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/parser"
"github.com/hashicorp/hcl/hcl/token"
)
// tagName is the struct tag used to configure HCL decoding of structures
const tagName = "hcl"
var (
// nodeType holds a reference to the type of ast.Node
nodeType reflect.Type = findNodeType()
)
// Unmarshal accepts a byte slice as input and writes the
// data to the value pointed to by v.
func Unmarshal(bs []byte, v interface{}) error {
root, err := parse(bs)
if err != nil {
return err
}
return DecodeObject(v, root)
}
// Decode reads the given input and decodes it into the structure
// given by `out`.
func Decode(out interface{}, in string) error {
obj, err := Parse(in)
if err != nil {
return err
}
return DecodeObject(out, obj)
}
// DecodeObject is a lower-level version of Decode. It decodes a
// raw Object into the given output.
func DecodeObject(out interface{}, n ast.Node) error {
val := reflect.ValueOf(out)
if val.Kind() != reflect.Ptr {
return errors.New("result must be a pointer")
}
// If we have the file, we really decode the root node
if f, ok := n.(*ast.File); ok {
n = f.Node
}
var d decoder
return d.decode("root", n, val.Elem())
}
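
Before diving into the decoder internals below, here is a minimal sketch of the usual call path through `Decode`; the `Config` struct and the input string are invented for illustration.

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Config struct {
	Name    string `hcl:"name"`
	Workers int    `hcl:"workers"`
}

func main() {
	src := `
name    = "demo"
workers = 4
`
	var cfg Config
	// Decode parses the HCL (or JSON) input and fills the struct by tag.
	if err := hcl.Decode(&cfg, src); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg) // {Name:demo Workers:4}
}
```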
type decoder struct {
stack []reflect.Kind
}
func (d *decoder) decode(name string, node ast.Node, result reflect.Value) error {
k := result
// If we have an interface with a valid value, we use that
// for the check.
if result.Kind() == reflect.Interface {
elem := result.Elem()
if elem.IsValid() {
k = elem
}
}
// Push current onto stack unless it is an interface.
if k.Kind() != reflect.Interface {
d.stack = append(d.stack, k.Kind())
// Schedule a pop
defer func() {
d.stack = d.stack[:len(d.stack)-1]
}()
}
switch k.Kind() {
case reflect.Bool:
return d.decodeBool(name, node, result)
case reflect.Float32, reflect.Float64:
return d.decodeFloat(name, node, result)
case reflect.Int, reflect.Int32, reflect.Int64:
return d.decodeInt(name, node, result)
case reflect.Interface:
// When we see an interface, we make our own thing
return d.decodeInterface(name, node, result)
case reflect.Map:
return d.decodeMap(name, node, result)
case reflect.Ptr:
return d.decodePtr(name, node, result)
case reflect.Slice:
return d.decodeSlice(name, node, result)
case reflect.String:
return d.decodeString(name, node, result)
case reflect.Struct:
return d.decodeStruct(name, node, result)
default:
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown kind to decode into: %s", name, k.Kind()),
}
}
}
func (d *decoder) decodeBool(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
if n.Token.Type == token.BOOL {
v, err := strconv.ParseBool(n.Token.Text)
if err != nil {
return err
}
result.Set(reflect.ValueOf(v))
return nil
}
}
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown type %T", name, node),
}
}
func (d *decoder) decodeFloat(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
if n.Token.Type == token.FLOAT || n.Token.Type == token.NUMBER {
v, err := strconv.ParseFloat(n.Token.Text, 64)
if err != nil {
return err
}
result.Set(reflect.ValueOf(v).Convert(result.Type()))
return nil
}
}
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown type %T", name, node),
}
}
func (d *decoder) decodeInt(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
switch n.Token.Type {
case token.NUMBER:
v, err := strconv.ParseInt(n.Token.Text, 0, 0)
if err != nil {
return err
}
if result.Kind() == reflect.Interface {
result.Set(reflect.ValueOf(int(v)))
} else {
result.SetInt(v)
}
return nil
case token.STRING:
v, err := strconv.ParseInt(n.Token.Value().(string), 0, 0)
if err != nil {
return err
}
if result.Kind() == reflect.Interface {
result.Set(reflect.ValueOf(int(v)))
} else {
result.SetInt(v)
}
return nil
}
}
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown type %T", name, node),
}
}
func (d *decoder) decodeInterface(name string, node ast.Node, result reflect.Value) error {
// When we see an ast.Node, we retain the value to enable deferred decoding.
// Very useful in situations where we want to preserve ast.Node information
// like Pos
if result.Type() == nodeType && result.CanSet() {
result.Set(reflect.ValueOf(node))
return nil
}
var set reflect.Value
redecode := true
// For testing types, ObjectType should just be treated as a list. We
// set this to a temporary var because we want to pass in the real node.
testNode := node
if ot, ok := node.(*ast.ObjectType); ok {
testNode = ot.List
}
switch n := testNode.(type) {
case *ast.ObjectList:
// If we're at the root or we're directly within a slice, then we
// decode objects into map[string]interface{}, otherwise we decode
// them into lists.
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
var temp map[string]interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeMap(
reflect.MapOf(
reflect.TypeOf(""),
tempVal.Type().Elem()))
set = result
} else {
var temp []map[string]interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeSlice(
reflect.SliceOf(tempVal.Type().Elem()), 0, len(n.Items))
set = result
}
case *ast.ObjectType:
// If we're at the root or we're directly within a slice, then we
// decode objects into map[string]interface{}, otherwise we decode
// them into lists.
if len(d.stack) == 0 || d.stack[len(d.stack)-1] == reflect.Slice {
var temp map[string]interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeMap(
reflect.MapOf(
reflect.TypeOf(""),
tempVal.Type().Elem()))
set = result
} else {
var temp []map[string]interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeSlice(
reflect.SliceOf(tempVal.Type().Elem()), 0, 1)
set = result
}
case *ast.ListType:
var temp []interface{}
tempVal := reflect.ValueOf(temp)
result := reflect.MakeSlice(
reflect.SliceOf(tempVal.Type().Elem()), 0, 0)
set = result
case *ast.LiteralType:
switch n.Token.Type {
case token.BOOL:
var result bool
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
case token.FLOAT:
var result float64
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
case token.NUMBER:
var result int
set = reflect.Indirect(reflect.New(reflect.TypeOf(result)))
case token.STRING, token.HEREDOC:
set = reflect.Indirect(reflect.New(reflect.TypeOf("")))
default:
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: cannot decode into interface: %T", name, node),
}
}
default:
return fmt.Errorf(
"%s: cannot decode into interface: %T",
name, node)
}
// Set the result to what it's supposed to be, then reset
// result so we don't reflect into this method anymore.
result.Set(set)
if redecode {
// Revisit the node so that we can use the newly instantiated
// thing and populate it.
if err := d.decode(name, node, result); err != nil {
return err
}
}
return nil
}
func (d *decoder) decodeMap(name string, node ast.Node, result reflect.Value) error {
if item, ok := node.(*ast.ObjectItem); ok {
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
}
if ot, ok := node.(*ast.ObjectType); ok {
node = ot.List
}
n, ok := node.(*ast.ObjectList)
if !ok {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: not an object type for map (%T)", name, node),
}
}
// If we have an interface, then we can address the interface,
// but not the map itself, so get the element but set the interface
set := result
if result.Kind() == reflect.Interface {
result = result.Elem()
}
resultType := result.Type()
resultElemType := resultType.Elem()
resultKeyType := resultType.Key()
if resultKeyType.Kind() != reflect.String {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: map must have string keys", name),
}
}
// Make a map if it is nil
resultMap := result
if result.IsNil() {
resultMap = reflect.MakeMap(
reflect.MapOf(resultKeyType, resultElemType))
}
// Go through each element and decode it.
done := make(map[string]struct{})
for _, item := range n.Items {
if item.Val == nil {
continue
}
// github.com/hashicorp/terraform/issue/5740
if len(item.Keys) == 0 {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: map must have string keys", name),
}
}
// Get the key we're dealing with, which is the first item
keyStr := item.Keys[0].Token.Value().(string)
// If we've already processed this key, then ignore it
if _, ok := done[keyStr]; ok {
continue
}
// Determine the value. If we have more than one key, then we
// get the objectlist of only these keys.
itemVal := item.Val
if len(item.Keys) > 1 {
itemVal = n.Filter(keyStr)
done[keyStr] = struct{}{}
}
// Make the field name
fieldName := fmt.Sprintf("%s.%s", name, keyStr)
// Get the key/value as reflection values
key := reflect.ValueOf(keyStr)
val := reflect.Indirect(reflect.New(resultElemType))
// If we have a pre-existing value in the map, use that
oldVal := resultMap.MapIndex(key)
if oldVal.IsValid() {
val.Set(oldVal)
}
// Decode!
if err := d.decode(fieldName, itemVal, val); err != nil {
return err
}
// Set the value on the map
resultMap.SetMapIndex(key, val)
}
// Set the final map if we can
set.Set(resultMap)
return nil
}
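
As a concrete illustration of this map path: a flat set of assignments decodes directly into a `map[string]string`. A minimal sketch, with an invented input:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

func main() {
	src := `
HOME = "/root"
PATH = "/usr/bin:/bin"
`
	// Each top-level assignment becomes one map entry.
	var env map[string]string
	if err := hcl.Decode(&env, src); err != nil {
		log.Fatal(err)
	}
	fmt.Println(env["HOME"], env["PATH"])
}
```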
func (d *decoder) decodePtr(name string, node ast.Node, result reflect.Value) error {
// Create an element of the concrete (non pointer) type and decode
// into that. Then set the value of the pointer to this type.
resultType := result.Type()
resultElemType := resultType.Elem()
val := reflect.New(resultElemType)
if err := d.decode(name, node, reflect.Indirect(val)); err != nil {
return err
}
result.Set(val)
return nil
}
func (d *decoder) decodeSlice(name string, node ast.Node, result reflect.Value) error {
// If we have an interface, then we can address the interface,
// but not the slice itself, so get the element but set the interface
set := result
if result.Kind() == reflect.Interface {
result = result.Elem()
}
// Create the slice if it is nil
resultType := result.Type()
resultElemType := resultType.Elem()
if result.IsNil() {
resultSliceType := reflect.SliceOf(resultElemType)
result = reflect.MakeSlice(
resultSliceType, 0, 0)
}
// Figure out the items we'll be copying into the slice
var items []ast.Node
switch n := node.(type) {
case *ast.ObjectList:
items = make([]ast.Node, len(n.Items))
for i, item := range n.Items {
items[i] = item
}
case *ast.ObjectType:
items = []ast.Node{n}
case *ast.ListType:
items = n.List
default:
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("unknown slice type: %T", node),
}
}
for i, item := range items {
fieldName := fmt.Sprintf("%s[%d]", name, i)
// Decode
val := reflect.Indirect(reflect.New(resultElemType))
// if item is an object that was decoded from ambiguous JSON and
// flattened, make sure it's expanded if it needs to decode into a
// defined structure.
item := expandObject(item, val)
if err := d.decode(fieldName, item, val); err != nil {
return err
}
// Append it onto the slice
result = reflect.Append(result, val)
}
set.Set(result)
return nil
}
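
The `*ast.ObjectType` branch above is what makes the classic repeated-block pattern work: each block becomes one element of the target slice. A minimal sketch, with invented types and input:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

type Service struct {
	Key string `hcl:"key"`
}

type Root struct {
	Services []Service `hcl:"service"`
}

func main() {
	src := `
service { key = "a" }
service { key = "b" }
`
	var r Root
	// Repeated "service" blocks append to the Services slice in order.
	if err := hcl.Decode(&r, src); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", r.Services) // [{Key:a} {Key:b}]
}
```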
// expandObject detects if an ambiguous JSON object was flattened to a List which
// should be decoded into a struct, and expands the ast to properly decode.
func expandObject(node ast.Node, result reflect.Value) ast.Node {
item, ok := node.(*ast.ObjectItem)
if !ok {
return node
}
elemType := result.Type()
// our target type must be a struct
switch elemType.Kind() {
case reflect.Ptr:
switch elemType.Elem().Kind() {
case reflect.Struct:
//OK
default:
return node
}
case reflect.Struct:
//OK
default:
return node
}
// A list value will have a key and field name. If it had more fields,
// it wouldn't have been flattened.
if len(item.Keys) != 2 {
return node
}
keyToken := item.Keys[0].Token
item.Keys = item.Keys[1:]
// we need to un-flatten the ast enough to decode
newNode := &ast.ObjectItem{
Keys: []*ast.ObjectKey{
&ast.ObjectKey{
Token: keyToken,
},
},
Val: &ast.ObjectType{
List: &ast.ObjectList{
Items: []*ast.ObjectItem{item},
},
},
}
return newNode
}
func (d *decoder) decodeString(name string, node ast.Node, result reflect.Value) error {
switch n := node.(type) {
case *ast.LiteralType:
switch n.Token.Type {
case token.NUMBER:
result.Set(reflect.ValueOf(n.Token.Text).Convert(result.Type()))
return nil
case token.STRING, token.HEREDOC:
result.Set(reflect.ValueOf(n.Token.Value()).Convert(result.Type()))
return nil
}
}
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unknown type for string %T", name, node),
}
}
func (d *decoder) decodeStruct(name string, node ast.Node, result reflect.Value) error {
var item *ast.ObjectItem
if it, ok := node.(*ast.ObjectItem); ok {
item = it
node = it.Val
}
if ot, ok := node.(*ast.ObjectType); ok {
node = ot.List
}
// Handle the special case where the object itself is a literal. Previously
// the yacc parser would always ensure top-level elements were arrays. The new
// parser does not make the same guarantees, thus we need to convert any
// top-level literal elements into a list.
if _, ok := node.(*ast.LiteralType); ok && item != nil {
node = &ast.ObjectList{Items: []*ast.ObjectItem{item}}
}
list, ok := node.(*ast.ObjectList)
if !ok {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: not an object type for struct (%T)", name, node),
}
}
// This slice will keep track of all the structs we'll be decoding.
// There can be more than one struct if there are embedded structs
// that are squashed.
structs := make([]reflect.Value, 1, 5)
structs[0] = result
// Compile the list of all the fields that we're going to be decoding
// from all the structs.
type field struct {
field reflect.StructField
val reflect.Value
}
fields := []field{}
for len(structs) > 0 {
structVal := structs[0]
structs = structs[1:]
structType := structVal.Type()
for i := 0; i < structType.NumField(); i++ {
fieldType := structType.Field(i)
tagParts := strings.Split(fieldType.Tag.Get(tagName), ",")
// Ignore fields with tag name "-"
if tagParts[0] == "-" {
continue
}
if fieldType.Anonymous {
fieldKind := fieldType.Type.Kind()
if fieldKind != reflect.Struct {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: unsupported type to struct: %s",
fieldType.Name, fieldKind),
}
}
// We have an embedded field. We "squash" the fields down
// if specified in the tag.
squash := false
for _, tag := range tagParts[1:] {
if tag == "squash" {
squash = true
break
}
}
if squash {
structs = append(
structs, result.FieldByName(fieldType.Name))
continue
}
}
// Normal struct field, store it away
fields = append(fields, field{fieldType, structVal.Field(i)})
}
}
usedKeys := make(map[string]struct{})
decodedFields := make([]string, 0, len(fields))
decodedFieldsVal := make([]reflect.Value, 0)
unusedKeysVal := make([]reflect.Value, 0)
for _, f := range fields {
field, fieldValue := f.field, f.val
if !fieldValue.IsValid() {
// This should never happen
panic("field is not valid")
}
// If we can't set the field, then it is unexported or something,
// and we just continue onwards.
if !fieldValue.CanSet() {
continue
}
fieldName := field.Name
tagValue := field.Tag.Get(tagName)
tagParts := strings.SplitN(tagValue, ",", 2)
if len(tagParts) >= 2 {
switch tagParts[1] {
case "decodedFields":
decodedFieldsVal = append(decodedFieldsVal, fieldValue)
continue
case "key":
if item == nil {
return &parser.PosError{
Pos: node.Pos(),
Err: fmt.Errorf("%s: %s asked for 'key', impossible",
name, fieldName),
}
}
fieldValue.SetString(item.Keys[0].Token.Value().(string))
continue
case "unusedKeys":
unusedKeysVal = append(unusedKeysVal, fieldValue)
continue
}
}
if tagParts[0] != "" {
fieldName = tagParts[0]
}
// Determine the element we'll use to decode. If it is a single
// match (only object with the field), then we decode it exactly.
// If it is a prefix match, then we decode the matches.
filter := list.Filter(fieldName)
prefixMatches := filter.Children()
matches := filter.Elem()
if len(matches.Items) == 0 && len(prefixMatches.Items) == 0 {
continue
}
// Track the used key
usedKeys[fieldName] = struct{}{}
// Create the field name and decode. We range over the elements
// because we actually want the value.
fieldName = fmt.Sprintf("%s.%s", name, fieldName)
if len(prefixMatches.Items) > 0 {
if err := d.decode(fieldName, prefixMatches, fieldValue); err != nil {
return err
}
}
for _, match := range matches.Items {
var decodeNode ast.Node = match.Val
if ot, ok := decodeNode.(*ast.ObjectType); ok {
decodeNode = &ast.ObjectList{Items: ot.List.Items}
}
if err := d.decode(fieldName, decodeNode, fieldValue); err != nil {
return err
}
}
decodedFields = append(decodedFields, field.Name)
}
if len(decodedFieldsVal) > 0 {
// Sort it so that it is deterministic
sort.Strings(decodedFields)
for _, v := range decodedFieldsVal {
v.Set(reflect.ValueOf(decodedFields))
}
}
return nil
}
// findNodeType returns the type of ast.Node
func findNodeType() reflect.Type {
var nodeContainer struct {
Node ast.Node
}
value := reflect.ValueOf(nodeContainer).FieldByName("Node")
return value.Type()
}

View file

@ -1,3 +0,0 @@
module github.com/hashicorp/hcl
require github.com/davecgh/go-spew v1.1.1

View file

@ -1,2 +0,0 @@
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

View file

@ -1,11 +0,0 @@
// Package hcl decodes HCL into usable Go structures.
//
// hcl input can come in either pure HCL format or JSON format.
// It can be parsed into an AST, and then decoded into a structure,
// or it can be decoded directly from a string into a structure.
//
// If you choose to parse HCL into a raw AST, the benefit is that you
// can write custom visitor implementations to implement custom
// semantic checks. By default, HCL does not perform any semantic
// checks.
package hcl

View file

@ -1,219 +0,0 @@
// Package ast declares the types used to represent syntax trees for HCL
// (HashiCorp Configuration Language)
package ast
import (
"fmt"
"strings"
"github.com/hashicorp/hcl/hcl/token"
)
// Node is an element in the abstract syntax tree.
type Node interface {
node()
Pos() token.Pos
}
func (File) node() {}
func (ObjectList) node() {}
func (ObjectKey) node() {}
func (ObjectItem) node() {}
func (Comment) node() {}
func (CommentGroup) node() {}
func (ObjectType) node() {}
func (LiteralType) node() {}
func (ListType) node() {}
// File represents a single HCL file
type File struct {
Node Node // usually a *ObjectList
Comments []*CommentGroup // list of all comments in the source
}
func (f *File) Pos() token.Pos {
return f.Node.Pos()
}
// ObjectList represents a list of ObjectItems. An HCL file itself is an
// ObjectList.
type ObjectList struct {
Items []*ObjectItem
}
func (o *ObjectList) Add(item *ObjectItem) {
o.Items = append(o.Items, item)
}
// Filter filters out the objects with the given key list as a prefix.
//
// The returned list of objects contain ObjectItems where the keys have
// this prefix already stripped off. This might result in objects with
// zero-length key lists if they have no children.
//
// If no matches are found, an empty ObjectList (non-nil) is returned.
func (o *ObjectList) Filter(keys ...string) *ObjectList {
var result ObjectList
for _, item := range o.Items {
// If there aren't enough keys, then ignore this
if len(item.Keys) < len(keys) {
continue
}
match := true
for i, key := range item.Keys[:len(keys)] {
key := key.Token.Value().(string)
if key != keys[i] && !strings.EqualFold(key, keys[i]) {
match = false
break
}
}
if !match {
continue
}
// Strip off the prefix from the children
newItem := *item
newItem.Keys = newItem.Keys[len(keys):]
result.Add(&newItem)
}
return &result
}
// Children returns further nested objects (key length > 0) within this
// ObjectList. This should be used with Filter to get at child items.
func (o *ObjectList) Children() *ObjectList {
var result ObjectList
for _, item := range o.Items {
if len(item.Keys) > 0 {
result.Add(item)
}
}
return &result
}
// Elem returns items in the list that are direct element assignments
// (key length == 0). This should be used with Filter to get at elements.
func (o *ObjectList) Elem() *ObjectList {
var result ObjectList
for _, item := range o.Items {
if len(item.Keys) == 0 {
result.Add(item)
}
}
return &result
}
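
`Filter`, `Children`, and `Elem` are designed to be used together: `Filter` strips a key prefix, `Children` keeps the nested items, and `Elem` keeps direct assignments. A minimal sketch against the public parser, with an invented input:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	src := []byte(`
service "web" { port = 80 }
service "db"  { port = 5432 }
region = "us-east-1"
`)
	f, err := parser.Parse(src)
	if err != nil {
		log.Fatal(err)
	}
	list := f.Node.(*ast.ObjectList)
	// Filter strips the "service" prefix; Children keeps the named blocks.
	for _, item := range list.Filter("service").Children().Items {
		fmt.Println("service:", item.Keys[0].Token.Value()) // "web", then "db"
	}
}
```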
func (o *ObjectList) Pos() token.Pos {
// returns the position of the first item in the list
return o.Items[0].Pos()
}
// ObjectItem represents an HCL Object Item. An item is represented with a key
// (or keys). It can be an assignment or an object (both normal and nested)
type ObjectItem struct {
// Keys is only one element long if this item is an assignment. If it's a
// nested object it can be longer than one. In that case "assign" is
// invalid, as there are no assignments for a nested object.
Keys []*ObjectKey
// assign contains the position of "=", if any
Assign token.Pos
// val is the item itself. It can be an object, list, number, bool or a
// string. If key length is larger than one, val can be only of type
// Object.
Val Node
LeadComment *CommentGroup // associated lead comment
LineComment *CommentGroup // associated line comment
}
func (o *ObjectItem) Pos() token.Pos {
// I'm not entirely sure what causes this, but removing this causes
// a test failure. We should investigate at some point.
if len(o.Keys) == 0 {
return token.Pos{}
}
return o.Keys[0].Pos()
}
// An ObjectKey is either an identifier or a string.
type ObjectKey struct {
Token token.Token
}
func (o *ObjectKey) Pos() token.Pos {
return o.Token.Pos
}
// LiteralType represents a literal of basic type. Valid types are:
// token.NUMBER, token.FLOAT, token.BOOL and token.STRING
type LiteralType struct {
Token token.Token
// comment types, only used when in a list
LeadComment *CommentGroup
LineComment *CommentGroup
}
func (l *LiteralType) Pos() token.Pos {
return l.Token.Pos
}
// ListType represents an HCL List type
type ListType struct {
Lbrack token.Pos // position of "["
Rbrack token.Pos // position of "]"
List []Node // the elements in lexical order
}
func (l *ListType) Pos() token.Pos {
return l.Lbrack
}
func (l *ListType) Add(node Node) {
l.List = append(l.List, node)
}
// ObjectType represents an HCL Object Type
type ObjectType struct {
Lbrace token.Pos // position of "{"
Rbrace token.Pos // position of "}"
List *ObjectList // the nodes in lexical order
}
func (o *ObjectType) Pos() token.Pos {
return o.Lbrace
}
// Comment node represents a single //, # style or /*-style comment
type Comment struct {
Start token.Pos // position of / or #
Text string
}
func (c *Comment) Pos() token.Pos {
return c.Start
}
// CommentGroup node represents a sequence of comments with no other tokens and
// no empty lines between.
type CommentGroup struct {
List []*Comment // len(List) > 0
}
func (c *CommentGroup) Pos() token.Pos {
return c.List[0].Pos()
}
//-------------------------------------------------------------------
// GoStringer
//-------------------------------------------------------------------
func (o *ObjectKey) GoString() string { return fmt.Sprintf("*%#v", *o) }
func (o *ObjectList) GoString() string { return fmt.Sprintf("*%#v", *o) }

View file

@ -1,52 +0,0 @@
package ast
import "fmt"
// WalkFunc describes a function to be called for each node during a Walk. The
// returned node can be used to rewrite the AST. Walking stops if the returned
// bool is false.
type WalkFunc func(Node) (Node, bool)
// Walk traverses an AST in depth-first order: It starts by calling fn(node);
// node must not be nil. If fn returns true, Walk invokes fn recursively for
// each of the non-nil children of node, followed by a call of fn(nil). The
// node returned by fn can be used to rewrite the node that was passed in.
func Walk(node Node, fn WalkFunc) Node {
rewritten, ok := fn(node)
if !ok {
return rewritten
}
switch n := node.(type) {
case *File:
n.Node = Walk(n.Node, fn)
case *ObjectList:
for i, item := range n.Items {
n.Items[i] = Walk(item, fn).(*ObjectItem)
}
case *ObjectKey:
// nothing to do
case *ObjectItem:
for i, k := range n.Keys {
n.Keys[i] = Walk(k, fn).(*ObjectKey)
}
if n.Val != nil {
n.Val = Walk(n.Val, fn)
}
case *LiteralType:
// nothing to do
case *ListType:
for i, l := range n.List {
n.List[i] = Walk(l, fn)
}
case *ObjectType:
n.List = Walk(n.List, fn).(*ObjectList)
default:
// should we panic here?
fmt.Printf("unknown type: %T\n", n)
}
fn(nil)
return rewritten
}
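
A minimal sketch of `Walk` in use, counting the nodes of a parsed file (input invented). Note that `fn` is also invoked with `nil` after a node's children, so the callback must tolerate that:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl/hcl/ast"
	"github.com/hashicorp/hcl/hcl/parser"
)

func main() {
	f, err := parser.Parse([]byte("a = \"1\"\nb { c = true }"))
	if err != nil {
		log.Fatal(err)
	}
	nodes := 0
	ast.Walk(f.Node, func(n ast.Node) (ast.Node, bool) {
		if n != nil { // Walk also calls fn(nil) after a node's children
			nodes++
		}
		return n, true // keep the node unchanged and keep walking
	})
	fmt.Println("nodes visited:", nodes)
}
```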

View file

@ -1,17 +0,0 @@
package parser
import (
"fmt"
"github.com/hashicorp/hcl/hcl/token"
)
// PosError is a parse error that contains a position.
type PosError struct {
Pos token.Pos
Err error
}
func (e *PosError) Error() string {
return fmt.Sprintf("At %s: %s", e.Pos, e.Err)
}

View file

@ -1,532 +0,0 @@
// Package parser implements a parser for HCL (HashiCorp Configuration
// Language)
package parser
import (
"bytes"
"errors"
"fmt"
"strings"
"github.com/hashicorp/hcl/hcl/ast"
"github.com/hashicorp/hcl/hcl/scanner"
"github.com/hashicorp/hcl/hcl/token"
)
type Parser struct {
sc *scanner.Scanner
// Last read token
tok token.Token
commaPrev token.Token
comments []*ast.CommentGroup
leadComment *ast.CommentGroup // last lead comment
lineComment *ast.CommentGroup // last line comment
enableTrace bool
indent int
n int // buffer size (max = 1)
}
func newParser(src []byte) *Parser {
return &Parser{
sc: scanner.New(src),
}
}
// Parse parses the given source and returns the abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
// normalize all line endings
// since the scanner and output only work with "\n" line endings, we may
// end up with dangling "\r" characters in the parsed data.
src = bytes.Replace(src, []byte("\r\n"), []byte("\n"), -1)
p := newParser(src)
return p.Parse()
}
var errEofToken = errors.New("EOF token found")
// Parse parses the given source and returns the abstract syntax tree.
func (p *Parser) Parse() (*ast.File, error) {
f := &ast.File{}
var err, scerr error
p.sc.Error = func(pos token.Pos, msg string) {
scerr = &PosError{Pos: pos, Err: errors.New(msg)}
}
f.Node, err = p.objectList(false)
if scerr != nil {
return nil, scerr
}
if err != nil {
return nil, err
}
f.Comments = p.comments
return f, nil
}
// objectList parses a list of items within an object (generally k/v pairs).
// The parameter" obj" tells this whether to we are within an object (braces:
// '{', '}') or just at the top level. If we're within an object, we end
// at an RBRACE.
func (p *Parser) objectList(obj bool) (*ast.ObjectList, error) {
defer un(trace(p, "ParseObjectList"))
node := &ast.ObjectList{}
for {
if obj {
tok := p.scan()
p.unscan()
if tok.Type == token.RBRACE {
break
}
}
n, err := p.objectItem()
if err == errEofToken {
break // we are finished
}
// we don't return a nil node, because we might want to use the already
// collected items.
if err != nil {
return node, err
}
node.Add(n)
// object lists can be optionally comma-delimited e.g. when a list of maps
// is being expressed, so a comma is allowed here - it's simply consumed
tok := p.scan()
if tok.Type != token.COMMA {
p.unscan()
}
}
return node, nil
}
func (p *Parser) consumeComment() (comment *ast.Comment, endline int) {
endline = p.tok.Pos.Line
// count the endline if it's a multiline comment, i.e. starting with /*
if len(p.tok.Text) > 1 && p.tok.Text[1] == '*' {
// don't use range here - no need to decode Unicode code points
for i := 0; i < len(p.tok.Text); i++ {
if p.tok.Text[i] == '\n' {
endline++
}
}
}
comment = &ast.Comment{Start: p.tok.Pos, Text: p.tok.Text}
p.tok = p.sc.Scan()
return
}
func (p *Parser) consumeCommentGroup(n int) (comments *ast.CommentGroup, endline int) {
var list []*ast.Comment
endline = p.tok.Pos.Line
for p.tok.Type == token.COMMENT && p.tok.Pos.Line <= endline+n {
var comment *ast.Comment
comment, endline = p.consumeComment()
list = append(list, comment)
}
// add comment group to the comments list
comments = &ast.CommentGroup{List: list}
p.comments = append(p.comments, comments)
return
}
// objectItem parses a single object item
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem"))
keys, err := p.objectKey()
if len(keys) > 0 && err == errEofToken {
// We ignore eof token here since it is an error if we didn't
// receive a value (but we did receive a key) for the item.
err = nil
}
if len(keys) > 0 && err != nil && p.tok.Type == token.RBRACE {
// This is a strange boolean statement, but what it means is:
// We have keys with no value, and we're likely in an object
// (since RBrace ends an object). For this, we set err to nil so
// we continue and get the error below of having the wrong value
// type.
err = nil
// Reset the token type so we don't think it completed fine. See
// objectType which uses p.tok.Type to check if we're done with
// the object.
p.tok.Type = token.EOF
}
if err != nil {
return nil, err
}
o := &ast.ObjectItem{
Keys: keys,
}
if p.leadComment != nil {
o.LeadComment = p.leadComment
p.leadComment = nil
}
switch p.tok.Type {
case token.ASSIGN:
o.Assign = p.tok.Pos
o.Val, err = p.object()
if err != nil {
return nil, err
}
case token.LBRACE:
o.Val, err = p.objectType()
if err != nil {
return nil, err
}
default:
keyStr := make([]string, 0, len(keys))
for _, k := range keys {
keyStr = append(keyStr, k.Token.Text)
}
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf(
"key '%s' expected start of object ('{') or assignment ('=')",
strings.Join(keyStr, " ")),
}
}
// key=#comment
// val
if p.lineComment != nil {
o.LineComment, p.lineComment = p.lineComment, nil
}
// do a look-ahead for line comment
p.scan()
if len(keys) > 0 && o.Val.Pos().Line == keys[0].Pos().Line && p.lineComment != nil {
o.LineComment = p.lineComment
p.lineComment = nil
}
p.unscan()
return o, nil
}
// objectKey parses an object key and returns an ObjectKey AST
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
keyCount := 0
keys := make([]*ast.ObjectKey, 0)
for {
tok := p.scan()
switch tok.Type {
case token.EOF:
// It is very important to also return the keys here as well as
// the error. This is because we need to be able to tell if we
// did parse keys prior to finding the EOF, or if we just found
// a bare EOF.
return keys, errEofToken
case token.ASSIGN:
// assignment or object only, but not nested objects. This is not
// allowed: `foo bar = {}`
if keyCount > 1 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("nested object expected: LBRACE got: %s", p.tok.Type),
}
}
if keyCount == 0 {
return nil, &PosError{
Pos: p.tok.Pos,
Err: errors.New("no object keys found!"),
}
}
return keys, nil
case token.LBRACE:
var err error
// If we have no keys, then it is a syntax error. i.e. {{}} is not
// allowed.
if len(keys) == 0 {
err = &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING got: %s", p.tok.Type),
}
}
// object
return keys, err
case token.IDENT, token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{Token: p.tok})
case token.ILLEGAL:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("illegal character"),
}
default:
return keys, &PosError{
Pos: p.tok.Pos,
Err: fmt.Errorf("expected: IDENT | STRING | ASSIGN | LBRACE got: %s", p.tok.Type),
}
}
}
}
// object parses any type of value, such as a number, bool, string, object,
// or list.
func (p *Parser) object() (ast.Node, error) {
defer un(trace(p, "ParseType"))
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.STRING, token.HEREDOC:
return p.literalType()
case token.LBRACE:
return p.objectType()
case token.LBRACK:
return p.listType()
case token.COMMENT:
// implement comment
case token.EOF:
return nil, errEofToken
}
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("Unknown token: %+v", tok),
}
}
// objectType parses an object type and returns an ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectType"))
// we assume that the currently scanned token is an LBRACE
o := &ast.ObjectType{
Lbrace: p.tok.Pos,
}
l, err := p.objectList(true)
// if we hit RBRACE, we are good to go (it means we parsed all items); if it's
// not an RBRACE, it's a syntax error and we just return it.
if err != nil && p.tok.Type != token.RBRACE {
return nil, err
}
// No error, scan and expect the ending to be a brace
if tok := p.scan(); tok.Type != token.RBRACE {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("object expected closing RBRACE got: %s", tok.Type),
}
}
o.List = l
o.Rbrace = p.tok.Pos // advanced via parseObjectList
return o, nil
}
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
defer un(trace(p, "ParseListType"))
// we assume that the currently scanned token is an LBRACK
l := &ast.ListType{
Lbrack: p.tok.Pos,
}
needComma := false
for {
tok := p.scan()
if needComma {
switch tok.Type {
case token.COMMA, token.RBRACK:
default:
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error parsing list, expected comma or list end, got: %s",
tok.Type),
}
}
}
switch tok.Type {
case token.BOOL, token.NUMBER, token.FLOAT, token.STRING, token.HEREDOC:
node, err := p.literalType()
if err != nil {
return nil, err
}
// If there is a lead comment, apply it
if p.leadComment != nil {
node.LeadComment = p.leadComment
p.leadComment = nil
}
l.Add(node)
needComma = true
case token.COMMA:
// get next list item or we are at the end
// do a look-ahead for line comment
p.scan()
if p.lineComment != nil && len(l.List) > 0 {
lit, ok := l.List[len(l.List)-1].(*ast.LiteralType)
if ok {
lit.LineComment = p.lineComment
l.List[len(l.List)-1] = lit
p.lineComment = nil
}
}
p.unscan()
needComma = false
continue
case token.LBRACE:
// Looks like a nested object, so parse it out
node, err := p.objectType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse object within list: %s", err),
}
}
l.Add(node)
needComma = true
case token.LBRACK:
node, err := p.listType()
if err != nil {
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf(
"error while trying to parse list within list: %s", err),
}
}
l.Add(node)
case token.RBRACK:
// finished
l.Rbrack = p.tok.Pos
return l, nil
default:
return nil, &PosError{
Pos: tok.Pos,
Err: fmt.Errorf("unexpected token while parsing list: %s", tok.Type),
}
}
}
}
// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
defer un(trace(p, "ParseLiteral"))
return &ast.LiteralType{
Token: p.tok,
}, nil
}
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead. In the process, it collects any
// comment groups encountered, and remembers the last lead and line comments.
func (p *Parser) scan() token.Token {
// If we have a token on the buffer, then return it.
if p.n != 0 {
p.n = 0
return p.tok
}
// Otherwise read the next token from the scanner and Save it to the buffer
// in case we unscan later.
prev := p.tok
p.tok = p.sc.Scan()
if p.tok.Type == token.COMMENT {
var comment *ast.CommentGroup
var endline int
// fmt.Printf("p.tok.Pos.Line = %+v prev: %d endline %d \n",
// p.tok.Pos.Line, prev.Pos.Line, endline)
if p.tok.Pos.Line == prev.Pos.Line {
// The comment is on same line as the previous token; it
// cannot be a lead comment but may be a line comment.
comment, endline = p.consumeCommentGroup(0)
if p.tok.Pos.Line != endline {
// The next token is on a different line, thus
// the last comment group is a line comment.
p.lineComment = comment
}
}
// consume successor comments, if any
endline = -1
for p.tok.Type == token.COMMENT {
comment, endline = p.consumeCommentGroup(1)
}
if endline+1 == p.tok.Pos.Line && p.tok.Type != token.RBRACE {
switch p.tok.Type {
case token.RBRACE, token.RBRACK:
// Do not count for these cases
default:
// The next token is following on the line immediately after the
// comment group, thus the last comment group is a lead comment.
p.leadComment = comment
}
}
}
return p.tok
}
// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
p.n = 1
}
// ----------------------------------------------------------------------------
// Parsing support
func (p *Parser) printTrace(a ...interface{}) {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *Parser, msg string) *Parser {
p.printTrace(msg, "(")
p.indent++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
p.indent--
p.printTrace(")")
}
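For orientation, here is a minimal sketch of driving this parser end to end. It assumes the vendored import paths github.com/hashicorp/hcl/hcl/parser and .../hcl/ast, plus the package-level Parse entry point that hcl/parse.go further down in this change calls:

package main

import (
    "fmt"
    "log"

    "github.com/hashicorp/hcl/hcl/ast"
    "github.com/hashicorp/hcl/hcl/parser"
)

func main() {
    src := []byte("name = \"act\"\ncount = 3")

    // Parse scans and parses the source, returning the *ast.File root.
    f, err := parser.Parse(src)
    if err != nil {
        log.Fatal(err)
    }

    // The root node is the *ast.ObjectList built by objectList above.
    for _, item := range f.Node.(*ast.ObjectList).Items {
        fmt.Println(item.Keys[0].Token.Text) // name, then count
    }
}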

View file

@ -1,652 +0,0 @@
// Package scanner implements a scanner for HCL (HashiCorp Configuration
// Language) source text.
package scanner
import (
"bytes"
"fmt"
"os"
"regexp"
"unicode"
"unicode/utf8"
"github.com/hashicorp/hcl/hcl/token"
)
// eof represents a marker rune for the end of the reader.
const eof = rune(0)
// Scanner defines a lexical scanner
type Scanner struct {
buf *bytes.Buffer // Source buffer for advancing and scanning
src []byte // Source buffer for immutable access
// Source Position
srcPos token.Pos // current position
prevPos token.Pos // previous position, used for peek() method
lastCharLen int // length of last character in bytes
lastLineLen int // length of last line in characters (for correct column reporting)
tokStart int // token text start position
tokEnd int // token text end position
// Error is called for each error encountered. If no Error
// function is set, the error is reported to os.Stderr.
Error func(pos token.Pos, msg string)
// ErrorCount is incremented by one for each error encountered.
ErrorCount int
// tokPos is the start position of most recently scanned token; set by
// Scan. The Filename field is always left untouched by the Scanner. If
// an error is reported (via Error) and Position is invalid, the scanner is
// not inside a token.
tokPos token.Pos
}
// New creates and initializes a new instance of Scanner using src as
// its source content.
func New(src []byte) *Scanner {
// even though we accept a src, we read from an io.Reader compatible type
// (*bytes.Buffer). So in the future we might easily change it to streaming
// read.
b := bytes.NewBuffer(src)
s := &Scanner{
buf: b,
src: src,
}
// the source position always starts at line 1
s.srcPos.Line = 1
return s
}
// next reads the next rune from the buffered reader. Returns the rune(0) if
// an error occurs (or io.EOF is returned).
func (s *Scanner) next() rune {
ch, size, err := s.buf.ReadRune()
if err != nil {
// advance for error reporting
s.srcPos.Column++
s.srcPos.Offset += size
s.lastCharLen = size
return eof
}
// remember last position
s.prevPos = s.srcPos
s.srcPos.Column++
s.lastCharLen = size
s.srcPos.Offset += size
if ch == utf8.RuneError && size == 1 {
s.err("illegal UTF-8 encoding")
return ch
}
if ch == '\n' {
s.srcPos.Line++
s.lastLineLen = s.srcPos.Column
s.srcPos.Column = 0
}
if ch == '\x00' {
s.err("unexpected null character (0x00)")
return eof
}
if ch == '\uE123' {
s.err("unicode code point U+E123 reserved for internal use")
return utf8.RuneError
}
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
}
// unread unreads the previous read Rune and updates the source position
func (s *Scanner) unread() {
if err := s.buf.UnreadRune(); err != nil {
panic(err) // this is the user's fault, we should catch it
}
s.srcPos = s.prevPos // put back last position
}
// peek returns the next rune without advancing the reader.
func (s *Scanner) peek() rune {
peek, _, err := s.buf.ReadRune()
if err != nil {
return eof
}
s.buf.UnreadRune()
return peek
}
// Scan scans the next token and returns the token.
func (s *Scanner) Scan() token.Token {
ch := s.next()
// skip white space
for isWhitespace(ch) {
ch = s.next()
}
var tok token.Type
// token text markings
s.tokStart = s.srcPos.Offset - s.lastCharLen
// token position: the initial next() moves the offset by one (the size of
// the rune, actually), though we are interested in the starting point
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
if s.srcPos.Column > 0 {
// common case: last character was not a '\n'
s.tokPos.Line = s.srcPos.Line
s.tokPos.Column = s.srcPos.Column
} else {
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
s.tokPos.Line = s.srcPos.Line - 1
s.tokPos.Column = s.lastLineLen
}
switch {
case isLetter(ch):
tok = token.IDENT
lit := s.scanIdentifier()
if lit == "true" || lit == "false" {
tok = token.BOOL
}
case isDecimal(ch):
tok = s.scanNumber(ch)
default:
switch ch {
case eof:
tok = token.EOF
case '"':
tok = token.STRING
s.scanString()
case '#', '/':
tok = token.COMMENT
s.scanComment(ch)
case '.':
tok = token.PERIOD
ch = s.peek()
if isDecimal(ch) {
tok = token.FLOAT
ch = s.scanMantissa(ch)
ch = s.scanExponent(ch)
}
case '<':
tok = token.HEREDOC
s.scanHeredoc()
case '[':
tok = token.LBRACK
case ']':
tok = token.RBRACK
case '{':
tok = token.LBRACE
case '}':
tok = token.RBRACE
case ',':
tok = token.COMMA
case '=':
tok = token.ASSIGN
case '+':
tok = token.ADD
case '-':
if isDecimal(s.peek()) {
ch := s.next()
tok = s.scanNumber(ch)
} else {
tok = token.SUB
}
default:
s.err("illegal char")
}
}
// finish token ending
s.tokEnd = s.srcPos.Offset
// create token literal
var tokenText string
if s.tokStart >= 0 {
tokenText = string(s.src[s.tokStart:s.tokEnd])
}
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
return token.Token{
Type: tok,
Pos: s.tokPos,
Text: tokenText,
}
}
func (s *Scanner) scanComment(ch rune) {
// single line comments
if ch == '#' || (ch == '/' && s.peek() != '*') {
if ch == '/' && s.peek() != '/' {
s.err("expected '/' for comment")
return
}
ch = s.next()
for ch != '\n' && ch >= 0 && ch != eof {
ch = s.next()
}
if ch != eof && ch >= 0 {
s.unread()
}
return
}
// be sure we get the character after /*. This allows us to find comments
// that are not terminated
if ch == '/' {
s.next()
ch = s.next() // read character after "/*"
}
// look for /* - style comments
for {
if ch < 0 || ch == eof {
s.err("comment not terminated")
break
}
ch0 := ch
ch = s.next()
if ch0 == '*' && ch == '/' {
break
}
}
}
// scanNumber scans an HCL number definition starting with the given rune
func (s *Scanner) scanNumber(ch rune) token.Type {
if ch == '0' {
// check for hexadecimal, octal or float
ch = s.next()
if ch == 'x' || ch == 'X' {
// hexadecimal
ch = s.next()
found := false
for isHexadecimal(ch) {
ch = s.next()
found = true
}
if !found {
s.err("illegal hexadecimal number")
}
if ch != eof {
s.unread()
}
return token.NUMBER
}
// now it's either something like 0421 (octal) or 0.1231 (float)
illegalOctal := false
for isDecimal(ch) {
ch = s.next()
if ch == '8' || ch == '9' {
// this is just a possibility. For example 0159 is illegal, but
// 0159.23 is valid. So we mark a possible illegal octal. If
// the next character is not a period, we'll print the error.
illegalOctal = true
}
}
if ch == 'e' || ch == 'E' {
ch = s.scanExponent(ch)
return token.FLOAT
}
if ch == '.' {
ch = s.scanFraction(ch)
if ch == 'e' || ch == 'E' {
ch = s.next()
ch = s.scanExponent(ch)
}
return token.FLOAT
}
if illegalOctal {
s.err("illegal octal number")
}
if ch != eof {
s.unread()
}
return token.NUMBER
}
s.scanMantissa(ch)
ch = s.next() // seek forward
if ch == 'e' || ch == 'E' {
ch = s.scanExponent(ch)
return token.FLOAT
}
if ch == '.' {
ch = s.scanFraction(ch)
if ch == 'e' || ch == 'E' {
ch = s.next()
ch = s.scanExponent(ch)
}
return token.FLOAT
}
if ch != eof {
s.unread()
}
return token.NUMBER
}
// scanMantissa scans the mantissa beginning from the rune. It returns the next
// non-decimal rune. It's used to determine whether it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
scanned := false
for isDecimal(ch) {
ch = s.next()
scanned = true
}
if scanned && ch != eof {
s.unread()
}
return ch
}
// scanFraction scans the fraction after the '.' rune
func (s *Scanner) scanFraction(ch rune) rune {
if ch == '.' {
ch = s.peek() // we peek just to see if we can move forward
ch = s.scanMantissa(ch)
}
return ch
}
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune.
func (s *Scanner) scanExponent(ch rune) rune {
if ch == 'e' || ch == 'E' {
ch = s.next()
if ch == '-' || ch == '+' {
ch = s.next()
}
ch = s.scanMantissa(ch)
}
return ch
}
// scanHeredoc scans a heredoc string
func (s *Scanner) scanHeredoc() {
// Scan the second '<' in example: '<<EOF'
if s.next() != '<' {
s.err("heredoc expected second '<', didn't see it")
return
}
// Get the original offset so we can read just the heredoc ident
offs := s.srcPos.Offset
// Scan the identifier
ch := s.next()
// Indented heredoc syntax
if ch == '-' {
ch = s.next()
}
for isLetter(ch) || isDigit(ch) {
ch = s.next()
}
// If we reached an EOF then that is not good
if ch == eof {
s.err("heredoc not terminated")
return
}
// Ignore the '\r' in Windows line endings
if ch == '\r' {
if s.peek() == '\n' {
ch = s.next()
}
}
// If we didn't reach a newline then that is also not good
if ch != '\n' {
s.err("invalid characters in heredoc anchor")
return
}
// Read the identifier
identBytes := s.src[offs : s.srcPos.Offset-s.lastCharLen]
if len(identBytes) == 0 || (len(identBytes) == 1 && identBytes[0] == '-') {
s.err("zero-length heredoc anchor")
return
}
var identRegexp *regexp.Regexp
if identBytes[0] == '-' {
identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes[1:]))
} else {
identRegexp = regexp.MustCompile(fmt.Sprintf(`^[[:space:]]*%s\r*\z`, identBytes))
}
// Read the actual string value
lineStart := s.srcPos.Offset
for {
ch := s.next()
// Special newline handling.
if ch == '\n' {
// Math is fast, so we first compare the byte counts to see if we have a chance
// of seeing the same identifier - if the length is less than the number of bytes
// in the identifier, this cannot be a valid terminator.
lineBytesLen := s.srcPos.Offset - s.lastCharLen - lineStart
if lineBytesLen >= len(identBytes) && identRegexp.Match(s.src[lineStart:s.srcPos.Offset-s.lastCharLen]) {
break
}
// Not an anchor match, record the start of a new line
lineStart = s.srcPos.Offset
}
if ch == eof {
s.err("heredoc not terminated")
return
}
}
return
}
// scanString scans a quoted string
func (s *Scanner) scanString() {
braces := 0
for {
// '"' opening already consumed
// read character after quote
ch := s.next()
if (ch == '\n' && braces == 0) || ch < 0 || ch == eof {
s.err("literal not terminated")
return
}
if ch == '"' && braces == 0 {
break
}
// If we're going into a ${} then we can ignore quotes for a while
if braces == 0 && ch == '$' && s.peek() == '{' {
braces++
s.next()
} else if braces > 0 && ch == '{' {
braces++
}
if braces > 0 && ch == '}' {
braces--
}
if ch == '\\' {
s.scanEscape()
}
}
return
}
// scanEscape scans an escape sequence
func (s *Scanner) scanEscape() rune {
// http://en.cppreference.com/w/cpp/language/escape
ch := s.next() // read character after '/'
switch ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
// nothing to do
case '0', '1', '2', '3', '4', '5', '6', '7':
// octal notation
ch = s.scanDigits(ch, 8, 3)
case 'x':
// hexadecimal notation
ch = s.scanDigits(s.next(), 16, 2)
case 'u':
// universal character name
ch = s.scanDigits(s.next(), 16, 4)
case 'U':
// universal character name
ch = s.scanDigits(s.next(), 16, 8)
default:
s.err("illegal char escape")
}
return ch
}
// scanDigits scans a rune with the given base up to n times. For example, an
// octal escape such as \123 would result in scanDigits(ch, 8, 3)
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
start := n
for n > 0 && digitVal(ch) < base {
ch = s.next()
if ch == eof {
// If we see an EOF, we halt any more scanning of digits
// immediately.
break
}
n--
}
if n > 0 {
s.err("illegal char escape")
}
if n != start && ch != eof {
// we scanned all digits, put the last non digit char back,
// only if we read anything at all
s.unread()
}
return ch
}
// scanIdentifier scans an identifier and returns the literal string
func (s *Scanner) scanIdentifier() string {
offs := s.srcPos.Offset - s.lastCharLen
ch := s.next()
for isLetter(ch) || isDigit(ch) || ch == '-' || ch == '.' {
ch = s.next()
}
if ch != eof {
s.unread() // we got identifier, put back latest char
}
return string(s.src[offs:s.srcPos.Offset])
}
// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
pos.Offset = s.srcPos.Offset - s.lastCharLen
switch {
case s.srcPos.Column > 0:
// common case: last character was not a '\n'
pos.Line = s.srcPos.Line
pos.Column = s.srcPos.Column
case s.lastLineLen > 0:
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
pos.Line = s.srcPos.Line - 1
pos.Column = s.lastLineLen
default:
// at the beginning of the source
pos.Line = 1
pos.Column = 1
}
return
}
// err reports a scanning error to the s.Error function. If the function is
// not defined, it prints the error to os.Stderr by default
func (s *Scanner) err(msg string) {
s.ErrorCount++
pos := s.recentPosition()
if s.Error != nil {
s.Error(pos, msg)
return
}
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}
// isLetter returns true if the given rune is a letter
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
// isDigit returns true if the given rune is a decimal digit
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
// isDecimal returns true if the given rune is a decimal digit
func isDecimal(ch rune) bool {
return '0' <= ch && ch <= '9'
}
// isHexadecimal returns true if the given rune is a hexadecimal digit
func isHexadecimal(ch rune) bool {
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}
// isWhitespace returns true if the rune is a space, tab, newline or carriage return
func isWhitespace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
}
// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
func digitVal(ch rune) int {
switch {
case '0' <= ch && ch <= '9':
return int(ch - '0')
case 'a' <= ch && ch <= 'f':
return int(ch - 'a' + 10)
case 'A' <= ch && ch <= 'F':
return int(ch - 'A' + 10)
}
return 16 // larger than any legal digit val
}
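A short sketch of the Scanner's token loop, assuming the vendored import paths github.com/hashicorp/hcl/hcl/scanner and .../hcl/token. The Error hook is optional; as err above notes, diagnostics default to os.Stderr:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/hcl/scanner"
    "github.com/hashicorp/hcl/hcl/token"
)

func main() {
    s := scanner.New([]byte(`port = 8080 // listener`))

    // Optional hook; without it, err falls back to os.Stderr.
    s.Error = func(pos token.Pos, msg string) {
        fmt.Printf("scan error at %s: %s\n", pos, msg)
    }

    // Prints IDENT "port", ASSIGN "=", NUMBER "8080", COMMENT "// listener".
    for {
        tok := s.Scan()
        if tok.Type == token.EOF {
            break
        }
        fmt.Printf("%-8s %q\n", tok.Type, tok.Text)
    }
}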

View file

@ -1,241 +0,0 @@
package strconv
import (
"errors"
"unicode/utf8"
)
// ErrSyntax indicates that a value does not have the right syntax for the target type.
var ErrSyntax = errors.New("invalid syntax")
// Unquote interprets s as a double-quoted HCL string literal, returning the
// string value that s quotes. Characters inside a ${} interpolation are
// passed through unmodified.
func Unquote(s string) (t string, err error) {
n := len(s)
if n < 2 {
return "", ErrSyntax
}
quote := s[0]
if quote != s[n-1] {
return "", ErrSyntax
}
s = s[1 : n-1]
if quote != '"' {
return "", ErrSyntax
}
if !contains(s, '$') && !contains(s, '{') && contains(s, '\n') {
return "", ErrSyntax
}
// Is it trivial? Avoid allocation.
if !contains(s, '\\') && !contains(s, quote) && !contains(s, '$') {
switch quote {
case '"':
return s, nil
case '\'':
r, size := utf8.DecodeRuneInString(s)
if size == len(s) && (r != utf8.RuneError || size != 1) {
return s, nil
}
}
}
var runeTmp [utf8.UTFMax]byte
buf := make([]byte, 0, 3*len(s)/2) // Try to avoid more allocations.
for len(s) > 0 {
// If we're starting a '${}' then let it through un-unquoted.
// Specifically: we don't unquote any characters within the `${}`
// section.
if s[0] == '$' && len(s) > 1 && s[1] == '{' {
buf = append(buf, '$', '{')
s = s[2:]
// Continue reading until we find the closing brace, copying as-is
braces := 1
for len(s) > 0 && braces > 0 {
r, size := utf8.DecodeRuneInString(s)
if r == utf8.RuneError {
return "", ErrSyntax
}
s = s[size:]
n := utf8.EncodeRune(runeTmp[:], r)
buf = append(buf, runeTmp[:n]...)
switch r {
case '{':
braces++
case '}':
braces--
}
}
if braces != 0 {
return "", ErrSyntax
}
if len(s) == 0 {
// If there's no string left, we're done!
break
} else {
// If there's more left, we need to pop back up to the top of the loop
// in case there's another interpolation in this string.
continue
}
}
if s[0] == '\n' {
return "", ErrSyntax
}
c, multibyte, ss, err := unquoteChar(s, quote)
if err != nil {
return "", err
}
s = ss
if c < utf8.RuneSelf || !multibyte {
buf = append(buf, byte(c))
} else {
n := utf8.EncodeRune(runeTmp[:], c)
buf = append(buf, runeTmp[:n]...)
}
if quote == '\'' && len(s) != 0 {
// single-quoted must be single character
return "", ErrSyntax
}
}
return string(buf), nil
}
// contains reports whether the string contains the byte c.
func contains(s string, c byte) bool {
for i := 0; i < len(s); i++ {
if s[i] == c {
return true
}
}
return false
}
func unhex(b byte) (v rune, ok bool) {
c := rune(b)
switch {
case '0' <= c && c <= '9':
return c - '0', true
case 'a' <= c && c <= 'f':
return c - 'a' + 10, true
case 'A' <= c && c <= 'F':
return c - 'A' + 10, true
}
return
}
func unquoteChar(s string, quote byte) (value rune, multibyte bool, tail string, err error) {
// easy cases
switch c := s[0]; {
case c == quote && (quote == '\'' || quote == '"'):
err = ErrSyntax
return
case c >= utf8.RuneSelf:
r, size := utf8.DecodeRuneInString(s)
return r, true, s[size:], nil
case c != '\\':
return rune(s[0]), false, s[1:], nil
}
// hard case: c is backslash
if len(s) <= 1 {
err = ErrSyntax
return
}
c := s[1]
s = s[2:]
switch c {
case 'a':
value = '\a'
case 'b':
value = '\b'
case 'f':
value = '\f'
case 'n':
value = '\n'
case 'r':
value = '\r'
case 't':
value = '\t'
case 'v':
value = '\v'
case 'x', 'u', 'U':
n := 0
switch c {
case 'x':
n = 2
case 'u':
n = 4
case 'U':
n = 8
}
var v rune
if len(s) < n {
err = ErrSyntax
return
}
for j := 0; j < n; j++ {
x, ok := unhex(s[j])
if !ok {
err = ErrSyntax
return
}
v = v<<4 | x
}
s = s[n:]
if c == 'x' {
// single-byte string, possibly not UTF-8
value = v
break
}
if v > utf8.MaxRune {
err = ErrSyntax
return
}
value = v
multibyte = true
case '0', '1', '2', '3', '4', '5', '6', '7':
v := rune(c) - '0'
if len(s) < 2 {
err = ErrSyntax
return
}
for j := 0; j < 2; j++ { // one digit already; two more
x := rune(s[j]) - '0'
if x < 0 || x > 7 {
err = ErrSyntax
return
}
v = (v << 3) | x
}
s = s[2:]
if v > 255 {
err = ErrSyntax
return
}
value = v
case '\\':
value = '\\'
case '\'', '"':
if c != quote {
err = ErrSyntax
return
}
value = rune(c)
default:
err = ErrSyntax
return
}
tail = s
return
}
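A hedged example of the ${} passthrough implemented above, assuming the package is imported under its vendored path as hclstrconv (the alias the HCL token package uses):

package main

import (
    "fmt"

    hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)

func main() {
    // Escapes outside ${} are decoded; bytes inside ${} pass through as-is.
    s, err := hclstrconv.Unquote(`"dir = \"${path.module}\""`)
    fmt.Println(s, err) // dir = "${path.module}" <nil>
}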

View file

@ -1,46 +0,0 @@
package token
import "fmt"
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Pos is valid if the line number is > 0.
type Pos struct {
Filename string // filename, if any
Offset int // offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (character count)
}
// IsValid returns true if the position is valid.
func (p *Pos) IsValid() bool { return p.Line > 0 }
// String returns a string in one of several forms:
//
// file:line:column valid position with file name
// line:column valid position without file name
// file invalid position with file name
// - invalid position without file name
func (p Pos) String() string {
s := p.Filename
if p.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
}
if s == "" {
s = "-"
}
return s
}
// Before reports whether the position p is before u.
func (p Pos) Before(u Pos) bool {
return u.Offset > p.Offset || u.Line > p.Line
}
// After reports whether the position p is after u.
func (p Pos) After(u Pos) bool {
return u.Offset < p.Offset || u.Line < p.Line
}
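A quick sketch of the String forms documented above, assuming the vendored import path github.com/hashicorp/hcl/hcl/token:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/hcl/token"
)

func main() {
    fmt.Println(token.Pos{Filename: "main.hcl", Line: 3, Column: 7}) // main.hcl:3:7
    fmt.Println(token.Pos{Line: 3, Column: 7})                       // 3:7
    fmt.Println(token.Pos{})                                         // - (invalid)
}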

View file

@ -1,219 +0,0 @@
// Package token defines constants representing the lexical tokens for HCL
// (HashiCorp Configuration Language)
package token
import (
"fmt"
"strconv"
"strings"
hclstrconv "github.com/hashicorp/hcl/hcl/strconv"
)
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
Type Type
Pos Pos
Text string
JSON bool
}
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int
const (
// Special tokens
ILLEGAL Type = iota
EOF
COMMENT
identifier_beg
IDENT // literals
literal_beg
NUMBER // 12345
FLOAT // 123.45
BOOL // true,false
STRING // "abc"
HEREDOC // <<FOO\nbar\nFOO
literal_end
identifier_end
operator_beg
LBRACK // [
LBRACE // {
COMMA // ,
PERIOD // .
RBRACK // ]
RBRACE // }
ASSIGN // =
ADD // +
SUB // -
operator_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
COMMENT: "COMMENT",
IDENT: "IDENT",
NUMBER: "NUMBER",
FLOAT: "FLOAT",
BOOL: "BOOL",
STRING: "STRING",
LBRACK: "LBRACK",
LBRACE: "LBRACE",
COMMA: "COMMA",
PERIOD: "PERIOD",
HEREDOC: "HEREDOC",
RBRACK: "RBRACK",
RBRACE: "RBRACE",
ASSIGN: "ASSIGN",
ADD: "ADD",
SUB: "SUB",
}
// String returns the string corresponding to the token type t.
func (t Type) String() string {
s := ""
if 0 <= t && t < Type(len(tokens)) {
s = tokens[t]
}
if s == "" {
s = "token(" + strconv.Itoa(int(t)) + ")"
}
return s
}
// IsIdentifier returns true for tokens corresponding to identifiers and basic
// type literals; it returns false otherwise.
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
// IsLiteral returns true for tokens corresponding to basic type literals; it
// returns false otherwise.
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
// String returns the token's literal text. Note that this is only
// applicable for certain token types, such as token.IDENT,
// token.STRING, etc.
func (t Token) String() string {
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
}
// Value returns the properly typed value for this token. The type of
// the returned interface{} is guaranteed based on the Type field.
//
// This can only be called for literal types. If it is called for any other
// type, this will panic.
func (t Token) Value() interface{} {
switch t.Type {
case BOOL:
if t.Text == "true" {
return true
} else if t.Text == "false" {
return false
}
panic("unknown bool value: " + t.Text)
case FLOAT:
v, err := strconv.ParseFloat(t.Text, 64)
if err != nil {
panic(err)
}
return float64(v)
case NUMBER:
v, err := strconv.ParseInt(t.Text, 0, 64)
if err != nil {
panic(err)
}
return int64(v)
case IDENT:
return t.Text
case HEREDOC:
return unindentHeredoc(t.Text)
case STRING:
// Determine the Unquote method to use. If it came from JSON,
// then we need to use the built-in unquote since we have to
// escape interpolations there.
f := hclstrconv.Unquote
if t.JSON {
f = strconv.Unquote
}
// This case occurs if JSON null is used
if t.Text == "" {
return ""
}
v, err := f(t.Text)
if err != nil {
panic(fmt.Sprintf("unquote %s err: %s", t.Text, err))
}
return v
default:
panic(fmt.Sprintf("unimplemented Value for type: %s", t.Type))
}
}
// unindentHeredoc returns the string content of a HEREDOC when it starts with
// <<, or the content with the hanging indent removed when it starts with <<-
// and the terminating line is at least as indented as the least indented line.
func unindentHeredoc(heredoc string) string {
// We need to find the end of the marker
idx := strings.IndexByte(heredoc, '\n')
if idx == -1 {
panic("heredoc doesn't contain newline")
}
unindent := heredoc[2] == '-'
// We can optimize if the heredoc isn't marked for indentation
if !unindent {
return string(heredoc[idx+1 : len(heredoc)-idx+1])
}
// We need to unindent each line based on the indentation level of the marker
lines := strings.Split(string(heredoc[idx+1:len(heredoc)-idx+2]), "\n")
whitespacePrefix := lines[len(lines)-1]
isIndented := true
for _, v := range lines {
if strings.HasPrefix(v, whitespacePrefix) {
continue
}
isIndented = false
break
}
// If all lines are not at least as indented as the terminating mark, return the
// heredoc as is, but trim the leading space from the marker on the final line.
if !isIndented {
return strings.TrimRight(string(heredoc[idx+1:len(heredoc)-idx+1]), " \t")
}
unindentedLines := make([]string, len(lines))
for k, v := range lines {
if k == len(lines)-1 {
unindentedLines[k] = ""
break
}
unindentedLines[k] = strings.TrimPrefix(v, whitespacePrefix)
}
return strings.Join(unindentedLines, "\n")
}
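A minimal sketch of Value's per-type guarantees. Since the NUMBER case parses with base 0, hex literals such as 0x1F decode too; the import path is assumed to be the vendored one:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/hcl/token"
)

func main() {
    // The dynamic type of Value follows the token type.
    fmt.Println(token.Token{Type: token.NUMBER, Text: "0x1F"}.Value())  // int64(31)
    fmt.Println(token.Token{Type: token.BOOL, Text: "true"}.Value())    // true
    fmt.Println(token.Token{Type: token.STRING, Text: `"act"`}.Value()) // act
}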

View file

@ -1,117 +0,0 @@
package parser
import "github.com/hashicorp/hcl/hcl/ast"
// flattenObjects takes an AST node, walks it, and flattens nested objects
// and lists of objects into flat object items.
func flattenObjects(node ast.Node) {
ast.Walk(node, func(n ast.Node) (ast.Node, bool) {
// We only care about lists, because this is what we modify
list, ok := n.(*ast.ObjectList)
if !ok {
return n, true
}
// Rebuild the item list
items := make([]*ast.ObjectItem, 0, len(list.Items))
frontier := make([]*ast.ObjectItem, len(list.Items))
copy(frontier, list.Items)
for len(frontier) > 0 {
// Pop the current item
n := len(frontier)
item := frontier[n-1]
frontier = frontier[:n-1]
switch v := item.Val.(type) {
case *ast.ObjectType:
items, frontier = flattenObjectType(v, item, items, frontier)
case *ast.ListType:
items, frontier = flattenListType(v, item, items, frontier)
default:
items = append(items, item)
}
}
// Reverse the list since the frontier model runs things backwards
for i := len(items)/2 - 1; i >= 0; i-- {
opp := len(items) - 1 - i
items[i], items[opp] = items[opp], items[i]
}
// Done! Set the original items
list.Items = items
return n, true
})
}
func flattenListType(
ot *ast.ListType,
item *ast.ObjectItem,
items []*ast.ObjectItem,
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
// If the list is empty, keep the original list
if len(ot.List) == 0 {
items = append(items, item)
return items, frontier
}
// All the elements of this object must also be objects!
for _, subitem := range ot.List {
if _, ok := subitem.(*ast.ObjectType); !ok {
items = append(items, item)
return items, frontier
}
}
// Great! We have a match; go through all the items and flatten
for _, elem := range ot.List {
// Add it to the frontier so that we can recurse
frontier = append(frontier, &ast.ObjectItem{
Keys: item.Keys,
Assign: item.Assign,
Val: elem,
LeadComment: item.LeadComment,
LineComment: item.LineComment,
})
}
return items, frontier
}
func flattenObjectType(
ot *ast.ObjectType,
item *ast.ObjectItem,
items []*ast.ObjectItem,
frontier []*ast.ObjectItem) ([]*ast.ObjectItem, []*ast.ObjectItem) {
// If the list has no items we do not have to flatten anything
if ot.List.Items == nil {
items = append(items, item)
return items, frontier
}
// All the elements of this object must also be objects!
for _, subitem := range ot.List.Items {
if _, ok := subitem.Val.(*ast.ObjectType); !ok {
items = append(items, item)
return items, frontier
}
}
// Great! We have a match; go through all the items and flatten
for _, subitem := range ot.List.Items {
// Copy the new key
keys := make([]*ast.ObjectKey, len(item.Keys)+len(subitem.Keys))
copy(keys, item.Keys)
copy(keys[len(item.Keys):], subitem.Keys)
// Add it to the frontier so that we can recurse
frontier = append(frontier, &ast.ObjectItem{
Keys: keys,
Assign: item.Assign,
Val: subitem.Val,
LeadComment: item.LeadComment,
LineComment: item.LineComment,
})
}
return items, frontier
}
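To illustrate the flattening (a sketch: flattenObjects is unexported, so it is exercised here through the JSON parser's Parse), nested single-key objects collapse into one multi-key item, mirroring HCL's service "web" { ... } shape; note that each key's Text keeps its JSON quotes:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/hcl/ast"
    "github.com/hashicorp/hcl/json/parser"
)

func main() {
    f, err := parser.Parse([]byte(`{"service": {"web": {"port": 80}}}`))
    if err != nil {
        panic(err)
    }

    // One flattened item with two keys, instead of two nested objects.
    item := f.Node.(*ast.ObjectList).Items[0]
    for _, k := range item.Keys {
        fmt.Print(k.Token.Text, " ") // "service" "web"
    }
    fmt.Println()
}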

View file

@ -1,313 +0,0 @@
package parser
import (
"errors"
"fmt"
"github.com/hashicorp/hcl/hcl/ast"
hcltoken "github.com/hashicorp/hcl/hcl/token"
"github.com/hashicorp/hcl/json/scanner"
"github.com/hashicorp/hcl/json/token"
)
type Parser struct {
sc *scanner.Scanner
// Last read token
tok token.Token
commaPrev token.Token
enableTrace bool
indent int
n int // buffer size (max = 1)
}
func newParser(src []byte) *Parser {
return &Parser{
sc: scanner.New(src),
}
}
// Parse parses the source and returns the resulting abstract syntax tree.
func Parse(src []byte) (*ast.File, error) {
p := newParser(src)
return p.Parse()
}
var errEofToken = errors.New("EOF token found")
// Parse parses the source and returns the resulting abstract syntax tree.
func (p *Parser) Parse() (*ast.File, error) {
f := &ast.File{}
var err, scerr error
p.sc.Error = func(pos token.Pos, msg string) {
scerr = fmt.Errorf("%s: %s", pos, msg)
}
// The root must be an object in JSON
object, err := p.object()
if scerr != nil {
return nil, scerr
}
if err != nil {
return nil, err
}
// We make our final node an object list so it is more HCL compatible
f.Node = object.List
// Flatten it, which finds patterns and turns them into more HCL-like
// AST trees.
flattenObjects(f.Node)
return f, nil
}
func (p *Parser) objectList() (*ast.ObjectList, error) {
defer un(trace(p, "ParseObjectList"))
node := &ast.ObjectList{}
for {
n, err := p.objectItem()
if err == errEofToken {
break // we are finished
}
// we don't return a nil node, because we might want to use the already
// collected items.
if err != nil {
return node, err
}
node.Add(n)
// Check for a followup comma. If it isn't a comma, then we're done
if tok := p.scan(); tok.Type != token.COMMA {
break
}
}
return node, nil
}
// objectItem parses a single object item
func (p *Parser) objectItem() (*ast.ObjectItem, error) {
defer un(trace(p, "ParseObjectItem"))
keys, err := p.objectKey()
if err != nil {
return nil, err
}
o := &ast.ObjectItem{
Keys: keys,
}
switch p.tok.Type {
case token.COLON:
pos := p.tok.Pos
o.Assign = hcltoken.Pos{
Filename: pos.Filename,
Offset: pos.Offset,
Line: pos.Line,
Column: pos.Column,
}
o.Val, err = p.objectValue()
if err != nil {
return nil, err
}
}
return o, nil
}
// objectKey parses an object key and returns an ObjectKey AST
func (p *Parser) objectKey() ([]*ast.ObjectKey, error) {
keyCount := 0
keys := make([]*ast.ObjectKey, 0)
for {
tok := p.scan()
switch tok.Type {
case token.EOF:
return nil, errEofToken
case token.STRING:
keyCount++
keys = append(keys, &ast.ObjectKey{
Token: p.tok.HCLToken(),
})
case token.COLON:
// If we have a zero keycount it means that we never got
// an object key, i.e. `{ :`. This is a syntax error.
if keyCount == 0 {
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
}
// Done
return keys, nil
case token.ILLEGAL:
return nil, errors.New("illegal")
default:
return nil, fmt.Errorf("expected: STRING got: %s", p.tok.Type)
}
}
}
// objectValue parses any type of value, such as a number, bool, string,
// object, or list.
func (p *Parser) objectValue() (ast.Node, error) {
defer un(trace(p, "ParseObjectValue"))
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.BOOL, token.NULL, token.STRING:
return p.literalType()
case token.LBRACE:
return p.objectType()
case token.LBRACK:
return p.listType()
case token.EOF:
return nil, errEofToken
}
return nil, fmt.Errorf("Expected object value, got unknown token: %+v", tok)
}
// object parses the root JSON value, which must be an object.
func (p *Parser) object() (*ast.ObjectType, error) {
defer un(trace(p, "ParseType"))
tok := p.scan()
switch tok.Type {
case token.LBRACE:
return p.objectType()
case token.EOF:
return nil, errEofToken
}
return nil, fmt.Errorf("Expected object, got unknown token: %+v", tok)
}
// objectType parses an object type and returns an ObjectType AST
func (p *Parser) objectType() (*ast.ObjectType, error) {
defer un(trace(p, "ParseObjectType"))
// we assume that the currently scanned token is an LBRACE
o := &ast.ObjectType{}
l, err := p.objectList()
// if we hit RBRACE, we are good to go (it means we parsed all items); if it's
// not an RBRACE, it's a syntax error and we just return it.
if err != nil && p.tok.Type != token.RBRACE {
return nil, err
}
o.List = l
return o, nil
}
// listType parses a list type and returns a ListType AST
func (p *Parser) listType() (*ast.ListType, error) {
defer un(trace(p, "ParseListType"))
// we assume that the currently scanned token is an LBRACK
l := &ast.ListType{}
for {
tok := p.scan()
switch tok.Type {
case token.NUMBER, token.FLOAT, token.STRING:
node, err := p.literalType()
if err != nil {
return nil, err
}
l.Add(node)
case token.COMMA:
continue
case token.LBRACE:
node, err := p.objectType()
if err != nil {
return nil, err
}
l.Add(node)
case token.BOOL:
// TODO(arslan) should we support? not supported by HCL yet
case token.LBRACK:
// TODO(arslan) should we support nested lists? Even though it's
// written in README of HCL, it's not a part of the grammar
// (not defined in parse.y)
case token.RBRACK:
// finished
return l, nil
default:
return nil, fmt.Errorf("unexpected token while parsing list: %s", tok.Type)
}
}
}
// literalType parses a literal type and returns a LiteralType AST
func (p *Parser) literalType() (*ast.LiteralType, error) {
defer un(trace(p, "ParseLiteral"))
return &ast.LiteralType{
Token: p.tok.HCLToken(),
}, nil
}
// scan returns the next token from the underlying scanner. If a token has
// been unscanned then read that instead.
func (p *Parser) scan() token.Token {
// If we have a token on the buffer, then return it.
if p.n != 0 {
p.n = 0
return p.tok
}
p.tok = p.sc.Scan()
return p.tok
}
// unscan pushes the previously read token back onto the buffer.
func (p *Parser) unscan() {
p.n = 1
}
// ----------------------------------------------------------------------------
// Parsing support
func (p *Parser) printTrace(a ...interface{}) {
if !p.enableTrace {
return
}
const dots = ". . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . "
const n = len(dots)
fmt.Printf("%5d:%3d: ", p.tok.Pos.Line, p.tok.Pos.Column)
i := 2 * p.indent
for i > n {
fmt.Print(dots)
i -= n
}
// i <= n
fmt.Print(dots[0:i])
fmt.Println(a...)
}
func trace(p *Parser, msg string) *Parser {
p.printTrace(msg, "(")
p.indent++
return p
}
// Usage pattern: defer un(trace(p, "..."))
func un(p *Parser) {
p.indent--
p.printTrace(")")
}
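A small sketch of the root-must-be-object rule enforced by object above, assuming the vendored import path:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/json/parser"
)

func main() {
    _, err := parser.Parse([]byte(`[1, 2, 3]`))
    fmt.Println(err != nil) // true: the root must be an object, not a list
}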

View file

@ -1,451 +0,0 @@
package scanner
import (
"bytes"
"fmt"
"os"
"unicode"
"unicode/utf8"
"github.com/hashicorp/hcl/json/token"
)
// eof represents a marker rune for the end of the reader.
const eof = rune(0)
// Scanner defines a lexical scanner
type Scanner struct {
buf *bytes.Buffer // Source buffer for advancing and scanning
src []byte // Source buffer for immutable access
// Source Position
srcPos token.Pos // current position
prevPos token.Pos // previous position, used for peek() method
lastCharLen int // length of last character in bytes
lastLineLen int // length of last line in characters (for correct column reporting)
tokStart int // token text start position
tokEnd int // token text end position
// Error is called for each error encountered. If no Error
// function is set, the error is reported to os.Stderr.
Error func(pos token.Pos, msg string)
// ErrorCount is incremented by one for each error encountered.
ErrorCount int
// tokPos is the start position of most recently scanned token; set by
// Scan. The Filename field is always left untouched by the Scanner. If
// an error is reported (via Error) and Position is invalid, the scanner is
// not inside a token.
tokPos token.Pos
}
// New creates and initializes a new instance of Scanner using src as
// its source content.
func New(src []byte) *Scanner {
// even though we accept a src, we read from an io.Reader compatible type
// (*bytes.Buffer). So in the future we might easily change it to streaming
// read.
b := bytes.NewBuffer(src)
s := &Scanner{
buf: b,
src: src,
}
// the source position always starts at line 1
s.srcPos.Line = 1
return s
}
// next reads the next rune from the buffered reader. Returns the rune(0) if
// an error occurs (or io.EOF is returned).
func (s *Scanner) next() rune {
ch, size, err := s.buf.ReadRune()
if err != nil {
// advance for error reporting
s.srcPos.Column++
s.srcPos.Offset += size
s.lastCharLen = size
return eof
}
if ch == utf8.RuneError && size == 1 {
s.srcPos.Column++
s.srcPos.Offset += size
s.lastCharLen = size
s.err("illegal UTF-8 encoding")
return ch
}
// remember last position
s.prevPos = s.srcPos
s.srcPos.Column++
s.lastCharLen = size
s.srcPos.Offset += size
if ch == '\n' {
s.srcPos.Line++
s.lastLineLen = s.srcPos.Column
s.srcPos.Column = 0
}
// debug
// fmt.Printf("ch: %q, offset:column: %d:%d\n", ch, s.srcPos.Offset, s.srcPos.Column)
return ch
}
// unread unreads the previous read Rune and updates the source position
func (s *Scanner) unread() {
if err := s.buf.UnreadRune(); err != nil {
panic(err) // this is the user's fault, we should catch it
}
s.srcPos = s.prevPos // put back last position
}
// peek returns the next rune without advancing the reader.
func (s *Scanner) peek() rune {
peek, _, err := s.buf.ReadRune()
if err != nil {
return eof
}
s.buf.UnreadRune()
return peek
}
// Scan scans the next token and returns the token.
func (s *Scanner) Scan() token.Token {
ch := s.next()
// skip white space
for isWhitespace(ch) {
ch = s.next()
}
var tok token.Type
// token text markings
s.tokStart = s.srcPos.Offset - s.lastCharLen
// token position: the initial next() moves the offset by one (the size of
// the rune, actually), though we are interested in the starting point
s.tokPos.Offset = s.srcPos.Offset - s.lastCharLen
if s.srcPos.Column > 0 {
// common case: last character was not a '\n'
s.tokPos.Line = s.srcPos.Line
s.tokPos.Column = s.srcPos.Column
} else {
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
s.tokPos.Line = s.srcPos.Line - 1
s.tokPos.Column = s.lastLineLen
}
switch {
case isLetter(ch):
lit := s.scanIdentifier()
if lit == "true" || lit == "false" {
tok = token.BOOL
} else if lit == "null" {
tok = token.NULL
} else {
s.err("illegal char")
}
case isDecimal(ch):
tok = s.scanNumber(ch)
default:
switch ch {
case eof:
tok = token.EOF
case '"':
tok = token.STRING
s.scanString()
case '.':
tok = token.PERIOD
ch = s.peek()
if isDecimal(ch) {
tok = token.FLOAT
ch = s.scanMantissa(ch)
ch = s.scanExponent(ch)
}
case '[':
tok = token.LBRACK
case ']':
tok = token.RBRACK
case '{':
tok = token.LBRACE
case '}':
tok = token.RBRACE
case ',':
tok = token.COMMA
case ':':
tok = token.COLON
case '-':
if isDecimal(s.peek()) {
ch := s.next()
tok = s.scanNumber(ch)
} else {
s.err("illegal char")
}
default:
s.err("illegal char: " + string(ch))
}
}
// finish token ending
s.tokEnd = s.srcPos.Offset
// create token literal
var tokenText string
if s.tokStart >= 0 {
tokenText = string(s.src[s.tokStart:s.tokEnd])
}
s.tokStart = s.tokEnd // ensure idempotency of tokenText() call
return token.Token{
Type: tok,
Pos: s.tokPos,
Text: tokenText,
}
}
// scanNumber scans an HCL number definition starting with the given rune
func (s *Scanner) scanNumber(ch rune) token.Type {
zero := ch == '0'
pos := s.srcPos
s.scanMantissa(ch)
ch = s.next() // seek forward
if ch == 'e' || ch == 'E' {
ch = s.scanExponent(ch)
return token.FLOAT
}
if ch == '.' {
ch = s.scanFraction(ch)
if ch == 'e' || ch == 'E' {
ch = s.next()
ch = s.scanExponent(ch)
}
return token.FLOAT
}
if ch != eof {
s.unread()
}
// If the number started with 0 and has more digits, it's an error
if zero && pos != s.srcPos {
s.err("numbers cannot start with 0")
}
return token.NUMBER
}
// scanMantissa scans the mantissa beginning from the rune. It returns the next
// non-decimal rune. It's used to determine whether it's a fraction or exponent.
func (s *Scanner) scanMantissa(ch rune) rune {
scanned := false
for isDecimal(ch) {
ch = s.next()
scanned = true
}
if scanned && ch != eof {
s.unread()
}
return ch
}
// scanFraction scans the fraction after the '.' rune
func (s *Scanner) scanFraction(ch rune) rune {
if ch == '.' {
ch = s.peek() // we peek just to see if we can move forward
ch = s.scanMantissa(ch)
}
return ch
}
// scanExponent scans the remaining parts of an exponent after the 'e' or 'E'
// rune.
func (s *Scanner) scanExponent(ch rune) rune {
if ch == 'e' || ch == 'E' {
ch = s.next()
if ch == '-' || ch == '+' {
ch = s.next()
}
ch = s.scanMantissa(ch)
}
return ch
}
// scanString scans a quoted string
func (s *Scanner) scanString() {
braces := 0
for {
// '"' opening already consumed
// read character after quote
ch := s.next()
if ch == '\n' || ch < 0 || ch == eof {
s.err("literal not terminated")
return
}
if ch == '"' {
break
}
// If we're going into a ${} then we can ignore quotes for a while
if braces == 0 && ch == '$' && s.peek() == '{' {
braces++
s.next()
} else if braces > 0 && ch == '{' {
braces++
}
if braces > 0 && ch == '}' {
braces--
}
if ch == '\\' {
s.scanEscape()
}
}
return
}
// scanEscape scans an escape sequence
func (s *Scanner) scanEscape() rune {
// http://en.cppreference.com/w/cpp/language/escape
ch := s.next() // read character after '/'
switch ch {
case 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\', '"':
// nothing to do
case '0', '1', '2', '3', '4', '5', '6', '7':
// octal notation
ch = s.scanDigits(ch, 8, 3)
case 'x':
// hexadecimal notation
ch = s.scanDigits(s.next(), 16, 2)
case 'u':
// universal character name
ch = s.scanDigits(s.next(), 16, 4)
case 'U':
// universal character name
ch = s.scanDigits(s.next(), 16, 8)
default:
s.err("illegal char escape")
}
return ch
}
// scanDigits scans a rune with the given base up to n times. For example, an
// octal escape such as \123 would result in scanDigits(ch, 8, 3)
func (s *Scanner) scanDigits(ch rune, base, n int) rune {
for n > 0 && digitVal(ch) < base {
ch = s.next()
n--
}
if n > 0 {
s.err("illegal char escape")
}
// we scanned all digits, put the last non digit char back
s.unread()
return ch
}
// scanIdentifier scans an identifier and returns the literal string
func (s *Scanner) scanIdentifier() string {
offs := s.srcPos.Offset - s.lastCharLen
ch := s.next()
for isLetter(ch) || isDigit(ch) || ch == '-' {
ch = s.next()
}
if ch != eof {
s.unread() // we got identifier, put back latest char
}
return string(s.src[offs:s.srcPos.Offset])
}
// recentPosition returns the position of the character immediately after the
// character or token returned by the last call to Scan.
func (s *Scanner) recentPosition() (pos token.Pos) {
pos.Offset = s.srcPos.Offset - s.lastCharLen
switch {
case s.srcPos.Column > 0:
// common case: last character was not a '\n'
pos.Line = s.srcPos.Line
pos.Column = s.srcPos.Column
case s.lastLineLen > 0:
// last character was a '\n'
// (we cannot be at the beginning of the source
// since we have called next() at least once)
pos.Line = s.srcPos.Line - 1
pos.Column = s.lastLineLen
default:
// at the beginning of the source
pos.Line = 1
pos.Column = 1
}
return
}
// err reports a scanning error to the s.Error function. If the function is
// not defined, it prints the error to os.Stderr by default
func (s *Scanner) err(msg string) {
s.ErrorCount++
pos := s.recentPosition()
if s.Error != nil {
s.Error(pos, msg)
return
}
fmt.Fprintf(os.Stderr, "%s: %s\n", pos, msg)
}
// isLetter returns true if the given rune is a letter
func isLetter(ch rune) bool {
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= 0x80 && unicode.IsLetter(ch)
}
// isDigit returns true if the given rune is a decimal digit
func isDigit(ch rune) bool {
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
}
// isDecimal returns true if the given rune is a decimal digit
func isDecimal(ch rune) bool {
return '0' <= ch && ch <= '9'
}
// isHexadecimal returns true if the given rune is a hexadecimal digit
func isHexadecimal(ch rune) bool {
return '0' <= ch && ch <= '9' || 'a' <= ch && ch <= 'f' || 'A' <= ch && ch <= 'F'
}
// isWhitespace returns true if the rune is a space, tab, newline or carriage return
func isWhitespace(ch rune) bool {
return ch == ' ' || ch == '\t' || ch == '\n' || ch == '\r'
}
// digitVal returns the integer value of a given octal, decimal or hexadecimal rune
func digitVal(ch rune) int {
switch {
case '0' <= ch && ch <= '9':
return int(ch - '0')
case 'a' <= ch && ch <= 'f':
return int(ch - 'a' + 10)
case 'A' <= ch && ch <= 'F':
return int(ch - 'A' + 10)
}
return 16 // larger than any legal digit val
}
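A sketch of scanNumber's leading-zero rule, wired through the Error hook; vendored import paths assumed:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl/json/scanner"
    "github.com/hashicorp/hcl/json/token"
)

func main() {
    s := scanner.New([]byte(`{"n": 012}`))
    s.Error = func(pos token.Pos, msg string) {
        fmt.Println(msg) // numbers cannot start with 0
    }
    for s.Scan().Type != token.EOF {
    }
    fmt.Println(s.ErrorCount) // 1
}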

View file

@ -1,46 +0,0 @@
package token
import "fmt"
// Pos describes an arbitrary source position
// including the file, line, and column location.
// A Pos is valid if the line number is > 0.
type Pos struct {
Filename string // filename, if any
Offset int // offset, starting at 0
Line int // line number, starting at 1
Column int // column number, starting at 1 (character count)
}
// IsValid returns true if the position is valid.
func (p *Pos) IsValid() bool { return p.Line > 0 }
// String returns a string in one of several forms:
//
// file:line:column valid position with file name
// line:column valid position without file name
// file invalid position with file name
// - invalid position without file name
func (p Pos) String() string {
s := p.Filename
if p.IsValid() {
if s != "" {
s += ":"
}
s += fmt.Sprintf("%d:%d", p.Line, p.Column)
}
if s == "" {
s = "-"
}
return s
}
// Before reports whether the position p is before u.
func (p Pos) Before(u Pos) bool {
return u.Offset > p.Offset || u.Line > p.Line
}
// After reports whether the position p is after u.
func (p Pos) After(u Pos) bool {
return u.Offset < p.Offset || u.Line < p.Line
}

View file

@ -1,118 +0,0 @@
package token
import (
"fmt"
"strconv"
hcltoken "github.com/hashicorp/hcl/hcl/token"
)
// Token defines a single HCL token which can be obtained via the Scanner
type Token struct {
Type Type
Pos Pos
Text string
}
// Type is the set of lexical tokens of the HCL (HashiCorp Configuration Language)
type Type int
const (
// Special tokens
ILLEGAL Type = iota
EOF
identifier_beg
literal_beg
NUMBER // 12345
FLOAT // 123.45
BOOL // true,false
STRING // "abc"
NULL // null
literal_end
identifier_end
operator_beg
LBRACK // [
LBRACE // {
COMMA // ,
PERIOD // .
COLON // :
RBRACK // ]
RBRACE // }
operator_end
)
var tokens = [...]string{
ILLEGAL: "ILLEGAL",
EOF: "EOF",
NUMBER: "NUMBER",
FLOAT: "FLOAT",
BOOL: "BOOL",
STRING: "STRING",
NULL: "NULL",
LBRACK: "LBRACK",
LBRACE: "LBRACE",
COMMA: "COMMA",
PERIOD: "PERIOD",
COLON: "COLON",
RBRACK: "RBRACK",
RBRACE: "RBRACE",
}
// String returns the string corresponding to the token type t.
func (t Type) String() string {
s := ""
if 0 <= t && t < Type(len(tokens)) {
s = tokens[t]
}
if s == "" {
s = "token(" + strconv.Itoa(int(t)) + ")"
}
return s
}
// IsIdentifier returns true for tokens corresponding to identifiers and basic
// type literals; it returns false otherwise.
func (t Type) IsIdentifier() bool { return identifier_beg < t && t < identifier_end }
// IsLiteral returns true for tokens corresponding to basic type literals; it
// returns false otherwise.
func (t Type) IsLiteral() bool { return literal_beg < t && t < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
func (t Type) IsOperator() bool { return operator_beg < t && t < operator_end }
// String returns the token's literal text. Note that this is only
// applicable for certain token types, such as token.IDENT,
// token.STRING, etc.
func (t Token) String() string {
return fmt.Sprintf("%s %s %s", t.Pos.String(), t.Type.String(), t.Text)
}
// HCLToken converts this token to an HCL token.
//
// The token type must be a literal type or this will panic.
func (t Token) HCLToken() hcltoken.Token {
switch t.Type {
case BOOL:
return hcltoken.Token{Type: hcltoken.BOOL, Text: t.Text}
case FLOAT:
return hcltoken.Token{Type: hcltoken.FLOAT, Text: t.Text}
case NULL:
return hcltoken.Token{Type: hcltoken.STRING, Text: ""}
case NUMBER:
return hcltoken.Token{Type: hcltoken.NUMBER, Text: t.Text}
case STRING:
return hcltoken.Token{Type: hcltoken.STRING, Text: t.Text, JSON: true}
default:
panic(fmt.Sprintf("unimplemented HCLToken for type: %s", t.Type))
}
}
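A brief sketch of HCLToken's mappings, assuming the vendored import paths: JSON null becomes an empty HCL STRING, and JSON strings carry JSON=true so Value later unquotes them with the stdlib rules:

package main

import (
    "fmt"

    hcltoken "github.com/hashicorp/hcl/hcl/token"
    jsontoken "github.com/hashicorp/hcl/json/token"
)

func main() {
    var h hcltoken.Token = jsontoken.Token{Type: jsontoken.NULL, Text: "null"}.HCLToken()
    fmt.Printf("%s %q\n", h.Type, h.Text) // STRING ""

    s := jsontoken.Token{Type: jsontoken.STRING, Text: `"act"`}.HCLToken()
    fmt.Println(s.JSON) // true
}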

View file

@ -1,38 +0,0 @@
package hcl
import (
"unicode"
"unicode/utf8"
)
type lexModeValue byte
const (
lexModeUnknown lexModeValue = iota
lexModeHcl
lexModeJson
)
// lexMode returns whether we're going to be parsing in JSON
// mode or HCL mode.
func lexMode(v []byte) lexModeValue {
var (
r rune
w int
offset int
)
for {
r, w = utf8.DecodeRune(v[offset:])
offset += w
if unicode.IsSpace(r) {
continue
}
if r == '{' {
return lexModeJson
}
break
}
return lexModeHcl
}

View file

@ -1,39 +0,0 @@
package hcl
import (
"fmt"
"github.com/hashicorp/hcl/hcl/ast"
hclParser "github.com/hashicorp/hcl/hcl/parser"
jsonParser "github.com/hashicorp/hcl/json/parser"
)
// ParseBytes accepts a byte slice as input and returns the AST tree.
//
// Input can be either JSON or HCL
func ParseBytes(in []byte) (*ast.File, error) {
return parse(in)
}
// ParseString accepts a string as input and returns the AST tree.
func ParseString(input string) (*ast.File, error) {
return parse([]byte(input))
}
func parse(in []byte) (*ast.File, error) {
switch lexMode(in) {
case lexModeHcl:
return hclParser.Parse(in)
case lexModeJson:
return jsonParser.Parse(in)
}
return nil, fmt.Errorf("unknown config format")
}
// Parse parses the given input and returns the root object.
//
// The input format can be either HCL or JSON.
func Parse(input string) (*ast.File, error) {
return parse([]byte(input))
}
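A minimal sketch of the format auto-detection, assuming the root package is imported as github.com/hashicorp/hcl:

package main

import (
    "fmt"

    "github.com/hashicorp/hcl"
)

func main() {
    // lexMode picks the parser from the first non-space rune: '{' means JSON.
    for _, src := range []string{`port = 8080`, `  {"port": 8080}`} {
        f, err := hcl.Parse(src)
        fmt.Println(f != nil, err) // true <nil>, for both inputs
    }
}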

View file

@ -1,9 +0,0 @@
sudo: false
language: go
go: master
before_script:
- go vet
- go get github.com/client9/misspell/cmd/misspell
- misspell -error *
- go get github.com/soniakeys/vetc
- vetc

View file

@ -1,463 +0,0 @@
// Copyright 2017 Sonia Keys
// License MIT: http://opensource.org/licenses/MIT
// Package bits implements methods on a bit array type.
//
// The Bits type holds a fixed size array of bits, numbered consecutively
// from zero. Some set-like operations are possible, but the API is more
// array-like or register-like.
package bits
import (
"fmt"
mb "math/bits"
)
// Bits holds a fixed number of bits.
//
// Bit number 0 is stored in the LSB, or bit 0, of the word indexed at 0.
//
// When Num is not a multiple of 64, the last element of Bits will hold some
// bits beyond Num. These bits are undefined. They are not required to be
// zero but do not have any meaning. Bits methods are not required to leave
// them undisturbed.
type Bits struct {
Num int // number of bits
Bits []uint64
}
// New constructs a Bits value with the given number of bits.
//
// It panics if num is negative.
func New(num int) Bits {
if num < 0 {
panic("negative number of bits")
}
return Bits{num, make([]uint64, (num+63)>>6)}
}
// NewGivens constructs a Bits value with the given bit numbers set to 1.
//
// The number of bits will be just enough to hold the largest bit value
// listed. That is, the number of bits will be the max bit number plus one.
//
// It panics if any bit number is negative.
func NewGivens(nums ...int) Bits {
max := -1
for _, p := range nums {
if p > max {
max = p
}
}
b := New(max + 1)
for _, p := range nums {
b.SetBit(p, 1)
}
return b
}
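// Editor's sketch, not part of the original vendored file: NewGivens
// sizes the array to the highest given bit number plus one.
func exampleNewGivens() {
	b := NewGivens(0, 2, 5)
	fmt.Println(b.Num) // 6
	fmt.Println(b)     // 100101 (big-endian, bit 5 first)
}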
// AllOnes returns true if all Num bits are 1.
func (b Bits) AllOnes() bool {
last := len(b.Bits) - 1
for _, w := range b.Bits[:last] {
if w != ^uint64(0) {
return false
}
}
return ^b.Bits[last]<<uint(64*len(b.Bits)-b.Num) == 0
}
// AllZeros returns true if all Num bits are 0.
func (b Bits) AllZeros() bool {
last := len(b.Bits) - 1
for _, w := range b.Bits[:last] {
if w != 0 {
return false
}
}
return b.Bits[last]<<uint(64*len(b.Bits)-b.Num) == 0
}
// And sets z = x & y.
//
// It panics if x and y do not have the same Num.
func (z *Bits) And(x, y Bits) {
if x.Num != y.Num {
panic("arguments have different number of bits")
}
if z.Num != x.Num {
*z = New(x.Num)
}
for i, w := range y.Bits {
z.Bits[i] = x.Bits[i] & w
}
}
// AndNot sets z = x &^ y.
//
// It panics if x and y do not have the same Num.
func (z *Bits) AndNot(x, y Bits) {
if x.Num != y.Num {
panic("arguments have different number of bits")
}
if z.Num != x.Num {
*z = New(x.Num)
}
for i, w := range y.Bits {
z.Bits[i] = x.Bits[i] &^ w
}
}
// Bit returns the value of the n'th bit of receiver b.
func (b Bits) Bit(n int) int {
if n < 0 || n >= b.Num {
panic("bit number out of range")
}
return int(b.Bits[n>>6] >> uint(n&63) & 1)
}
// ClearAll sets all bits to 0.
func (b Bits) ClearAll() {
for i := range b.Bits {
b.Bits[i] = 0
}
}
// ClearBits sets the given bits to 0 in receiver b.
//
// Other bits of b are left unchanged.
//
// It panics if any bit number is out of range.
// That is, negative or >= the number of bits.
func (b Bits) ClearBits(nums ...int) {
for _, p := range nums {
b.SetBit(p, 0)
}
}
// Equal returns true if all Num bits of a and b are equal.
//
// It panics if a and b have different Num.
func (a Bits) Equal(b Bits) bool {
if a.Num != b.Num {
panic("receiver and argument have different number of bits")
}
if a.Num == 0 {
return true
}
last := len(a.Bits) - 1
for i, w := range a.Bits[:last] {
if w != b.Bits[i] {
return false
}
}
return (a.Bits[last]^b.Bits[last])<<uint(len(a.Bits)*64-a.Num) == 0
}
// IterateOnes calls visitor function v for each bit with a value of 1, in order
// from lowest bit to highest bit.
//
// Iteration continues to the highest bit as long as v returns true.
// It stops if v returns false.
//
// IterateOnes returns true normally. It returns false if v returns false.
//
// IterateOnes may not be sensitive to changes if bits are changed during
// iteration, by the visitor function for example.
// See OneFrom for an iteration method sensitive to changes during iteration.
func (b Bits) IterateOnes(v func(int) bool) bool {
for x, w := range b.Bits {
if w != 0 {
t := mb.TrailingZeros64(w)
i := t // index in w of next 1 bit
for {
n := x<<6 | i
if n >= b.Num {
return true
}
if !v(n) {
return false
}
w >>= uint(t + 1)
if w == 0 {
break
}
t = mb.TrailingZeros64(w)
i += 1 + t
}
}
}
return true
}
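// Editor's sketch, not part of the original vendored file: the visitor
// controls early termination by returning false.
func exampleIterateOnes() {
	b := NewGivens(1, 3, 9)
	complete := b.IterateOnes(func(n int) bool {
		fmt.Print(n, " ")
		return n < 3 // stop once bit 3 has been visited
	})
	fmt.Println(complete) // 1 3 false
}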
// IterateZeros calls visitor function v for each bit with a value of 0,
// in order from lowest bit to highest bit.
//
// Iteration continues to the highest bit as long as v returns true.
// It stops if v returns false.
//
// IterateZeros returns true normally. It returns false if v returns false.
//
// IterateZeros may not be sensitive to changes if bits are changed during
// iteration, by the visitor function for example.
// See ZeroFrom for an iteration method sensitive to changes during iteration.
func (b Bits) IterateZeros(v func(int) bool) bool {
for x, w := range b.Bits {
w = ^w
if w != 0 {
t := mb.TrailingZeros64(w)
i := t // index in w of next 1 bit
for {
n := x<<6 | i
if n >= b.Num {
return true
}
if !v(n) {
return false
}
w >>= uint(t + 1)
if w == 0 {
break
}
t = mb.TrailingZeros64(w)
i += 1 + t
}
}
}
return true
}
// Not sets receiver z to the complement of b.
func (z *Bits) Not(b Bits) {
if z.Num != b.Num {
*z = New(b.Num)
}
for i, w := range b.Bits {
z.Bits[i] = ^w
}
}
// OneFrom returns the number of the first 1 bit at or after (from) bit num.
//
// It returns -1 if there is no one bit at or after num.
//
// This provides one way to iterate over one bits.
// To iterate over the one bits, call OneFrom with 0 to get the first
// one bit, then call with the result + 1 to get successive one bits.
// Unlike the IterateOnes method, this technique is stateless and so allows
// bits to be changed between successive calls.
//
// There is no panic for calling OneFrom with an argument >= b.Num.
// In this case OneFrom simply returns -1.
//
// See also IterateOnes.
func (b Bits) OneFrom(num int) int {
if num >= b.Num {
return -1
}
x := num >> 6
// test for 1 in this word at or after n
if wx := b.Bits[x] >> uint(num&63); wx != 0 {
num += mb.TrailingZeros64(wx)
if num >= b.Num {
return -1
}
return num
}
x++
for y, wy := range b.Bits[x:] {
if wy != 0 {
num = (x+y)<<6 | mb.TrailingZeros64(wy)
if num >= b.Num {
return -1
}
return num
}
}
return -1
}
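// Editor's sketch, not part of the original vendored file: the stateless
// iteration technique described above.
func exampleOneFrom() {
	b := NewGivens(0, 2, 5)
	for n := b.OneFrom(0); n >= 0; n = b.OneFrom(n + 1) {
		fmt.Print(n, " ") // 0 2 5
	}
	fmt.Println()
}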
// Or sets z = x | y.
//
// It panics if x and y do not have the same Num.
func (z *Bits) Or(x, y Bits) {
if x.Num != y.Num {
panic("arguments have different number of bits")
}
if z.Num != x.Num {
*z = New(x.Num)
}
for i, w := range y.Bits {
z.Bits[i] = x.Bits[i] | w
}
}
// OnesCount returns the number of 1 bits.
func (b Bits) OnesCount() (c int) {
if b.Num == 0 {
return 0
}
last := len(b.Bits) - 1
for _, w := range b.Bits[:last] {
c += mb.OnesCount64(w)
}
c += mb.OnesCount64(b.Bits[last] << uint(len(b.Bits)*64-b.Num))
return
}
// Set sets the bits of z to the bits of x.
func (z *Bits) Set(b Bits) {
if z.Num != b.Num {
*z = New(b.Num)
}
copy(z.Bits, b.Bits)
}
// SetAll sets z to have all 1 bits.
func (b Bits) SetAll() {
for i := range b.Bits {
b.Bits[i] = ^uint64(0)
}
}
// SetBit sets the n'th bit to x, where x is a 0 or 1.
//
// It panics if n is out of range.
func (b Bits) SetBit(n, x int) {
if n < 0 || n >= b.Num {
panic("bit number out of range")
}
if x == 0 {
b.Bits[n>>6] &^= 1 << uint(n&63)
} else {
b.Bits[n>>6] |= 1 << uint(n&63)
}
}
// SetBits sets the given bits to 1 in receiver b.
//
// Other bits of b are left unchanged.
//
// It panics if any bit number is out of range, negative or >= the number
// of bits.
func (b Bits) SetBits(nums ...int) {
for _, p := range nums {
b.SetBit(p, 1)
}
}
// Single returns true if b has exactly one 1 bit.
func (b Bits) Single() bool {
// like OnesCount, but stop as soon as two are found
if b.Num == 0 {
return false
}
c := 0
last := len(b.Bits) - 1
for _, w := range b.Bits[:last] {
c += mb.OnesCount64(w)
if c > 1 {
return false
}
}
c += mb.OnesCount64(b.Bits[last] << uint(len(b.Bits)*64-b.Num))
return c == 1
}
// Slice returns a slice with the bit numbers of each 1 bit.
func (b Bits) Slice() (s []int) {
for x, w := range b.Bits {
if w == 0 {
continue
}
t := mb.TrailingZeros64(w)
i := t // index in w of next 1 bit
for {
n := x<<6 | i
if n >= b.Num {
break
}
s = append(s, n)
w >>= uint(t + 1)
if w == 0 {
break
}
t = mb.TrailingZeros64(w)
i += 1 + t
}
}
return
}
// String returns a readable representation.
//
// The returned string is big-endian, with the highest number bit first.
//
// If Num is 0, an empty string is returned.
func (b Bits) String() (s string) {
if b.Num == 0 {
return ""
}
last := len(b.Bits) - 1
for _, w := range b.Bits[:last] {
s = fmt.Sprintf("%064b", w) + s
}
lb := b.Num - 64*last
return fmt.Sprintf("%0*b", lb,
b.Bits[last]&(^uint64(0)>>uint(64-lb))) + s
}
// Xor sets z = x ^ y.
func (z *Bits) Xor(x, y Bits) {
if x.Num != y.Num {
panic("arguments have different number of bits")
}
if z.Num != x.Num {
*z = New(x.Num)
}
for i, w := range y.Bits {
z.Bits[i] = x.Bits[i] ^ w
}
}
// ZeroFrom returns the number of the first 0 bit at or after (from) bit num.
//
// It returns -1 if there is no zero bit at or after num.
//
// This provides one way to iterate over zero bits.
// To iterate over the zero bits, call ZeroFrom with 0 to get the first
// zero bit, then call with the result + 1 to get successive zero bits.
// Unlike the IterateZeros method, this technique is stateless and so allows
// bits to be changed between successive calls.
//
// There is no panic for calling ZeroFrom with an argument >= b.Num.
// In this case ZeroFrom simply returns -1.
//
// See also IterateZeros.
func (b Bits) ZeroFrom(num int) int {
// code much like OneFrom except words are negated before testing
if num >= b.Num {
return -1
}
x := num >> 6
// negate word to test for 0 at or after n
if wx := ^b.Bits[x] >> uint(num&63); wx != 0 {
num += mb.TrailingZeros64(wx)
if num >= b.Num {
return -1
}
return num
}
x++
for y, wy := range b.Bits[x:] {
wy = ^wy
if wy != 0 {
num = (x+y)<<6 | mb.TrailingZeros64(wy)
if num >= b.Num {
return -1
}
return num
}
}
return -1
}

View file

@ -1 +0,0 @@
module "github.com/soniakeys/bits"

View file

@ -1,38 +0,0 @@
= Bits
Bits provides methods on a bit array type.
The Bits type holds a fixed size array of bits, numbered consecutively
from zero. Some set-like operations are possible, but the API is more
array-like or register-like.
image:https://godoc.org/github.com/soniakeys/bits?status.svg[link=https://godoc.org/github.com/soniakeys/bits] image:https://travis-ci.org/soniakeys/bits.svg[link=https://travis-ci.org/soniakeys/bits]
== Motivation and history
This package evolved from needs of my library of
https://github.com/soniakeys/graph[graph algorithms]. For graph algorithms
a common need is to store a single bit of information per node in a way that
is both fast and memory efficient. I began by using `big.Int` from the standard
library, then wrapped big.Int in a type. From time to time I considered
other publicly available bit array or bit set packages, such as Will
Fitzgerald's popular https://github.com/willf/bitset[bitset], but there were
always little reasons I preferred my own type and methods. My type that
wrapped `big.Int` met my needs until some simple benchmarks indicated it
might be causing performance problems. Some further experiments supported
this hypothesis so I ran further tests with a prototype bit array written
from scratch. Then satisfied that my custom bit array was solving the graph
performance problems, I decided to move it to a separate package with the
idea it might have more general utility. For the initial version of this
package I did the following:
- implemented a few tests to demonstrate fundamental correctness
- brought over most methods of my type that wrapped big.Int
- changed the index type from the graph-specific node index to a general `int`
- replaced some custom bit-twiddling with use of the new `math/bits` package
in the standard library
- renamed a few methods for clarity
- added a few methods for symmetry
- added a few new methods I had seen a need for in my graph library
- added doc, examples, tests, and more tests for 100% coverage
- added this readme

View file

@ -1,2 +0,0 @@
*.dot
anecdote/anecdote

View file

@ -1,11 +0,0 @@
sudo: false
language: go
go:
- "1.9.x"
- master
before_script:
- go tool vet -composites=false -printf=false -shift=false .
- go get github.com/client9/misspell/cmd/misspell
- go get github.com/soniakeys/vetc
- misspell -error * */* */*/*
- vetc

View file

@ -1,406 +0,0 @@
// Copyright 2014 Sonia Keys
// License MIT: https://opensource.org/licenses/MIT
package graph
// adj.go contains methods on AdjacencyList and LabeledAdjacencyList.
//
// AdjacencyList methods are placed first and are alphabetized.
// LabeledAdjacencyList methods follow, also alphabetized.
// Only exported methods need be alphabetized; non-exported methods can
// be left near their use.
import (
"sort"
"github.com/soniakeys/bits"
)
// NI is a "node int"
//
// It is a node number or node ID. NIs are used extensively as slice indexes.
// NIs typically account for a significant fraction of the memory footprint of
// a graph.
type NI int32
// AnyParallel identifies if a graph contains parallel arcs, multiple arcs
// that lead from a node to the same node.
//
// If the graph has parallel arcs, the results fr and to represent an example
// where there are parallel arcs from node `fr` to node `to`.
//
// If there are no parallel arcs, the method returns false -1 -1.
//
// Multiple loops on a node count as parallel arcs.
//
// See also alt.AnyParallelMap, which can perform better for some large
// or dense graphs.
func (g AdjacencyList) AnyParallel() (has bool, fr, to NI) {
var t []NI
for n, to := range g {
if len(to) == 0 {
continue
}
// different code in the labeled version, so no code gen.
t = append(t[:0], to...)
sort.Slice(t, func(i, j int) bool { return t[i] < t[j] })
t0 := t[0]
for _, to := range t[1:] {
if to == t0 {
return true, NI(n), t0
}
t0 = to
}
}
return false, -1, -1
}
// Complement returns the arc-complement of a simple graph.
//
// The result will have an arc for every pair of distinct nodes where there
// is not an arc in g. The complement is valid for both directed and
// undirected graphs. If g is undirected, the complement will be undirected.
// The result will always be a simple graph, having no loops or parallel arcs.
func (g AdjacencyList) Complement() AdjacencyList {
c := make(AdjacencyList, len(g))
b := bits.New(len(g))
for n, to := range g {
b.ClearAll()
for _, to := range to {
b.SetBit(int(to), 1)
}
b.SetBit(n, 1)
ct := make([]NI, len(g)-b.OnesCount())
i := 0
b.IterateZeros(func(to int) bool {
ct[i] = NI(to)
i++
return true
})
c[n] = ct
}
return c
}
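// Editor's sketch, not part of the original vendored file (assumes "fmt"
// is imported): complementing a two-node graph with a single arc leaves
// only the reverse arc.
func exampleComplement() {
	g := AdjacencyList{
		0: {1},
		1: nil,
	}
	fmt.Println(g.Complement()) // [[] [0]]
}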
// IsUndirected returns true if g represents an undirected graph.
//
// Returns true when all non-loop arcs are paired in reciprocal pairs.
// Otherwise returns false and an example unpaired arc.
func (g AdjacencyList) IsUndirected() (u bool, from, to NI) {
// similar code in dot/writeUndirected
unpaired := make(AdjacencyList, len(g))
for fr, to := range g {
arc: // for each arc in g
for _, to := range to {
if to == NI(fr) {
continue // loop
}
// search unpaired arcs
ut := unpaired[to]
for i, u := range ut {
if u == NI(fr) { // found reciprocal
last := len(ut) - 1
ut[i] = ut[last]
unpaired[to] = ut[:last]
continue arc
}
}
// reciprocal not found
unpaired[fr] = append(unpaired[fr], to)
}
}
for fr, to := range unpaired {
if len(to) > 0 {
return false, NI(fr), to[0]
}
}
return true, -1, -1
}
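// Editor's sketch, not part of the original vendored file (assumes "fmt"
// is imported): arc 0->1 lacks the reciprocal 1->0, so the graph is not
// undirected and that arc is returned as the counterexample.
func exampleIsUndirected() {
	g := AdjacencyList{
		0: {1},
		1: nil,
	}
	u, from, to := g.IsUndirected()
	fmt.Println(u, from, to) // false 0 1
}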
// SortArcLists sorts the arc lists of each node of receiver g.
//
// Nodes are not relabeled and the graph remains equivalent.
func (g AdjacencyList) SortArcLists() {
for _, to := range g {
sort.Slice(to, func(i, j int) bool { return to[i] < to[j] })
}
}
// ------- Labeled methods below -------
// ArcsAsEdges constructs an edge list with an edge for each arc, including
// reciprocals.
//
// This is a simple way to construct an edge list for algorithms that allow
// the duplication represented by the reciprocal arcs. (e.g. Kruskal)
//
// See also LabeledUndirected.Edges for the edge list without this duplication.
func (g LabeledAdjacencyList) ArcsAsEdges() (el []LabeledEdge) {
for fr, to := range g {
for _, to := range to {
el = append(el, LabeledEdge{Edge{NI(fr), to.To}, to.Label})
}
}
return
}
// DistanceMatrix constructs a distance matrix corresponding to the arcs
// of graph g and weight function w.
//
// An arc from f to t with WeightFunc result w is represented by d[f][t] == w.
// In case of parallel arcs, the lowest weight is stored. The distance from
// any node to itself d[n][n] is 0, unless the node has a loop with a negative
// weight. If g has no arc from f to distinct t, +Inf is stored for d[f][t].
//
// The returned DistanceMatrix is suitable for DistanceMatrix.FloydWarshall.
func (g LabeledAdjacencyList) DistanceMatrix(w WeightFunc) (d DistanceMatrix) {
d = newDM(len(g))
for fr, to := range g {
for _, to := range to {
// < to pick min of parallel arcs (also nicely ignores NaN)
if wt := w(to.Label); wt < d[fr][to.To] {
d[fr][to.To] = wt
}
}
}
return
}
// HasArcLabel returns true if g has any arc from node `fr` to node `to`
// with label `l`.
//
// Also returned is the index within the slice of arcs from node `fr`.
// If no arc from `fr` to `to` with label `l` is present, HasArcLabel returns
// false, -1.
func (g LabeledAdjacencyList) HasArcLabel(fr, to NI, l LI) (bool, int) {
t := Half{to, l}
for x, h := range g[fr] {
if h == t {
return true, x
}
}
return false, -1
}
// AnyParallel identifies if a graph contains parallel arcs, multiple arcs
// that lead from a node to the same node.
//
// If the graph has parallel arcs, the results fr and to represent an example
// where there are parallel arcs from node `fr` to node `to`.
//
// If there are no parallel arcs, the method returns false -1 -1.
//
// Multiple loops on a node count as parallel arcs.
//
// See also alt.AnyParallelMap, which can perform better for some large
// or dense graphs.
func (g LabeledAdjacencyList) AnyParallel() (has bool, fr, to NI) {
var t []NI
for n, to := range g {
if len(to) == 0 {
continue
}
// slightly different code needed here compared to AdjacencyList
t = t[:0]
for _, to := range to {
t = append(t, to.To)
}
sort.Slice(t, func(i, j int) bool { return t[i] < t[j] })
t0 := t[0]
for _, to := range t[1:] {
if to == t0 {
return true, NI(n), t0
}
t0 = to
}
}
return false, -1, -1
}
// AnyParallelLabel identifies if a graph contains parallel arcs with the same
// label.
//
// If the graph has parallel arcs with the same label, the results fr and to
// represent an example where there are parallel arcs from node `fr`
// to node `to`.
//
// If there are no parallel arcs, the method returns false -1 Half{}.
//
// Multiple loops on a node count as parallel arcs.
func (g LabeledAdjacencyList) AnyParallelLabel() (has bool, fr NI, to Half) {
var t []Half
for n, to := range g {
if len(to) == 0 {
continue
}
// slightly different code needed here compared to AdjacencyList
t = t[:0]
for _, to := range to {
t = append(t, to)
}
sort.Slice(t, func(i, j int) bool {
return t[i].To < t[j].To ||
t[i].To == t[j].To && t[i].Label < t[j].Label
})
t0 := t[0]
for _, to := range t[1:] {
if to == t0 {
return true, NI(n), t0
}
t0 = to
}
}
return false, -1, Half{}
}
// IsUndirected returns true if g represents an undirected graph.
//
// Returns true when all non-loop arcs are paired in reciprocal pairs with
// matching labels. Otherwise returns false and an example unpaired arc.
//
// Note the requirement that reciprocal pairs have matching labels is
// an additional test not present in the otherwise equivalent unlabeled version
// of IsUndirected.
func (g LabeledAdjacencyList) IsUndirected() (u bool, from NI, to Half) {
// similar code in LabeledAdjacencyList.Edges
unpaired := make(LabeledAdjacencyList, len(g))
for fr, to := range g {
arc: // for each arc in g
for _, to := range to {
if to.To == NI(fr) {
continue // loop
}
// search unpaired arcs
ut := unpaired[to.To]
for i, u := range ut {
if u.To == NI(fr) && u.Label == to.Label { // found reciprocal
last := len(ut) - 1
ut[i] = ut[last]
unpaired[to.To] = ut[:last]
continue arc
}
}
// reciprocal not found
unpaired[fr] = append(unpaired[fr], to)
}
}
for fr, to := range unpaired {
if len(to) > 0 {
return false, NI(fr), to[0]
}
}
return true, -1, to
}
// ArcLabels constructs the multiset of LIs present in g.
func (g LabeledAdjacencyList) ArcLabels() map[LI]int {
s := map[LI]int{}
for _, to := range g {
for _, to := range to {
s[to.Label]++
}
}
return s
}
// NegativeArc returns true if the receiver graph contains a negative arc.
func (g LabeledAdjacencyList) NegativeArc(w WeightFunc) bool {
for _, nbs := range g {
for _, nb := range nbs {
if w(nb.Label) < 0 {
return true
}
}
}
return false
}
// ParallelArcsLabel identifies all arcs from node `fr` to node `to` with label `l`.
//
// The returned slice contains an element for each arc from node `fr` to node `to`
// with label `l`. The element value is the index within the slice of arcs from node
// `fr`.
//
// See also the method HasArcLabel, which stops after finding a single arc.
func (g LabeledAdjacencyList) ParallelArcsLabel(fr, to NI, l LI) (p []int) {
t := Half{to, l}
for x, h := range g[fr] {
if h == t {
p = append(p, x)
}
}
return
}
// Unlabeled constructs the unlabeled graph corresponding to g.
func (g LabeledAdjacencyList) Unlabeled() AdjacencyList {
a := make(AdjacencyList, len(g))
for n, nbs := range g {
to := make([]NI, len(nbs))
for i, nb := range nbs {
to[i] = nb.To
}
a[n] = to
}
return a
}
// WeightedArcsAsEdges constructs a WeightedEdgeList object from the receiver.
//
// Internally it calls g.ArcsAsEdges() to obtain the Edges member.
// See LabeledAdjacencyList.ArcsAsEdges().
func (g LabeledAdjacencyList) WeightedArcsAsEdges(w WeightFunc) *WeightedEdgeList {
return &WeightedEdgeList{
Order: g.Order(),
WeightFunc: w,
Edges: g.ArcsAsEdges(),
}
}
// WeightedInDegree computes the weighted in-degree of each node in g
// for a given weight function w.
//
// The weighted in-degree of a node is the sum of weights of arcs going to
// the node.
//
// A weighted degree of a node is often termed the "strength" of a node.
//
// See note for undirected graphs at LabeledAdjacencyList.WeightedOutDegree.
func (g LabeledAdjacencyList) WeightedInDegree(w WeightFunc) []float64 {
ind := make([]float64, len(g))
for _, to := range g {
for _, to := range to {
ind[to.To] += w(to.Label)
}
}
return ind
}
// WeightedOutDegree computes the weighted out-degree of the specified node
// for a given weight function w.
//
// The weighted out-degree of a node is the sum of weights of arcs going from
// the node.
//
// A weighted degree of a node is often termed the "strength" of a node.
//
// Note for undirected graphs, the WeightedOutDegree result for a node will
// equal the WeightedInDegree for the node. You can use WeightedInDegree if
// you have need for the weighted degrees of all nodes or use WeightedOutDegree
// to compute the weighted degrees of individual nodes. In either case loops
// are counted just once, unlike the (unweighted) UndirectedDegree methods.
func (g LabeledAdjacencyList) WeightedOutDegree(n NI, w WeightFunc) (d float64) {
for _, to := range g[n] {
d += w(to.Label)
}
return
}
// More about loops and strength: I didn't see consensus on this especially
// in the case of undirected graphs. Some sources said to add in-degree and
// out-degree, which would seemingly double both loops and non-loops.
// Some said to double loops. Some said sum the edge weights and had no
// comment on loops. R of course makes everything an option. The meaning
// of "strength" where loops exist is unclear. So while I could write an
// UndirectedWeighted degree function that doubles loops but not edges,
// I'm going to just leave this for now.

View file

@ -1,417 +0,0 @@
// Copyright 2014 Sonia Keys
// License MIT: http://opensource.org/licenses/MIT
package graph
// adj_RO.go is code generated from adj_cg.go by directives in graph.go.
// Editing adj_cg.go is okay.
// DO NOT EDIT adj_RO.go. The RO is for Read Only.
import (
"errors"
"fmt"
"math/rand"
"github.com/soniakeys/bits"
)
// ArcDensity returns density for a simple directed graph.
//
// See also ArcDensity function.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) ArcDensity() float64 {
return ArcDensity(len(g), g.ArcSize())
}
// ArcSize returns the number of arcs in g.
//
// Note that for an undirected graph without loops, the number of undirected
// edges -- the traditional meaning of graph size -- will be ArcSize()/2.
// On the other hand, if g is an undirected graph that has or may have loops,
// g.ArcSize()/2 is not a meaningful quantity.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) ArcSize() int {
m := 0
for _, to := range g {
m += len(to)
}
return m
}
// BoundsOk validates that all arcs in g stay within the slice bounds of g.
//
// BoundsOk returns true when no arcs point outside the bounds of g.
// Otherwise it returns false and an example arc that points outside of g.
//
// Most methods of this package assume the BoundsOk condition and may
// panic when they encounter an arc pointing outside of the graph. This
// function can be used to validate a graph when the BoundsOk condition
// is unknown.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) BoundsOk() (ok bool, fr NI, to NI) {
for fr, to := range g {
for _, to := range to {
if to < 0 || to >= NI(len(g)) {
return false, NI(fr), to
}
}
}
return true, -1, to
}
// BreadthFirst traverses a directed or undirected graph in breadth
// first order.
//
// Traversal starts at node start and visits the nodes reachable from
// start. The function visit is called for each node visited. Nodes
// not reachable from start are not visited.
//
// There are equivalent labeled and unlabeled versions of this method.
//
// See also alt.BreadthFirst, a variant with more options, and
// alt.BreadthFirst2, a direction optimizing variant.
func (g AdjacencyList) BreadthFirst(start NI, visit func(NI)) {
v := bits.New(len(g))
v.SetBit(int(start), 1)
visit(start)
var next []NI
for frontier := []NI{start}; len(frontier) > 0; {
for _, n := range frontier {
for _, nb := range g[n] {
if v.Bit(int(nb)) == 0 {
v.SetBit(int(nb), 1)
visit(nb)
next = append(next, nb)
}
}
}
frontier, next = next, frontier[:0]
}
}
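// Editor's sketch, not part of the original vendored file: nodes are
// visited level by level from the start node.
func exampleBreadthFirst() {
	g := AdjacencyList{
		0: {1, 2},
		1: {3},
		2: {3},
		3: nil,
	}
	g.BreadthFirst(0, func(n NI) {
		fmt.Print(n, " ") // 0 1 2 3
	})
	fmt.Println()
}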
// Copy makes a deep copy of g.
// Copy also computes the arc size ma, the number of arcs.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) Copy() (c AdjacencyList, ma int) {
c = make(AdjacencyList, len(g))
for n, to := range g {
c[n] = append([]NI{}, to...)
ma += len(to)
}
return
}
// DepthFirst traverses a directed or undirected graph in depth
// first order.
//
// Traversal starts at node start and visits the nodes reachable from
// start. The function visit is called for each node visited. Nodes
// not reachable from start are not visited.
//
// There are equivalent labeled and unlabeled versions of this method.
//
// See also alt.DepthFirst, a variant with more options.
func (g AdjacencyList) DepthFirst(start NI, visit func(NI)) {
v := bits.New(len(g))
var f func(NI)
f = func(n NI) {
visit(n)
v.SetBit(int(n), 1)
for _, to := range g[n] {
if v.Bit(int(to)) == 0 {
f(to)
}
}
}
f(start)
}
// HasArc returns true if g has any arc from node `fr` to node `to`.
//
// Also returned is the index within the slice of arcs from node `fr`.
// If no arc from `fr` to `to` is present, HasArc returns false, -1.
//
// There are equivalent labeled and unlabeled versions of this method.
//
// See also the method ParallelArcs, which finds all parallel arcs from
// `fr` to `to`.
func (g AdjacencyList) HasArc(fr, to NI) (bool, int) {
for x, h := range g[fr] {
if h == to {
return true, x
}
}
return false, -1
}
// AnyLoop identifies if a graph contains a loop, an arc that leads from a
// node back to the same node.
//
// If g contains a loop, the method returns true and an example of a node
// with a loop. If there are no loops in g, the method returns false, -1.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) AnyLoop() (bool, NI) {
for fr, to := range g {
for _, to := range to {
if NI(fr) == to {
return true, to
}
}
}
return false, -1
}
// AddNode maps a node in a supergraph to a subgraph node.
//
// Argument p must be an NI in supergraph s.Super. AddNode panics if
// p is not a valid node index of s.Super.
//
// AddNode is idempotent in that it does not add a new node to the subgraph if
// a subgraph node already exists mapped to supergraph node p.
//
// The mapped subgraph NI is returned.
func (s *Subgraph) AddNode(p NI) (b NI) {
if int(p) < 0 || int(p) >= s.Super.Order() {
panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph"))
}
if b, ok := s.SubNI[p]; ok {
return b
}
a := s.AdjacencyList
b = NI(len(a))
s.AdjacencyList = append(a, nil)
s.SuperNI = append(s.SuperNI, p)
s.SubNI[p] = b
return
}
// AddArc adds an arc to a subgraph.
//
// Arguments fr, to must be NIs in supergraph s.Super. As with AddNode,
// AddArc panics if fr and to are not valid node indexes of s.Super.
//
// The arc specified by fr, to must exist in s.Super. Further, the number of
// parallel arcs in the subgraph cannot exceed the number of corresponding
// parallel arcs in the supergraph. That is, each arc already added to the
// subgraph counts against the arcs available in the supergraph. If a matching
// arc is not available, AddArc returns an error.
//
// If a matching arc is available, subgraph nodes are added as needed, the
// subgraph arc is added, and the method returns nil.
func (s *Subgraph) AddArc(fr NI, to NI) error {
// verify supergraph NIs first, but without adding subgraph nodes just yet.
if int(fr) < 0 || int(fr) >= s.Super.Order() {
panic(fmt.Sprint("AddArc: NI ", fr, " not in supergraph"))
}
if int(to) < 0 || int(to) >= s.Super.Order() {
panic(fmt.Sprint("AddArc: NI ", to, " not in supergraph"))
}
// count existing matching arcs in subgraph
n := 0
a := s.AdjacencyList
if bf, ok := s.SubNI[fr]; ok {
if bt, ok := s.SubNI[to]; ok {
// both NIs already exist in subgraph, need to count arcs
bTo := bt
for _, t := range a[bf] {
if t == bTo {
n++
}
}
}
}
// verify matching arcs are available in supergraph
for _, t := range (*s.Super)[fr] {
if t == to {
if n > 0 {
n-- // match existing arc
continue
}
// no more existing arcs need to be matched. nodes can finally
// be added as needed and then the arc can be added.
bf := s.AddNode(fr)
to = s.AddNode(to)
s.AdjacencyList[bf] = append(s.AdjacencyList[bf], to)
return nil // success
}
}
return errors.New("arc not available in supergraph")
}
func (super AdjacencyList) induceArcs(sub map[NI]NI, sup []NI) AdjacencyList {
s := make(AdjacencyList, len(sup))
for b, p := range sup {
var a []NI
for _, to := range super[p] {
if bt, ok := sub[to]; ok {
to = bt
a = append(a, to)
}
}
s[b] = a
}
return s
}
// InduceList constructs a node-induced subgraph.
//
// The subgraph is induced on receiver graph g. Argument l must be a list of
// NIs in receiver graph g. Receiver g becomes the supergraph of the induced
// subgraph.
//
// Duplicate NIs are allowed in list l. The duplicates are effectively removed
// and only a single corresponding node is created in the subgraph. Subgraph
// NIs are mapped in the order of list l, except for ignoring duplicates.
// NIs in l that are not in g will cause a panic.
//
// Returned is the constructed Subgraph object containing the induced subgraph
// and the mappings to the supergraph.
func (g *AdjacencyList) InduceList(l []NI) *Subgraph {
sub, sup := mapList(l)
return &Subgraph{
Super: g,
SubNI: sub,
SuperNI: sup,
AdjacencyList: g.induceArcs(sub, sup)}
}
// InduceBits constructs a node-induced subgraph.
//
// The subgraph is induced on receiver graph g. Argument t must be a bitmap
// representing NIs in receiver graph g. Receiver g becomes the supergraph
// of the induced subgraph. NIs in t that are not in g will cause a panic.
//
// Returned is the constructed Subgraph object containing the induced subgraph
// and the mappings to the supergraph.
func (g *AdjacencyList) InduceBits(t bits.Bits) *Subgraph {
sub, sup := mapBits(t)
return &Subgraph{
Super: g,
SubNI: sub,
SuperNI: sup,
AdjacencyList: g.induceArcs(sub, sup)}
}
// IsSimple checks for loops and parallel arcs.
//
// A graph is "simple" if it has no loops or parallel arcs.
//
// IsSimple returns true, -1 for simple graphs. If a loop or parallel arc is
// found, simple returns false and a node that represents a counterexample
// to the graph being simple.
//
// See also separate methods AnyLoop and AnyParallel.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) IsSimple() (ok bool, n NI) {
if lp, n := g.AnyLoop(); lp {
return false, n
}
if pa, n, _ := g.AnyParallel(); pa {
return false, n
}
return true, -1
}
// IsolatedNodes returns a bitmap of isolated nodes in receiver graph g.
//
// An isolated node is one with no arcs going to or from it.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) IsolatedNodes() (i bits.Bits) {
i = bits.New(len(g))
i.SetAll()
for fr, to := range g {
if len(to) > 0 {
i.SetBit(fr, 0)
for _, to := range to {
i.SetBit(int(to), 0)
}
}
}
return
}
// Order is the number of nodes in receiver g.
//
// It is simply a wrapper method for the Go builtin len().
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) Order() int {
// Why a wrapper for len()? Mostly for Directed and Undirected.
// u.Order() is a little nicer than len(u.AdjacencyList).
return len(g)
}
// ParallelArcs identifies all arcs from node `fr` to node `to`.
//
// The returned slice contains an element for each arc from node `fr` to node `to`.
// The element value is the index within the slice of arcs from node `fr`.
//
// There are equivalent labeled and unlabeled versions of this method.
//
// See also the method HasArc, which stops after finding a single arc.
func (g AdjacencyList) ParallelArcs(fr, to NI) (p []int) {
for x, h := range g[fr] {
if h == to {
p = append(p, x)
}
}
return
}
// Permute permutes the node labeling of receiver g.
//
// Argument p must be a permutation of the node numbers of the graph,
// 0 through len(g)-1. A permutation returned by rand.Perm(len(g)) for
// example is acceptable.
//
// The graph is permuted in place. The graph keeps the same underlying
// memory but values of the graph representation are permuted to produce
// an isomorphic graph. The node previously labeled 0 becomes p[0] and so on.
// See example (or the code) for clarification.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) Permute(p []int) {
old := append(AdjacencyList{}, g...) // shallow copy
for fr, arcs := range old {
for i, to := range arcs {
arcs[i] = NI(p[to])
}
g[p[fr]] = arcs
}
}
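// Editor's sketch, not part of the original vendored file: with
// p = [2 0 1], the node previously labeled 0 becomes node 2, and its
// arcs are relabeled to match.
func examplePermute() {
	g := AdjacencyList{
		0: {1, 2},
		1: nil,
		2: nil,
	}
	g.Permute([]int{2, 0, 1})
	fmt.Println(g) // [[] [] [0 1]]
}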
// ShuffleArcLists shuffles the arc lists of each node of receiver g.
//
// For example a node with arcs leading to nodes 3 and 7 might have an
// arc list of either [3 7] or [7 3] after calling this method. The
// connectivity of the graph is not changed. The resulting graph stays
// equivalent but a traversal will encounter arcs in a different
// order.
//
// If Rand r is nil, the rand package default shared source is used.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g AdjacencyList) ShuffleArcLists(r *rand.Rand) {
ri := rand.Intn
if r != nil {
ri = r.Intn
}
// Knuth-Fisher-Yates
for _, to := range g {
for i := len(to); i > 1; {
j := ri(i)
i--
to[i], to[j] = to[j], to[i]
}
}
}

View file

@ -1,417 +0,0 @@
// Copyright 2014 Sonia Keys
// License MIT: http://opensource.org/licenses/MIT
package graph
// adj_RO.go is code generated from adj_cg.go by directives in graph.go.
// Editing adj_cg.go is okay.
// DO NOT EDIT adj_RO.go. The RO is for Read Only.
import (
"errors"
"fmt"
"math/rand"
"github.com/soniakeys/bits"
)
// ArcDensity returns density for a simple directed graph.
//
// See also ArcDensity function.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) ArcDensity() float64 {
return ArcDensity(len(g), g.ArcSize())
}
// ArcSize returns the number of arcs in g.
//
// Note that for an undirected graph without loops, the number of undirected
// edges -- the traditional meaning of graph size -- will be ArcSize()/2.
// On the other hand, if g is an undirected graph that has or may have loops,
// g.ArcSize()/2 is not a meaningful quantity.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) ArcSize() int {
m := 0
for _, to := range g {
m += len(to)
}
return m
}
// BoundsOk validates that all arcs in g stay within the slice bounds of g.
//
// BoundsOk returns true when no arcs point outside the bounds of g.
// Otherwise it returns false and an example arc that points outside of g.
//
// Most methods of this package assume the BoundsOk condition and may
// panic when they encounter an arc pointing outside of the graph. This
// function can be used to validate a graph when the BoundsOk condition
// is unknown.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) BoundsOk() (ok bool, fr NI, to Half) {
for fr, to := range g {
for _, to := range to {
if to.To < 0 || to.To >= NI(len(g)) {
return false, NI(fr), to
}
}
}
return true, -1, to
}
// BreadthFirst traverses a directed or undirected graph in breadth
// first order.
//
// Traversal starts at node start and visits the nodes reachable from
// start. The function visit is called for each node visited. Nodes
// not reachable from start are not visited.
//
// There are equivalent labeled and unlabeled versions of this method.
//
// See also alt.BreadthFirst, a variant with more options, and
// alt.BreadthFirst2, a direction optimizing variant.
func (g LabeledAdjacencyList) BreadthFirst(start NI, visit func(NI)) {
v := bits.New(len(g))
v.SetBit(int(start), 1)
visit(start)
var next []NI
for frontier := []NI{start}; len(frontier) > 0; {
for _, n := range frontier {
for _, nb := range g[n] {
if v.Bit(int(nb.To)) == 0 {
v.SetBit(int(nb.To), 1)
visit(nb.To)
next = append(next, nb.To)
}
}
}
frontier, next = next, frontier[:0]
}
}
// Copy makes a deep copy of g.
// Copy also computes the arc size ma, the number of arcs.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) Copy() (c LabeledAdjacencyList, ma int) {
c = make(LabeledAdjacencyList, len(g))
for n, to := range g {
c[n] = append([]Half{}, to...)
ma += len(to)
}
return
}
// DepthFirst traverses a directed or undirected graph in depth
// first order.
//
// Traversal starts at node start and visits the nodes reachable from
// start. The function visit is called for each node visited. Nodes
// not reachable from start are not visited.
//
// There are equivalent labeled and unlabeled versions of this method.
//
// See also alt.DepthFirst, a variant with more options.
func (g LabeledAdjacencyList) DepthFirst(start NI, visit func(NI)) {
v := bits.New(len(g))
var f func(NI)
f = func(n NI) {
visit(n)
v.SetBit(int(n), 1)
for _, to := range g[n] {
if v.Bit(int(to.To)) == 0 {
f(to.To)
}
}
}
f(start)
}
// HasArc returns true if g has any arc from node `fr` to node `to`.
//
// Also returned is the index within the slice of arcs from node `fr`.
// If no arc from `fr` to `to` is present, HasArc returns false, -1.
//
// There are equivalent labeled and unlabeled versions of this method.
//
// See also the method ParallelArcs, which finds all parallel arcs from
// `fr` to `to`.
func (g LabeledAdjacencyList) HasArc(fr, to NI) (bool, int) {
for x, h := range g[fr] {
if h.To == to {
return true, x
}
}
return false, -1
}
// AnyLoop identifies if a graph contains a loop, an arc that leads from a
// node back to the same node.
//
// If g contains a loop, the method returns true and an example of a node
// with a loop. If there are no loops in g, the method returns false, -1.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) AnyLoop() (bool, NI) {
for fr, to := range g {
for _, to := range to {
if NI(fr) == to.To {
return true, to.To
}
}
}
return false, -1
}
// AddNode maps a node in a supergraph to a subgraph node.
//
// Argument p must be an NI in supergraph s.Super. AddNode panics if
// p is not a valid node index of s.Super.
//
// AddNode is idempotent in that it does not add a new node to the subgraph if
// a subgraph node already exists mapped to supergraph node p.
//
// The mapped subgraph NI is returned.
func (s *LabeledSubgraph) AddNode(p NI) (b NI) {
if int(p) < 0 || int(p) >= s.Super.Order() {
panic(fmt.Sprint("AddNode: NI ", p, " not in supergraph"))
}
if b, ok := s.SubNI[p]; ok {
return b
}
a := s.LabeledAdjacencyList
b = NI(len(a))
s.LabeledAdjacencyList = append(a, nil)
s.SuperNI = append(s.SuperNI, p)
s.SubNI[p] = b
return
}
// AddArc adds an arc to a subgraph.
//
// Arguments fr, to must be NIs in supergraph s.Super. As with AddNode,
// AddArc panics if fr and to are not valid node indexes of s.Super.
//
// The arc specified by fr, to must exist in s.Super. Further, the number of
// parallel arcs in the subgraph cannot exceed the number of corresponding
// parallel arcs in the supergraph. That is, each arc already added to the
// subgraph counts against the arcs available in the supergraph. If a matching
// arc is not available, AddArc returns an error.
//
// If a matching arc is available, subgraph nodes are added as needed, the
// subgraph arc is added, and the method returns nil.
func (s *LabeledSubgraph) AddArc(fr NI, to Half) error {
// verify supergraph NIs first, but without adding subgraph nodes just yet.
if int(fr) < 0 || int(fr) >= s.Super.Order() {
panic(fmt.Sprint("AddArc: NI ", fr, " not in supergraph"))
}
if int(to.To) < 0 || int(to.To) >= s.Super.Order() {
panic(fmt.Sprint("AddArc: NI ", to.To, " not in supergraph"))
}
// count existing matching arcs in subgraph
n := 0
a := s.LabeledAdjacencyList
if bf, ok := s.SubNI[fr]; ok {
if bt, ok := s.SubNI[to.To]; ok {
// both NIs already exist in subgraph, need to count arcs
bTo := to
bTo.To = bt
for _, t := range a[bf] {
if t == bTo {
n++
}
}
}
}
// verify matching arcs are available in supergraph
for _, t := range (*s.Super)[fr] {
if t == to {
if n > 0 {
n-- // match existing arc
continue
}
// no more existing arcs need to be matched. nodes can finally
// be added as needed and then the arc can be added.
bf := s.AddNode(fr)
to.To = s.AddNode(to.To)
s.LabeledAdjacencyList[bf] = append(s.LabeledAdjacencyList[bf], to)
return nil // success
}
}
return errors.New("arc not available in supergraph")
}
func (super LabeledAdjacencyList) induceArcs(sub map[NI]NI, sup []NI) LabeledAdjacencyList {
s := make(LabeledAdjacencyList, len(sup))
for b, p := range sup {
var a []Half
for _, to := range super[p] {
if bt, ok := sub[to.To]; ok {
to.To = bt
a = append(a, to)
}
}
s[b] = a
}
return s
}
// InduceList constructs a node-induced subgraph.
//
// The subgraph is induced on receiver graph g. Argument l must be a list of
// NIs in receiver graph g. Receiver g becomes the supergraph of the induced
// subgraph.
//
// Duplicate NIs are allowed in list l. The duplicates are effectively removed
// and only a single corresponding node is created in the subgraph. Subgraph
// NIs are mapped in the order of list l, except for ignoring duplicates.
// NIs in l that are not in g will cause a panic.
//
// Returned is the constructed LabeledSubgraph object containing the induced subgraph
// and the mappings to the supergraph.
func (g *LabeledAdjacencyList) InduceList(l []NI) *LabeledSubgraph {
sub, sup := mapList(l)
return &LabeledSubgraph{
Super: g,
SubNI: sub,
SuperNI: sup,
LabeledAdjacencyList: g.induceArcs(sub, sup)}
}
// InduceBits constructs a node-induced subgraph.
//
// The subgraph is induced on receiver graph g. Argument t must be a bitmap
// representing NIs in receiver graph g. Receiver g becomes the supergraph
// of the induced subgraph. NIs in t that are not in g will cause a panic.
//
// Returned is the constructed LabeledSubgraph object containing the induced subgraph
// and the mappings to the supergraph.
func (g *LabeledAdjacencyList) InduceBits(t bits.Bits) *LabeledSubgraph {
sub, sup := mapBits(t)
return &LabeledSubgraph{
Super: g,
SubNI: sub,
SuperNI: sup,
LabeledAdjacencyList: g.induceArcs(sub, sup)}
}
// IsSimple checks for loops and parallel arcs.
//
// A graph is "simple" if it has no loops or parallel arcs.
//
// IsSimple returns true, -1 for simple graphs. If a loop or parallel arc is
// found, simple returns false and a node that represents a counterexample
// to the graph being simple.
//
// See also separate methods AnyLoop and AnyParallel.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) IsSimple() (ok bool, n NI) {
if lp, n := g.AnyLoop(); lp {
return false, n
}
if pa, n, _ := g.AnyParallel(); pa {
return false, n
}
return true, -1
}
// IsolatedNodes returns a bitmap of isolated nodes in receiver graph g.
//
// An isolated node is one with no arcs going to or from it.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) IsolatedNodes() (i bits.Bits) {
i = bits.New(len(g))
i.SetAll()
for fr, to := range g {
if len(to) > 0 {
i.SetBit(fr, 0)
for _, to := range to {
i.SetBit(int(to.To), 0)
}
}
}
return
}
// Order is the number of nodes in receiver g.
//
// It is simply a wrapper method for the Go builtin len().
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) Order() int {
// Why a wrapper for len()? Mostly for Directed and Undirected.
// u.Order() is a little nicer than len(u.LabeledAdjacencyList).
return len(g)
}
// ParallelArcs identifies all arcs from node `fr` to node `to`.
//
// The returned slice contains an element for each arc from node `fr` to node `to`.
// The element value is the index within the slice of arcs from node `fr`.
//
// There are equivalent labeled and unlabeled versions of this method.
//
// See also the method HasArc, which stops after finding a single arc.
func (g LabeledAdjacencyList) ParallelArcs(fr, to NI) (p []int) {
for x, h := range g[fr] {
if h.To == to {
p = append(p, x)
}
}
return
}
// Permute permutes the node labeling of receiver g.
//
// Argument p must be a permutation of the node numbers of the graph,
// 0 through len(g)-1. A permutation returned by rand.Perm(len(g)) for
// example is acceptable.
//
// The graph is permuted in place. The graph keeps the same underlying
// memory but values of the graph representation are permuted to produce
// an isomorphic graph. The node previously labeled 0 becomes p[0] and so on.
// See example (or the code) for clarification.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) Permute(p []int) {
old := append(LabeledAdjacencyList{}, g...) // shallow copy
for fr, arcs := range old {
for i, to := range arcs {
arcs[i].To = NI(p[to.To])
}
g[p[fr]] = arcs
}
}
// ShuffleArcLists shuffles the arc lists of each node of receiver g.
//
// For example a node with arcs leading to nodes 3 and 7 might have an
// arc list of either [3 7] or [7 3] after calling this method. The
// connectivity of the graph is not changed. The resulting graph stays
// equivalent but a traversal will encounter arcs in a different
// order.
//
// If Rand r is nil, the rand package default shared source is used.
//
// There are equivalent labeled and unlabeled versions of this method.
func (g LabeledAdjacencyList) ShuffleArcLists(r *rand.Rand) {
ri := rand.Intn
if r != nil {
ri = r.Intn
}
// Knuth-Fisher-Yates
for _, to := range g {
for i := len(to); i > 1; {
j := ri(i)
i--
to[i], to[j] = to[j], to[i]
}
}
}

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View file

@ -1,122 +0,0 @@
// Copyright 2014 Sonia Keys
// License MIT: http://opensource.org/licenses/MIT
// Graph algorithms: Dijkstra, A*, Bellman Ford, Floyd Warshall;
// Kruskal and Prim minimal spanning tree; topological sort and DAG longest
// and shortest paths; Eulerian cycle and path; degeneracy and k-cores;
// Bron Kerbosch clique finding; connected components; dominance; and others.
//
// This is a graph library of integer indexes. To use it with application
// data, you associate data with integer indexes, perform searches or other
// operations with the library, and then use the integer index results to refer
// back to your application data.
//
// Thus it does not store application data, pointers to application data,
// or require you to implement an interface on your application data.
// The idea is to keep the library methods fast and lean.
//
// Representation overview
//
// The package defines a type for a node index (NI) which is just an integer
// type. It defines types for a number of graph representations using
// NI. The fundamental graph type is AdjacencyList, which is the
// common "list of lists" graph representation. It is a list as a slice
// with one element for each node of the graph. Each element is a list
// itself, a list of neighbor nodes, implemented as an NI slice. Methods
// on an AdjacencyList generally work on any representable graph, including
// directed or undirected graphs, simple graphs or multigraphs.
//
// The type Undirected embeds an AdjacencyList adding methods specific to
// undirected graphs. Similarly the type Directed adds methods meaningful
// for directed graphs.
//
// Similar to NI, the type LI is a "label index" which labels a
// node-to-neighbor "arc" or edge. Just as an NI can index arbitrary node
// data, an LI can index arbitrary arc or edge data. A number of algorithms
// use a "weight" associated with an arc. This package does not represent
// weighted arcs explicitly, but instead uses the LI as a more general
// mechanism allowing not only weights but arbitrary data to be associated
// with arcs. While AdjacencyList represents an arc with simply an NI,
// the type LabeledAdjacencyList uses a type that pairs an NI with an LI.
// This type is named Half, for half-arc. (A full arc would represent
// both ends.) Types LabeledDirected and LabeledUndirected embed a
// LabeledAdjacencyList.
//
// In contrast to Half, the type Edge represents both ends of an edge (but
// no label.) The type LabeledEdge adds the label. The type WeightedEdgeList
// bundles a list of LabeledEdges with a WeightFunc. (WeightedEdgeList has
// few methods. It exists primarily to support the Kruskal algorithm.)
//
// FromList is a compact rooted tree (or forest) representation. Like
// AdjacencyList and LabeledAdjacencyList, it is a list with one element for
// each node of the graph. Each element, however, contains only a single
// neighbor: its parent in the tree, the "from" node.
//
// Code generation
//
// A number of methods on AdjacencyList, Directed, and Undirected are
// applicable to LabeledAdjacencyList, LabeledDirected, and LabeledUndirected
// simply by ignoring the label. In these cases code generation provides
// methods on both types from a single source implementation. These methods
// are documented with the sentence "There are equivalent labeled and unlabeled
// versions of this method."
//
// Terminology
//
// This package uses the term "node" rather than "vertex." It uses "arc"
// to mean a directed edge, and uses "from" and "to" to refer to the ends
// of an arc. It uses "start" and "end" to refer to endpoints of a search
// or traversal.
//
// The usage of "to" and "from" is perhaps most strange. In common speech
// they are prepositions, but throughout this package they are used as
// adjectives, for example to refer to the "from node" of an arc or the
// "to node". The type "FromList" is named to indicate it stores a list of
// "from" values.
//
// A "half arc" refers to just one end of an arc, either the to or from end.
//
// Two arcs are "reciprocal" if they connect two distinct nodes n1 and n2,
// one arc leading from n1 to n2 and the other arc leading from n2 to n1.
// Undirected graphs are represented with reciprocal arcs.
//
// A node with an arc to itself represents a "loop." Duplicate arcs, where
// a node has multiple arcs to another node, are termed "parallel arcs."
// A graph with no loops or parallel arcs is "simple." A graph that allows
// parallel arcs is a "multigraph".
//
// The "size" of a graph traditionally means the number of undirected edges.
// This package uses "arc size" to mean the number of arcs in a graph. For an
// undirected graph without loops, arc size is 2 * size.
//
// The "order" of a graph is the number of nodes. An "ordering" though means
// an ordered list of nodes.
//
// A number of graph search algorithms use a concept of arc "weights."
// The sum of arc weights along a path is a "distance." In contrast, the
// number of nodes in a path, including start and end nodes, is the path's
// "length." (Yes, mixing weights and lengths would be nonsense physically,
// but the terms used here are just distinct terms for abstract values.
// The actual meaning to an application is likely to be something else
// entirely and is not relevant within this package.)
//
// Finally, this package documentation takes back the word "object" in some
// places to refer to a Go value, especially a value of a type with methods.
//
// Shortest path searches
//
// This package implements a number of shortest path searches. Most work
// with weighted graphs that are directed or undirected, and with graphs
// that may have loops or parallel arcs. For weighted graphs, "shortest"
// is defined as the path distance (sum of arc weights) with path length
// (number of nodes) breaking ties. If multiple paths have the same minimum
// distance with the same minimum length, search methods are free to return
// any of them.
//
// Algorithm Description
// Dijkstra Non-negative arc weights, single or all paths.
// AStar Non-negative arc weights, heuristic guided, single path.
// BellmanFord Negative arc weights allowed, no negative cycles, all paths.
// DAGPath O(n) algorithm for DAGs, arc weights of any sign.
// FloydWarshall all pairs distances, no negative cycles.
package graph

View file

@ -1,498 +0,0 @@
// Copyright 2014 Sonia Keys
// License MIT: http://opensource.org/licenses/MIT
package graph
import "github.com/soniakeys/bits"
// FromList represents a rooted tree (or forest) where each node is associated
// with a half arc identifying an arc "from" another node.
//
// Other terms for this data structure include "parent list",
// "predecessor list", "in-tree", "inverse arborescence", and
// "spaghetti stack."
//
// The Paths member represents the tree structure. Leaves and MaxLen are
// not always needed. Where Leaves is used it serves as a bitmap where
// Leaves.Bit(n) == 1 for each leaf n of the tree. Where MaxLen is used it is
// provided primarily as a convenience for functions that might want to
// anticipate the maximum path length that would be encountered traversing
// the tree.
//
// Various graph search methods use a FromList to return search results.
// For a start node of a search, From will be -1 and Len will be 1. For other
// nodes reached by the search, From represents a half arc in a path back to
// start and Len represents the number of nodes in the path. For nodes not
// reached by the search, From will be -1 and Len will be 0.
//
// A single FromList can also represent a forest. In this case paths from
// all leaves do not return to a single root node, but multiple root nodes.
//
// While a FromList generally encodes a tree or forest, it is technically
// possible to encode a cyclic graph. A number of FromList methods require
// the receiver to be acyclic. Graph methods documented to return a tree or
// forest will never return a cyclic FromList. In other cases however,
// where a FromList is not known to be acyclic, the Cyclic method can be
// useful to validate the acyclic property.
type FromList struct {
Paths []PathEnd // tree representation
Leaves bits.Bits // leaves of tree
MaxLen int // length of longest path, max of all PathEnd.Len values
}
// PathEnd associates a half arc and a path length.
//
// A PathEnd list is an element type of FromList.
type PathEnd struct {
From NI // a "from" half arc, the node the arc comes from
Len int // number of nodes in path from start
}
/* NewFromList could be confusing now with bits also needing allocation.
maybe best to not have this function. Maybe a more useful new would be
one that took a PathEnd slice and initialized everything including roots
and max len. Maybe it's time for a separate []PathEnd type when that's
all that's needed. (and reconsider the name PathEnd)
*/
// NewFromList creates a FromList object of given order.
//
// The Paths member is allocated to the specified order n but other members
// are left as zero values.
func NewFromList(n int) FromList {
return FromList{Paths: make([]PathEnd, n)}
}
// BoundsOk validates the "from" values in the list.
//
// Negative values are allowed as they indicate root nodes.
//
// BoundsOk returns true when all from values are less than len(f.Paths).
// Otherwise it returns false and a node with a from value >= len(f.Paths).
func (f FromList) BoundsOk() (ok bool, n NI) {
for n, e := range f.Paths {
if int(e.From) >= len(f.Paths) {
return false, NI(n)
}
}
return true, -1
}
// CommonStart returns the common start node of minimal paths to a and b.
//
// It returns -1 if a and b cannot be traced back to a common node.
//
// The method relies on populated PathEnd.Len members. Use RecalcLen if
// the Len members are not known to be present and correct.
func (f FromList) CommonStart(a, b NI) NI {
p := f.Paths
if p[a].Len < p[b].Len {
a, b = b, a
}
for bl := p[b].Len; p[a].Len > bl; {
a = p[a].From
if a < 0 {
return -1
}
}
for a != b {
a = p[a].From
if a < 0 {
return -1
}
b = p[b].From
}
return a
}
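// On the four-node tree sketched above, the minimal paths to 2 and 3 first
// meet at the root:
//
//	f.CommonStart(2, 3) // == 0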
// Cyclic determines if f contains a cycle, a non-empty path from a node
// back to itself.
//
// Cyclic returns true if f contains at least one cycle. It also returns
// an example of a node involved in a cycle.
//
// Cyclic returns (false, -1) in the normal case where f is acyclic.
// Note that the bool is not an "ok" return. A cyclic FromList is usually
// not okay.
func (f FromList) Cyclic() (cyclic bool, n NI) {
p := f.Paths
vis := bits.New(len(p))
for i := range p {
path := bits.New(len(p))
for n := i; vis.Bit(n) == 0; {
vis.SetBit(n, 1)
path.SetBit(n, 1)
if n = int(p[n].From); n < 0 {
break
}
if path.Bit(n) == 1 {
return true, NI(n)
}
}
}
return false, -1
}
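// A sketch (hypothetical values) of a two-node cycle, each node "from" the
// other:
//
//	f := FromList{Paths: []PathEnd{{From: 1}, {From: 0}}}
//	cyclic, n := f.Cyclic() // cyclic == true, n == 0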
// IsolatedNodes returns a bitmap of isolated nodes in receiver graph f.
//
// An isolated node is one with no arcs going to or from it.
func (f FromList) IsolatedNodes() (iso bits.Bits) {
p := f.Paths
iso = bits.New(len(p))
iso.SetAll()
for n, e := range p {
if e.From >= 0 {
iso.SetBit(n, 0)
iso.SetBit(int(e.From), 0)
}
}
return
}
// PathTo decodes a FromList, recovering a single path.
//
// The path is returned as a list of nodes where the first element will be
// a root node and the last element will be the specified end node.
//
// Only the Paths member of the receiver is used. Other members of the
// FromList do not need to be valid, however the MaxLen member can be useful
// for allocating argument p.
//
// Argument p can provide the result slice. If p has capacity for the result
// it will be used, otherwise a new slice is created for the result.
//
// See also function PathTo.
func (f FromList) PathTo(end NI, p []NI) []NI {
return PathTo(f.Paths, end, p)
}
// PathTo decodes a single path from a PathEnd list.
//
// A PathEnd list is the main data representation in a FromList. See FromList.
//
// PathTo returns a list of nodes where the first element will be
// a root node and the last element will be the specified end node.
//
// Argument p can provide the result slice. If p has capacity for the result
// it will be used, otherwise a new slice is created for the result.
//
// See also method FromList.PathTo.
func PathTo(paths []PathEnd, end NI, p []NI) []NI {
n := paths[end].Len
if n == 0 {
return p[:0]
}
if cap(p) >= n {
p = p[:n]
} else {
p = make([]NI, n)
}
for {
n--
p[n] = end
if n == 0 {
return p
}
end = paths[end].From
}
}
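// A sketch of reusing one buffer across repeated decodes, assuming the Len
// members (and so MaxLen) are valid for the four-node tree above:
//
//	buf := make([]NI, f.MaxLen)
//	path := PathTo(f.Paths, 3, buf) // reuses buf's backing array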
// PathToLabeled decodes a FromList, recovering a single path.
//
// The start of the returned path will be a root node of the FromList.
//
// Only the Paths member of the receiver is used. Other members of the
// FromList do not need to be valid, however the MaxLen member can be useful
// for allocating argument p.
//
// Argument p can provide the result slice. If p has capacity for the result
// it will be used, otherwise a new slice is created for the result.
//
// See also function PathTo.
func (f FromList) PathToLabeled(end NI, labels []LI, p []Half) LabeledPath {
n := f.Paths[end].Len - 1
if n <= 0 {
return LabeledPath{end, p[:0]}
}
if cap(p) >= n {
p = p[:n]
} else {
p = make([]Half, n)
}
for {
n--
p[n] = Half{To: end, Label: labels[end]}
end = f.Paths[end].From
if n == 0 {
return LabeledPath{end, p}
}
}
}
// Preorder traverses a FromList in preorder.
//
// Nodes are visited in order such that for any node n with from node fr,
// fr is visited before n. Where f represents a tree, the visit ordering
// corresponds to a preordering, or depth first traversal of the tree.
// Where f represents a forest, the preorderings of the trees can be
// intermingled.
//
// Leaves must be set correctly first. Use RecalcLeaves if leaves are not
// known to be set correctly. FromList f cannot be cyclic.
//
// Traversal continues while visitor function v returns true. It terminates
// if v returns false. Preorder returns true if it completes without v
// returning false. Preorder returns false if traversal is terminated by v
// returning false.
func (f FromList) Preorder(v func(NI) bool) bool {
p := f.Paths
done := bits.New(len(p))
var df func(NI) bool
df = func(n NI) bool {
done.SetBit(int(n), 1)
if fr := p[n].From; fr >= 0 && done.Bit(int(fr)) == 0 {
if !df(fr) {
return false
}
}
return v(n)
}
return f.Leaves.IterateOnes(func(n int) bool {
return df(NI(n))
})
}
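// A sketch on the four-node tree above, collecting nodes in preorder; each
// parent appears before its children, here giving [0 2 1 3]:
//
//	f.RecalcLeaves()
//	var order []NI
//	f.Preorder(func(n NI) bool {
//		order = append(order, n)
//		return true
//	})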
// RecalcLeaves recomputes the Leaves member of f.
func (f *FromList) RecalcLeaves() {
p := f.Paths
lv := &f.Leaves
if lv.Num != len(p) {
*lv = bits.New(len(p))
}
lv.SetAll()
for n := range f.Paths {
if fr := p[n].From; fr >= 0 {
lv.SetBit(int(fr), 0)
}
}
}
// RecalcLen recomputes Len for each path end, and recomputes MaxLen.
//
// RecalcLen relies on the Leaves member being valid. If it is not known
// to be valid, call RecalcLeaves before calling RecalcLen.
//
// RecalcLen will panic if the FromList is cyclic. Use the Cyclic method
// if needed to verify that the FromList is acyclic.
func (f *FromList) RecalcLen() {
p := f.Paths
var setLen func(NI) int
setLen = func(n NI) int {
switch {
case p[n].Len > 0:
return p[n].Len
case p[n].From < 0:
p[n].Len = 1
return 1
}
l := 1 + setLen(p[n].From)
p[n].Len = l
return l
}
for n := range f.Paths {
p[n].Len = 0
}
f.MaxLen = 0
f.Leaves.IterateOnes(func(n int) bool {
if l := setLen(NI(n)); l > f.MaxLen {
f.MaxLen = l
}
return true
})
}
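// A sketch of the usual call sequence when only From values are known:
//
//	f.RecalcLeaves() // Leaves must be valid before RecalcLen
//	f.RecalcLen()    // for the four-node tree above, f.MaxLen == 3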
// ReRoot reorients the tree containing n to make n the root node.
//
// It keeps the tree connected by "reversing" the path from n to the old root.
//
// After ReRoot, the Leaves and Len members are invalid.
// Call RecalcLeaves or RecalcLen as needed.
func (f *FromList) ReRoot(n NI) {
p := f.Paths
fr := p[n].From
if fr < 0 {
return
}
p[n].From = -1
for {
ff := p[fr].From
p[fr].From = n
if ff < 0 {
return
}
n = fr
fr = ff
}
}
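// A sketch, starting from the original four-node tree above: making 3 the
// root reverses the path 0→1→3, so other nodes now trace back to 3.
//
//	f.ReRoot(3)
//	f.Root(2) // == 3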
// Root finds the root of a node in a FromList.
func (f FromList) Root(n NI) NI {
for p := f.Paths; ; {
fr := p[n].From
if fr < 0 {
return n
}
n = fr
}
}
// Transpose constructs the directed graph corresponding to FromList f
// but with arcs in the opposite direction. That is, from roots toward leaves.
//
// If a non-nil argument roots is passed, Transpose populates it as roots of
// the resulting forest and returns nRoots as a count of the roots.
//
// The method relies only on the From member of f.Paths. Other members of
// the FromList are not used.
func (f FromList) Transpose(roots *bits.Bits) (forest Directed, nRoots int) {
p := f.Paths
g := make(AdjacencyList, len(p))
if roots != nil {
nRoots = len(p)
if roots.Num != nRoots {
*roots = bits.New(nRoots)
}
roots.SetAll()
}
for i, e := range p {
if e.From == -1 {
continue
}
g[e.From] = append(g[e.From], NI(i))
if roots != nil && roots.Bit(i) == 1 {
roots.SetBit(i, 0)
nRoots--
}
}
return Directed{g}, nRoots
}
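// A sketch: transposing the original four-node tree above yields child arcs
// 0→1, 0→2 and 1→3, and reports node 0 as the single root.
//
//	var r bits.Bits
//	forest, n := f.Transpose(&r) // n == 1, r.Bit(0) == 1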
// TransposeLabeled constructs the labeled directed graph corresponding
// to FromList f but with arcs in the opposite direction. That is, from
// roots toward leaves.
//
// The argument labels can be nil. In this case labels are generated matching
// the path indexes; each generated label corresponds to the "to", or child, node.
//
// If labels is non-nil, it must be the same length as f.Paths and is used
// to look up label numbers by the path index.
//
// If a non-nil argument roots is passed, TransposeLabeled populates it as roots of
// the resulting forest and returns nRoots as a count of the roots.
//
// The method relies only on the From member of f.Paths. Other members of
// the FromList are not used.
func (f FromList) TransposeLabeled(labels []LI, roots *bits.Bits) (forest LabeledDirected, nRoots int) {
p := f.Paths
g := make(LabeledAdjacencyList, len(p))
if roots != nil {
nRoots = len(p)
if roots.Num != nRoots {
*roots = bits.New(nRoots)
}
roots.SetAll()
}
for i, p := range f.Paths {
if p.From == -1 {
continue
}
l := LI(i)
if labels != nil {
l = labels[i]
}
g[p.From] = append(g[p.From], Half{NI(i), l})
if roots != nil && roots.Bit(i) == 1 {
roots.SetBit(i, 0)
nRoots--
}
}
return LabeledDirected{g}, nRoots
}
// Undirected constructs the undirected graph corresponding to FromList f.
//
// The resulting graph will be a tree or forest.
//
// If a non-nil argument roots is passed, Undirected populates it as roots of
// the resulting forest and returns nRoots as a count of the roots.
//
// The method relies only on the From member of f.Paths. Other members of
// the FromList are not used.
func (f FromList) Undirected(roots *bits.Bits) (forest Undirected, nRoots int) {
p := f.Paths
g := make(AdjacencyList, len(p))
if roots != nil {
nRoots = len(p)
if roots.Num != nRoots {
*roots = bits.New(nRoots)
}
roots.SetAll()
}
for i, e := range p {
if e.From == -1 {
continue
}
g[i] = append(g[i], e.From)
g[e.From] = append(g[e.From], NI(i))
if roots != nil && roots.Bit(i) == 1 {
roots.SetBit(i, 0)
nRoots--
}
}
return Undirected{g}, nRoots
}
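// A sketch: the undirected form of the original four-node tree above lists
// each parent-child arc in both directions.
//
//	u, _ := f.Undirected(nil)
//	// u.AdjacencyList[0] == [1 2], u.AdjacencyList[1] == [0 3]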
// LabeledUndirected constructs the labeled undirected graph corresponding
// to FromList f.
//
// The resulting graph will be a tree or forest.
//
// The argument labels can be nil. In this case labels are generated matching
// the path indexes; each generated label corresponds to the "to", or child, node.
//
// If labels is non-nil, it must be the same length as f.Paths and is used
// to look up label numbers by the path index.
//
// If a non-nil argument roots is passed, LabeledUndirected populates it as
// roots of the resulting forest and returns nRoots as a count of the roots.
//
// The method relies only on the From member of f.Paths. Other members of
// the FromList are not used.
func (f FromList) LabeledUndirected(labels []LI, roots *bits.Bits) (forest LabeledUndirected, nRoots int) {
p := f.Paths
g := make(LabeledAdjacencyList, len(p))
if roots != nil {
nRoots = len(p)
if roots.Num != nRoots {
*roots = bits.New(nRoots)
}
roots.SetAll()
}
for i, p := range f.Paths {
if p.From == -1 {
continue
}
l := LI(i)
if labels != nil {
l = labels[i]
}
g[i] = append(g[i], Half{p.From, l})
g[p.From] = append(g[p.From], Half{NI(i), l})
if roots != nil && roots.Bit(i) == 1 {
roots.SetBit(i, 0)
nRoots--
}
}
return LabeledUndirected{g}, nRoots
}

View file

@ -1,3 +0,0 @@
module "github.com/soniakeys/graph"
require "github.com/soniakeys/bits" v1.0.0

Some files were not shown because too many files have changed in this diff