diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..f5dc1d1
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,53 @@
+sudo: required
+
+language: go
+go:
+ - 1.5
+
+services:
+ - docker
+
+before_install:
+ - sudo apt-get update
+ - sudo apt-get install -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" -y --force-yes -q docker-engine
+ - docker build -t aacebedo/cfdnsupdaterbuild environments/build
+ - sudo mkdir -m 777 -p /build/vivid/amd64 /build/vivid/arm
+ - sudo mkdir -m 777 -p /build/wily/amd64 /build/wily/arm
+ - sudo mkdir -m 777 -p /build/xenial/amd64 /build/xenial/arm
+
+script:
+ - docker run -t -v /build/vivid/amd64:/build aacebedo/cfdnsupdaterbuild build -p aacebedo/cfdnsupdater -bn cfdnsupdater -b $TRAVIS_BRANCH -a amd64 -o /build
+ - docker run -t -v /build/vivid/arm:/build aacebedo/cfdnsupdaterbuild build -p aacebedo/cfdnsupdater -bn cfdnsupdater -b $TRAVIS_BRANCH -a arm -o /build
+ - docker run -t -v /build/wily/amd64:/build aacebedo/cfdnsupdaterbuild build -p aacebedo/cfdnsupdater -bn cfdnsupdater -b $TRAVIS_BRANCH -a amd64 -o /build
+ - docker run -t -v /build/wily/arm:/build aacebedo/cfdnsupdaterbuild build -p aacebedo/cfdnsupdater -bn cfdnsupdater -b $TRAVIS_BRANCH -a arm -o /build
+ - docker run -t -v /build/xenial/amd64:/build aacebedo/cfdnsupdaterbuild build -p aacebedo/cfdnsupdater -bn cfdnsupdater -b $TRAVIS_BRANCH -a amd64 -o /build
+ - docker run -t -v /build/xenial/arm:/build aacebedo/cfdnsupdaterbuild build -p aacebedo/cfdnsupdater -bn cfdnsupdater -b $TRAVIS_BRANCH -a arm -o /build
+ - docker run -t -v /build:/build aacebedo/cfdnsupdaterbuild deploydesc -bn cfdnsupdater -b $TRAVIS_BRANCH -dc "Cloudflare DNS updater" -u aacebedo -li LGPL-3.0 -o /build
+
+after_success:
+ - sudo chmod -R a+rwX /build
+ - mv /build/xenial/amd64/cfdnsupdater.tar /build/cfdnsupdater.amd64.$TRAVIS_BRANCH.tar
+ - mv /build/xenial/arm/cfdnsupdater.tar /build/cfdnsupdater.arm.$TRAVIS_BRANCH.tar
+
+deploy:
+ - provider: bintray
+ skip_cleanup: true
+ file: /build/bintray.desc
+ user: $BINTRAY_USERNAME
+ key: $BINTRAY_APIKEY
+ dry-run: false
+ on:
+ tags: true
+ - provider: releases
+ skip_cleanup: true
+ api-key:
+ secure: $GITHUB_TOKEN
+ file:
+ - /build/cfdnsupdater.amd64.$TRAVIS_BRANCH.tar
+ - /build/cfdnsupdater.arm.$TRAVIS_BRANCH.tar
+ on:
+ tags: true
+ - provider: script
+ script: docker login -u $BINTRAY_USERNAME -p $BINTRAY_APIKEY -e $BINTRAY_EMAIL aacebedo-docker-cfdsnupdater.bintray.io && docker build --build-arg VERSION=$TRAVIS_BRANCH -f environments/run/Dockerfile.amd64 -t aacebedo/cfdnsupdater-amd64 . && docker tag aacebedo/cfdnsupdater-amd64 aacebedo-docker-cfdsnupdater.bintray.io/cfdnsupdater-amd64:$TRAVIS_BRANCH && docker push aacebedo-docker-cfdsnupdater.bintray.io/cfdnsupdater-amd64:$TRAVIS_BRANCH
+ on:
+ tags: true
diff --git a/environments/build/Dockerfile b/environments/build/Dockerfile
index ca55f1b..eda21ad 100644
--- a/environments/build/Dockerfile
+++ b/environments/build/Dockerfile
@@ -14,18 +14,20 @@
# along with CFDNSUpdater. If not, see .
from alpine
-RUN apk add --update go git python3 bash
-
-ENV GOPATH /go
-ENV CGO_ENABLED=0
-
-ADD ./build.py /usr/bin
+RUN apk update
+RUN apk upgrade
+RUN apk add go git python3 bash ruby-dev gcc make ruby ruby-bundler \
+ g++ libstdc++ libffi-dev ruby-rdoc tar
+RUN apk add ruby-irb ruby-rdoc
+RUN gem install fpm
+
+COPY ./build.py /usr/bin/
RUN chmod u+x /usr/bin/build.py
-WORKDIR /out
-VOLUME ["/out"]
+WORKDIR /build
+VOLUME ["/build"]
-ENTRYPOINT ["build.py", "-p", "aacebedo/cfdnsupdater", "-n", "cfdnsupdater"]
+ENTRYPOINT ["build.py"]
diff --git a/environments/build/build.py b/environments/build/build.py
old mode 100644
new mode 100755
index 4d5b73d..6990987
--- a/environments/build/build.py
+++ b/environments/build/build.py
@@ -1,5 +1,5 @@
#!/usr/bin/env python3
-#This file is part of CFDNSUpdater.
+# This file is part of CFDNSUpdater.
#
# CFDNSUpdater is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
@@ -7,69 +7,207 @@
# (at your option) any later version.
#
# CFDNSUpdater is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with CFDNSUpdater. If not, see .
+import json
import argparse
import subprocess
import sys
import os
-import tarfile
+import uuid
+import time
+import shutil
from subprocess import Popen, PIPE
+from errno import ENAMETOOLONG
def parseArguments(raw_args):
parser = argparse.ArgumentParser(prog="build",
description='Project Builder')
- parser.add_argument('-project','-p',required=True,help="Github project", type=str)
- parser.add_argument('-binname','-n',required=True,help="Binary name project", type=str)
-
- parser.add_argument('-arch','-a',required=True, help='Architecture to build', type=str)
-
- parser.add_argument('--branch', '-b', help='Git branch to build', default="master", type=str)
+ rootSubparsers = parser.add_subparsers(dest="function")
+ buildParser = rootSubparsers.add_parser('build', help='Build packages')
+ buildParser.add_argument('-project', '-p', required=True,
+ help="Github project", type=str)
+ buildParser.add_argument('-arch', '-a', required=True,
+ help='Architecture to build', type=str)
+ buildParser.add_argument('--branch', '-b', help='Git branch to build',
+ default="master", type=str)
+ buildParser.add_argument('-binname', '-bn', required=True,
+ help='binname', type=str)
+ buildParser.add_argument('--outputdirpath', '-o', help='Output directory',
+ required=True, type=str)
+ deployDescParser = rootSubparsers.add_parser('deploydesc',
+ help='Create deployment \
+ descriptor')
+ deployDescParser.add_argument('--branch', '-b', help='Git branch to build',
+ required=True, type=str)
+ deployDescParser.add_argument('-binname', '-bn', required=True,
+ help='binname', type=str)
+ deployDescParser.add_argument('-user', '-u', required=True,
+ help='User', type=str)
+ deployDescParser.add_argument('-description', '-dc', required=True,
+ help='Package description', type=str)
+ deployDescParser.add_argument('--outputdirpath', '-o',
+ help='Output directory',
+ required=True, type=str)
+ deployDescParser.add_argument('--licenses', '-li', help='Software licences',
+ default=[], type=str, action='append')
+ deployDescParser.add_argument('--labels', '-la', help='Package labels',
+ action='append',
+ default=[], type=str)
return parser.parse_args(raw_args)
-
-def build(project, branch, arch, bin_name):
- if not os.path.exists(os.path.join("/", "out", "project")) :
- os.makedirs(os.path.join("/", "out","project"), exist_ok=True)
- process = None
- process = subprocess.Popen(["git", "clone", "-b", branch,
- "https://github.com/{}".format(project),
- os.path.join("/","out","project")],
- shell=False, stdout=PIPE)
- stdout,err = process.communicate()
- if err != None:
- os.exit("Error while cloning project: {}".format(err))
+def generateTmpDir():
+ tmp_dir_path = None
+ for x in range(0, 5):
+ tmp_dir_path = os.path.join(os.path.abspath(os.sep), "tmp", str(uuid.uuid4()))
+ if not os.path.exists(tmp_dir_path) :
+ os.makedirs(tmp_dir_path, exist_ok=True)
+ break
+ else:
+ tmp_dir_path = None
+ if tmp_dir_path == None:
+ raise Exception("Unable to generate a tmp directory")
+ return tmp_dir_path
+
+def generatePackage(build_dir_path,
+ package_type, package_name, version, arch):
+ process = subprocess.Popen(["fpm", "-t", package_type,
+ "-n", package_name,
+ "-p", build_dir_path,
+ "-a", arch,
+ "-f",
+ "-v", version.replace("/", "_"),
+ "-C", os.path.join(build_dir_path, "packaging"),
+ "-s", "dir", "."], shell=False)
+ process.communicate()
+ if process.returncode != 0:
+ sys.exit("Error while generating package")
- else:
- print("Project already exists, branch name is ignored")
+def build(build_dir_path, project, branch, arch, bin_name):
+ if len(os.listdir(build_dir_path)) != 0:
+ raise Exception("Build error: {} is not empty.".format(build_dir_path))
+ go_dir_path = os.path.join(generateTmpDir(), "go")
+ print("Go path is : {}".format(go_dir_path))
+ src_dir_path = os.path.join(go_dir_path, 'src', "github.com", project)
- go_path = "{}:/out/project".format(os.environ["GOPATH"])
+ process = None
+ process = subprocess.Popen(["git", "clone", "-b", branch,
+ "https://github.com/{}".format(project),
+ src_dir_path], shell=False)
+ process.communicate()
+ if process.returncode != 0:
+ sys.exit("Error while cloning project")
+
process = subprocess.Popen(["go", "get", "-d", "./..."],
- cwd=os.path.join("/", "out", "project"), shell=False,
- env=dict(os.environ, GOARCH=arch, GOPATH=go_path), stdout=PIPE)
- stdout,err = process.communicate()
- if err != None:
- os.exit("Error while getting dependencies: {}".format(err))
-
- process = subprocess.Popen(["go", "build", '-o', os.path.join('bin', bin_name), bin_name],
- cwd=os.path.join("/", "out", "project"), shell=False,
- env=dict(os.environ, GOARCH=arch, GOPATH=go_path), stdout=PIPE)
- stdout,err = process.communicate()
- if err != None:
- os.exit("Error while building project: {}".format(err))
-
- with tarfile.open(os.path.join("/", "out","project", "{}.{}.{}.tar.gz".format(bin_name, arch, branch)), "w:gz") as tar:
- tar.add(os.path.join("/", "out", "project", "bin", bin_name), arcname=bin_name)
+ cwd=src_dir_path, shell=False,
+ env=dict(os.environ,
+ GOARCH=arch,
+ GOPATH=go_dir_path,
+ CGO_ENABLED="0"))
+ process.communicate()
+ if process.returncode != 0:
+ sys.exit("Error while getting dependencies project")
+ process = subprocess.Popen(["go", "install", "./..."],
+ cwd=src_dir_path, shell=False,
+ env=dict(os.environ,
+ GOARCH=arch,
+ GOPATH=go_dir_path,
+ CGO_ENABLED="0"))
+ process.communicate()
+ if process.returncode != 0:
+ sys.exit("Error while building the project")
+ bin_dir_path = os.path.join(build_dir_path, "packaging",
+ "usr", "local", "bin")
+ os.makedirs(bin_dir_path)
+ for dirName, _, fileList in os.walk(os.path.join(go_dir_path, "bin")):
+ for fname in fileList:
+ shutil.copy2(os.path.join(dirName, fname),
+ os.path.join(bin_dir_path, fname))
-if __name__ == "__main__":
- parsed_args = parseArguments(sys.argv[1:])
- build(parsed_args.project,parsed_args.branch,parsed_args.arch,parsed_args.binname)
+ if os.path.exists(os.path.join(src_dir_path, "resources")) :
+ for name in os.listdir(os.path.join(src_dir_path, "resources")):
+ shutil.copytree(os.path.join(src_dir_path, "resources", name),
+ os.path.join(build_dir_path, "packaging", name))
+
+def generateBintrayDescriptor(build_dir,
+ bin_name,
+ user,
+ desc,
+ version,
+ licenses=[],
+ labels=[]):
+ github_addr = "https://github.com/{}/{}".format(user,bin_name)
+ descriptor = {"package":{
+ "name":bin_name,
+ "repo":bin_name,
+ "subject":user,
+ "desc":desc,
+ "website_url":github_addr,
+ "issue_tracker_url":github_addr,
+ "vcs_url":github_addr,
+ "github_use_tag_release_notes":True,
+ "licenses":licenses,
+ "labels":labels,
+ "public_download_numbers":False,
+ "public_stats":False
+ },
+ "version":{
+ "name":version,
+ "desc":desc,
+ "released":time.strftime("%Y-%m-%d"),
+ "vcs_tag":version,
+ "gpgSign":False
+ },
+ "files":[],
+ "publish":True
+ }
+
+ for distrib in os.listdir(build_dir):
+ if os.path.isdir(os.path.join(build_dir,distrib)):
+ for arch in os.listdir(os.path.join(build_dir,distrib)):
+ if os.path.isdir(os.path.join(build_dir,distrib,arch)) :
+ descriptor["files"].append({
+ "includePattern": os.path.join(build_dir,
+ distrib,
+ arch,
+ "(.*\.deb)"),
+ "uploadPattern": os.path.join(distrib,"$1"),
+ "matrixParams":
+ {
+ "deb_distribution":distrib,
+ "deb_component":"main",
+ "deb_architecture":arch
+ }
+ })
+ file = open(os.path.join(build_dir, "bintray.desc"), 'w')
+ json.dump(descriptor, file, ensure_ascii=False, indent=2)
+ file.close()
+if __name__ == "__main__":
+ args = parseArguments(sys.argv[1:])
+ if not os.path.exists(args.outputdirpath):
+ os.makedirs(args.outputdirpath, exist_ok=True)
+ if args.function == "build" :
+ build(args.outputdirpath,
+ args.project, args.branch,
+ args.arch, args.binname)
+ generatePackage(args.outputdirpath, "deb", args.binname,
+ args.branch, args.arch)
+ generatePackage(args.outputdirpath, "tar", args.binname,
+ args.branch, args.arch)
+ else:
+ generateBintrayDescriptor(args.outputdirpath,
+ args.binname,
+ args.user,
+ args.description,
+ args.branch,
+ args.licenses,
+ args.labels)
diff --git a/environments/run/Dockerfile.amd64 b/environments/run/Dockerfile.amd64
index fb28ce6..8068e64 100644
--- a/environments/run/Dockerfile.amd64
+++ b/environments/run/Dockerfile.amd64
@@ -13,16 +13,15 @@
# You should have received a copy of the GNU General Public License
# along with CFDNSUpdater. If not, see .
-from gliderlabs/alpine
+from alpine
RUN apk add --no-cache ca-certificates
ARG VERSION
-ADD https://github.com/aacebedo/cfdnsupdater/releases/download/${VERSION}/cfdnsupdater.amd64.${VERSION}.tar.gz /tmp/cfdnsupdater.tar.gz
-RUN tar xvzf /tmp/cfdnsupdater.tar.gz -C /usr/local/bin
-RUN rm -rf /tmp/cfdnsupdater.tar.gz
-
+ADD https://github.com/aacebedo/cfdnsupdater/releases/download/${VERSION}/cfdnsupdater.amd64.${VERSION}.tar /tmp/cfdnsupdater.tar
+RUN tar xvf /tmp/cfdnsupdater.tar -C /
+RUN rm -rf /tmp/cfdnsupdater.tar
ENTRYPOINT ["cfdnsupdater"]
diff --git a/environments/run/Dockerfile.arm b/environments/run/Dockerfile.arm
index 6372621..0fef3e2 100644
--- a/environments/run/Dockerfile.arm
+++ b/environments/run/Dockerfile.arm
@@ -13,15 +13,15 @@
# You should have received a copy of the GNU General Public License
# along with CFDNSUpdater. If not, see .
-from vimagick/alpine-arm
+from gliderlabs/alpine-arm
RUN apk add --no-cache ca-certificates
ARG VERSION
-ADD https://github.com/aacebedo/cfdnsupdater/releases/download/${VERSION}/cfdnsupdater.arm.${VERSION}.tar.gz /tmp/cfdnsupdater.tar.gz
-RUN tar xvzf /tmp/cfdnsupdater.tar.gz -C /usr/local/bin
-RUN rm -rf /tmp/cfdnsupdater.tar.gz
+ADD https://github.com/aacebedo/cfdnsupdater/releases/download/${VERSION}/cfdnsupdater.arm.${VERSION}.tar /tmp/cfdnsupdater.tar
+RUN tar xvf /tmp/cfdnsupdater.tar -C /
+RUN rm -rf /tmp/cfdnsupdater.tar
ENTRYPOINT ["cfdnsupdater"]
diff --git a/src/cfdnsupdater/configuration/cmdline.go b/go/configuration/cmdline.go
similarity index 98%
rename from src/cfdnsupdater/configuration/cmdline.go
rename to go/configuration/cmdline.go
index c021e5e..d13c4a8 100644
--- a/src/cfdnsupdater/configuration/cmdline.go
+++ b/go/configuration/cmdline.go
@@ -22,7 +22,7 @@ import (
"gopkg.in/yaml.v2"
"io/ioutil"
"math"
- "cfdnsupdater/utils"
+ "github.com/aacebedo/cfdnsupdater/go/utils"
)
const (
diff --git a/src/cfdnsupdater/configuration/loggers.go b/go/configuration/loggers.go
similarity index 100%
rename from src/cfdnsupdater/configuration/loggers.go
rename to go/configuration/loggers.go
diff --git a/src/cfdnsupdater/configuration/types.go b/go/configuration/types.go
similarity index 88%
rename from src/cfdnsupdater/configuration/types.go
rename to go/configuration/types.go
index f90e33e..baccc12 100644
--- a/src/cfdnsupdater/configuration/types.go
+++ b/go/configuration/types.go
@@ -16,8 +16,8 @@
package configuration
import (
- "cfdnsupdater/core"
- "cfdnsupdater/utils"
+ "github.com/aacebedo/cfdnsupdater/go/core"
+ "github.com/aacebedo/cfdnsupdater/go/utils"
)
@@ -26,7 +26,7 @@ type DomainConfiguration struct {
ApiKey string `yaml:"apikey"`
Period int `yaml:"period"`
RecordNames []string `yaml:"record_names"`
- RecordTypes core.RecordTypeSlice `yaml:"record_types"`
+ RecordTypes core.RecordTypeSlice `yaml:"record_types"`
}
type CFDNSUpdaterConfiguration struct {
diff --git a/src/cfdnsupdater/core/loggers.go b/go/core/loggers.go
similarity index 100%
rename from src/cfdnsupdater/core/loggers.go
rename to go/core/loggers.go
diff --git a/src/cfdnsupdater/core/types.go b/go/core/types.go
similarity index 100%
rename from src/cfdnsupdater/core/types.go
rename to go/core/types.go
diff --git a/src/cfdnsupdater/updater/loggers.go b/go/updater/loggers.go
similarity index 100%
rename from src/cfdnsupdater/updater/loggers.go
rename to go/updater/loggers.go
diff --git a/src/cfdnsupdater/updater/updater.go b/go/updater/updater.go
similarity index 98%
rename from src/cfdnsupdater/updater/updater.go
rename to go/updater/updater.go
index 68800c3..deaf722 100644
--- a/src/cfdnsupdater/updater/updater.go
+++ b/go/updater/updater.go
@@ -16,8 +16,8 @@
package updater
import (
- "cfdnsupdater/core"
- "cfdnsupdater/utils"
+ "github.com/aacebedo/cfdnsupdater/go/core"
+ "github.com/aacebedo/cfdnsupdater/go/utils"
"encoding/json"
"errors"
"fmt"
diff --git a/src/cfdnsupdater/utils/helpers.go b/go/utils/helpers.go
similarity index 100%
rename from src/cfdnsupdater/utils/helpers.go
rename to go/utils/helpers.go
diff --git a/src/cfdnsupdater/utils/loggers.go b/go/utils/loggers.go
similarity index 100%
rename from src/cfdnsupdater/utils/loggers.go
rename to go/utils/loggers.go
diff --git a/src/cfdnsupdater/main.go b/main.go
similarity index 91%
rename from src/cfdnsupdater/main.go
rename to main.go
index 340862d..14a6e25 100644
--- a/src/cfdnsupdater/main.go
+++ b/main.go
@@ -16,9 +16,9 @@
package main
import (
- "cfdnsupdater/utils"
- "cfdnsupdater/configuration"
- "cfdnsupdater/updater"
+ "github.com/aacebedo/cfdnsupdater/go/utils"
+ "github.com/aacebedo/cfdnsupdater/go/configuration"
+ "github.com/aacebedo/cfdnsupdater/go/updater"
"os"
"sync"
"github.com/op/go-logging"
diff --git a/src/cfdnsupdater/Godeps/Godeps.json b/src/cfdnsupdater/Godeps/Godeps.json
deleted file mode 100644
index 16c8ffb..0000000
--- a/src/cfdnsupdater/Godeps/Godeps.json
+++ /dev/null
@@ -1,36 +0,0 @@
-{
- "ImportPath": "cfdnsupdater",
- "GoVersion": "go1.5",
- "Deps": [
- {
- "ImportPath": "github.com/alecthomas/template",
- "Rev": "14fd436dd20c3cc65242a9f396b61bfc8a3926fc"
- },
- {
- "ImportPath": "github.com/alecthomas/units",
- "Rev": "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
- },
- {
- "ImportPath": "github.com/levigross/grequests",
- "Comment": "0.7-4-g67a2af2",
- "Rev": "67a2af2c584676e9b8ff6b6452e96e7ffdc55410"
- },
- {
- "ImportPath": "github.com/op/go-logging",
- "Rev": "dfaf3dff9b631bc4236201d90d41ee0de9202889"
- },
- {
- "ImportPath": "golang.org/x/net/publicsuffix",
- "Rev": "04b9de9b512f58addf28c9853d50ebef61c3953e"
- },
- {
- "ImportPath": "gopkg.in/alecthomas/kingpin.v2",
- "Comment": "v2.1.10",
- "Rev": "24b74030480f0aa98802b51ff4622a7eb09dfddd"
- },
- {
- "ImportPath": "gopkg.in/yaml.v2",
- "Rev": "f7716cbe52baa25d2e9b0d0da546fcf909fc16b4"
- }
- ]
-}
diff --git a/src/cfdnsupdater/Godeps/Readme b/src/cfdnsupdater/Godeps/Readme
deleted file mode 100644
index 4cdaa53..0000000
--- a/src/cfdnsupdater/Godeps/Readme
+++ /dev/null
@@ -1,5 +0,0 @@
-This directory tree is generated automatically by godep.
-
-Please do not edit.
-
-See https://github.com/tools/godep for more information.
diff --git a/src/cfdnsupdater/Godeps/_workspace/.gitignore b/src/cfdnsupdater/Godeps/_workspace/.gitignore
deleted file mode 100644
index f037d68..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-/pkg
-/bin
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/README.md b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/README.md
deleted file mode 100644
index ef6a8ee..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/README.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Go's `text/template` package with newline elision
-
-This is a fork of Go 1.4's [text/template](http://golang.org/pkg/text/template/) package with one addition: a backslash immediately after a closing delimiter will delete all subsequent newlines until a non-newline.
-
-eg.
-
-```
-{{if true}}\
-hello
-{{end}}\
-```
-
-Will result in:
-
-```
-hello\n
-```
-
-Rather than:
-
-```
-\n
-hello\n
-\n
-```
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/doc.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/doc.go
deleted file mode 100644
index 223c595..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/doc.go
+++ /dev/null
@@ -1,406 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-/*
-Package template implements data-driven templates for generating textual output.
-
-To generate HTML output, see package html/template, which has the same interface
-as this package but automatically secures HTML output against certain attacks.
-
-Templates are executed by applying them to a data structure. Annotations in the
-template refer to elements of the data structure (typically a field of a struct
-or a key in a map) to control execution and derive values to be displayed.
-Execution of the template walks the structure and sets the cursor, represented
-by a period '.' and called "dot", to the value at the current location in the
-structure as execution proceeds.
-
-The input text for a template is UTF-8-encoded text in any format.
-"Actions"--data evaluations or control structures--are delimited by
-"{{" and "}}"; all text outside actions is copied to the output unchanged.
-Actions may not span newlines, although comments can.
-
-Once parsed, a template may be executed safely in parallel.
-
-Here is a trivial example that prints "17 items are made of wool".
-
- type Inventory struct {
- Material string
- Count uint
- }
- sweaters := Inventory{"wool", 17}
- tmpl, err := template.New("test").Parse("{{.Count}} items are made of {{.Material}}")
- if err != nil { panic(err) }
- err = tmpl.Execute(os.Stdout, sweaters)
- if err != nil { panic(err) }
-
-More intricate examples appear below.
-
-Actions
-
-Here is the list of actions. "Arguments" and "pipelines" are evaluations of
-data, defined in detail below.
-
-*/
-// {{/* a comment */}}
-// A comment; discarded. May contain newlines.
-// Comments do not nest and must start and end at the
-// delimiters, as shown here.
-/*
-
- {{pipeline}}
- The default textual representation of the value of the pipeline
- is copied to the output.
-
- {{if pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, T1 is executed. The empty values are false, 0, any
- nil pointer or interface value, and any array, slice, map, or
- string of length zero.
- Dot is unaffected.
-
- {{if pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, T0 is executed;
- otherwise, T1 is executed. Dot is unaffected.
-
- {{if pipeline}} T1 {{else if pipeline}} T0 {{end}}
- To simplify the appearance of if-else chains, the else action
- of an if may include another if directly; the effect is exactly
- the same as writing
- {{if pipeline}} T1 {{else}}{{if pipeline}} T0 {{end}}{{end}}
-
- {{range pipeline}} T1 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, nothing is output;
- otherwise, dot is set to the successive elements of the array,
- slice, or map and T1 is executed. If the value is a map and the
- keys are of basic type with a defined order ("comparable"), the
- elements will be visited in sorted key order.
-
- {{range pipeline}} T1 {{else}} T0 {{end}}
- The value of the pipeline must be an array, slice, map, or channel.
- If the value of the pipeline has length zero, dot is unaffected and
- T0 is executed; otherwise, dot is set to the successive elements
- of the array, slice, or map and T1 is executed.
-
- {{template "name"}}
- The template with the specified name is executed with nil data.
-
- {{template "name" pipeline}}
- The template with the specified name is executed with dot set
- to the value of the pipeline.
-
- {{with pipeline}} T1 {{end}}
- If the value of the pipeline is empty, no output is generated;
- otherwise, dot is set to the value of the pipeline and T1 is
- executed.
-
- {{with pipeline}} T1 {{else}} T0 {{end}}
- If the value of the pipeline is empty, dot is unaffected and T0
- is executed; otherwise, dot is set to the value of the pipeline
- and T1 is executed.
-
-Arguments
-
-An argument is a simple value, denoted by one of the following.
-
- - A boolean, string, character, integer, floating-point, imaginary
- or complex constant in Go syntax. These behave like Go's untyped
- constants, although raw strings may not span newlines.
- - The keyword nil, representing an untyped Go nil.
- - The character '.' (period):
- .
- The result is the value of dot.
- - A variable name, which is a (possibly empty) alphanumeric string
- preceded by a dollar sign, such as
- $piOver2
- or
- $
- The result is the value of the variable.
- Variables are described below.
- - The name of a field of the data, which must be a struct, preceded
- by a period, such as
- .Field
- The result is the value of the field. Field invocations may be
- chained:
- .Field1.Field2
- Fields can also be evaluated on variables, including chaining:
- $x.Field1.Field2
- - The name of a key of the data, which must be a map, preceded
- by a period, such as
- .Key
- The result is the map element value indexed by the key.
- Key invocations may be chained and combined with fields to any
- depth:
- .Field1.Key1.Field2.Key2
- Although the key must be an alphanumeric identifier, unlike with
- field names they do not need to start with an upper case letter.
- Keys can also be evaluated on variables, including chaining:
- $x.key1.key2
- - The name of a niladic method of the data, preceded by a period,
- such as
- .Method
- The result is the value of invoking the method with dot as the
- receiver, dot.Method(). Such a method must have one return value (of
- any type) or two return values, the second of which is an error.
- If it has two and the returned error is non-nil, execution terminates
- and an error is returned to the caller as the value of Execute.
- Method invocations may be chained and combined with fields and keys
- to any depth:
- .Field1.Key1.Method1.Field2.Key2.Method2
- Methods can also be evaluated on variables, including chaining:
- $x.Method1.Field
- - The name of a niladic function, such as
- fun
- The result is the value of invoking the function, fun(). The return
- types and values behave as in methods. Functions and function
- names are described below.
- - A parenthesized instance of one the above, for grouping. The result
- may be accessed by a field or map key invocation.
- print (.F1 arg1) (.F2 arg2)
- (.StructValuedMethod "arg").Field
-
-Arguments may evaluate to any type; if they are pointers the implementation
-automatically indirects to the base type when required.
-If an evaluation yields a function value, such as a function-valued
-field of a struct, the function is not invoked automatically, but it
-can be used as a truth value for an if action and the like. To invoke
-it, use the call function, defined below.
-
-A pipeline is a possibly chained sequence of "commands". A command is a simple
-value (argument) or a function or method call, possibly with multiple arguments:
-
- Argument
- The result is the value of evaluating the argument.
- .Method [Argument...]
- The method can be alone or the last element of a chain but,
- unlike methods in the middle of a chain, it can take arguments.
- The result is the value of calling the method with the
- arguments:
- dot.Method(Argument1, etc.)
- functionName [Argument...]
- The result is the value of calling the function associated
- with the name:
- function(Argument1, etc.)
- Functions and function names are described below.
-
-Pipelines
-
-A pipeline may be "chained" by separating a sequence of commands with pipeline
-characters '|'. In a chained pipeline, the result of the each command is
-passed as the last argument of the following command. The output of the final
-command in the pipeline is the value of the pipeline.
-
-The output of a command will be either one value or two values, the second of
-which has type error. If that second value is present and evaluates to
-non-nil, execution terminates and the error is returned to the caller of
-Execute.
-
-Variables
-
-A pipeline inside an action may initialize a variable to capture the result.
-The initialization has syntax
-
- $variable := pipeline
-
-where $variable is the name of the variable. An action that declares a
-variable produces no output.
-
-If a "range" action initializes a variable, the variable is set to the
-successive elements of the iteration. Also, a "range" may declare two
-variables, separated by a comma:
-
- range $index, $element := pipeline
-
-in which case $index and $element are set to the successive values of the
-array/slice index or map key and element, respectively. Note that if there is
-only one variable, it is assigned the element; this is opposite to the
-convention in Go range clauses.
-
-A variable's scope extends to the "end" action of the control structure ("if",
-"with", or "range") in which it is declared, or to the end of the template if
-there is no such control structure. A template invocation does not inherit
-variables from the point of its invocation.
-
-When execution begins, $ is set to the data argument passed to Execute, that is,
-to the starting value of dot.
-
-Examples
-
-Here are some example one-line templates demonstrating pipelines and variables.
-All produce the quoted word "output":
-
- {{"\"output\""}}
- A string constant.
- {{`"output"`}}
- A raw string constant.
- {{printf "%q" "output"}}
- A function call.
- {{"output" | printf "%q"}}
- A function call whose final argument comes from the previous
- command.
- {{printf "%q" (print "out" "put")}}
- A parenthesized argument.
- {{"put" | printf "%s%s" "out" | printf "%q"}}
- A more elaborate call.
- {{"output" | printf "%s" | printf "%q"}}
- A longer chain.
- {{with "output"}}{{printf "%q" .}}{{end}}
- A with action using dot.
- {{with $x := "output" | printf "%q"}}{{$x}}{{end}}
- A with action that creates and uses a variable.
- {{with $x := "output"}}{{printf "%q" $x}}{{end}}
- A with action that uses the variable in another action.
- {{with $x := "output"}}{{$x | printf "%q"}}{{end}}
- The same, but pipelined.
-
-Functions
-
-During execution functions are found in two function maps: first in the
-template, then in the global function map. By default, no functions are defined
-in the template but the Funcs method can be used to add them.
-
-Predefined global functions are named as follows.
-
- and
- Returns the boolean AND of its arguments by returning the
- first empty argument or the last argument, that is,
- "and x y" behaves as "if x then y else x". All the
- arguments are evaluated.
- call
- Returns the result of calling the first argument, which
- must be a function, with the remaining arguments as parameters.
- Thus "call .X.Y 1 2" is, in Go notation, dot.X.Y(1, 2) where
- Y is a func-valued field, map entry, or the like.
- The first argument must be the result of an evaluation
- that yields a value of function type (as distinct from
- a predefined function such as print). The function must
- return either one or two result values, the second of which
- is of type error. If the arguments don't match the function
- or the returned error value is non-nil, execution stops.
- html
- Returns the escaped HTML equivalent of the textual
- representation of its arguments.
- index
- Returns the result of indexing its first argument by the
- following arguments. Thus "index x 1 2 3" is, in Go syntax,
- x[1][2][3]. Each indexed item must be a map, slice, or array.
- js
- Returns the escaped JavaScript equivalent of the textual
- representation of its arguments.
- len
- Returns the integer length of its argument.
- not
- Returns the boolean negation of its single argument.
- or
- Returns the boolean OR of its arguments by returning the
- first non-empty argument or the last argument, that is,
- "or x y" behaves as "if x then x else y". All the
- arguments are evaluated.
- print
- An alias for fmt.Sprint
- printf
- An alias for fmt.Sprintf
- println
- An alias for fmt.Sprintln
- urlquery
- Returns the escaped value of the textual representation of
- its arguments in a form suitable for embedding in a URL query.
-
-The boolean functions take any zero value to be false and a non-zero
-value to be true.
-
-There is also a set of binary comparison operators defined as
-functions:
-
- eq
- Returns the boolean truth of arg1 == arg2
- ne
- Returns the boolean truth of arg1 != arg2
- lt
- Returns the boolean truth of arg1 < arg2
- le
- Returns the boolean truth of arg1 <= arg2
- gt
- Returns the boolean truth of arg1 > arg2
- ge
- Returns the boolean truth of arg1 >= arg2
-
-For simpler multi-way equality tests, eq (only) accepts two or more
-arguments and compares the second and subsequent to the first,
-returning in effect
-
- arg1==arg2 || arg1==arg3 || arg1==arg4 ...
-
-(Unlike with || in Go, however, eq is a function call and all the
-arguments will be evaluated.)
-
-The comparison functions work on basic types only (or named basic
-types, such as "type Celsius float32"). They implement the Go rules
-for comparison of values, except that size and exact type are
-ignored, so any integer value, signed or unsigned, may be compared
-with any other integer value. (The arithmetic value is compared,
-not the bit pattern, so all negative integers are less than all
-unsigned integers.) However, as usual, one may not compare an int
-with a float32 and so on.
-
-Associated templates
-
-Each template is named by a string specified when it is created. Also, each
-template is associated with zero or more other templates that it may invoke by
-name; such associations are transitive and form a name space of templates.
-
-A template may use a template invocation to instantiate another associated
-template; see the explanation of the "template" action above. The name must be
-that of a template associated with the template that contains the invocation.
-
-Nested template definitions
-
-When parsing a template, another template may be defined and associated with the
-template being parsed. Template definitions must appear at the top level of the
-template, much like global variables in a Go program.
-
-The syntax of such definitions is to surround each template declaration with a
-"define" and "end" action.
-
-The define action names the template being created by providing a string
-constant. Here is a simple example:
-
- `{{define "T1"}}ONE{{end}}
- {{define "T2"}}TWO{{end}}
- {{define "T3"}}{{template "T1"}} {{template "T2"}}{{end}}
- {{template "T3"}}`
-
-This defines two templates, T1 and T2, and a third T3 that invokes the other two
-when it is executed. Finally it invokes T3. If executed this template will
-produce the text
-
- ONE TWO
-
-By construction, a template may reside in only one association. If it's
-necessary to have a template addressable from multiple associations, the
-template definition must be parsed multiple times to create distinct *Template
-values, or must be copied with the Clone or AddParseTree method.
-
-Parse may be called multiple times to assemble the various associated templates;
-see the ParseFiles and ParseGlob functions and methods for simple ways to parse
-related templates stored in files.
-
-A template may be executed directly or through ExecuteTemplate, which executes
-an associated template identified by name. To invoke our example above, we
-might write,
-
- err := tmpl.Execute(os.Stdout, "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-or to invoke a particular template explicitly by name,
-
- err := tmpl.ExecuteTemplate(os.Stdout, "T2", "no data needed")
- if err != nil {
- log.Fatalf("execution failed: %s", err)
- }
-
-*/
-package template
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/exec.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/exec.go
deleted file mode 100644
index c3078e5..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/exec.go
+++ /dev/null
@@ -1,845 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "fmt"
- "io"
- "reflect"
- "runtime"
- "sort"
- "strings"
-
- "github.com/alecthomas/template/parse"
-)
-
-// state represents the state of an execution. It's not part of the
-// template so that multiple executions of the same template
-// can execute in parallel.
-type state struct {
- tmpl *Template
- wr io.Writer
- node parse.Node // current node, for errors
- vars []variable // push-down stack of variable values.
-}
-
-// variable holds the dynamic value of a variable such as $, $x etc.
-type variable struct {
- name string
- value reflect.Value
-}
-
-// push pushes a new variable on the stack.
-func (s *state) push(name string, value reflect.Value) {
- s.vars = append(s.vars, variable{name, value})
-}
-
-// mark returns the length of the variable stack.
-func (s *state) mark() int {
- return len(s.vars)
-}
-
-// pop pops the variable stack up to the mark.
-func (s *state) pop(mark int) {
- s.vars = s.vars[0:mark]
-}
-
-// setVar overwrites the top-nth variable on the stack. Used by range iterations.
-func (s *state) setVar(n int, value reflect.Value) {
- s.vars[len(s.vars)-n].value = value
-}
-
-// varValue returns the value of the named variable.
-func (s *state) varValue(name string) reflect.Value {
- for i := s.mark() - 1; i >= 0; i-- {
- if s.vars[i].name == name {
- return s.vars[i].value
- }
- }
- s.errorf("undefined variable: %s", name)
- return zero
-}
-
-var zero reflect.Value
-
-// at marks the state to be on node n, for error reporting.
-func (s *state) at(node parse.Node) {
- s.node = node
-}
-
-// doublePercent returns the string with %'s replaced by %%, if necessary,
-// so it can be used safely inside a Printf format string.
-func doublePercent(str string) string {
- if strings.Contains(str, "%") {
- str = strings.Replace(str, "%", "%%", -1)
- }
- return str
-}
-
-// errorf formats the error and terminates processing.
-func (s *state) errorf(format string, args ...interface{}) {
- name := doublePercent(s.tmpl.Name())
- if s.node == nil {
- format = fmt.Sprintf("template: %s: %s", name, format)
- } else {
- location, context := s.tmpl.ErrorContext(s.node)
- format = fmt.Sprintf("template: %s: executing %q at <%s>: %s", location, name, doublePercent(context), format)
- }
- panic(fmt.Errorf(format, args...))
-}
-
-// errRecover is the handler that turns panics into returns from the top
-// level of Parse.
-func errRecover(errp *error) {
- e := recover()
- if e != nil {
- switch err := e.(type) {
- case runtime.Error:
- panic(e)
- case error:
- *errp = err
- default:
- panic(e)
- }
- }
-}
-
-// ExecuteTemplate applies the template associated with t that has the given name
-// to the specified data object and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel.
-func (t *Template) ExecuteTemplate(wr io.Writer, name string, data interface{}) error {
- tmpl := t.tmpl[name]
- if tmpl == nil {
- return fmt.Errorf("template: no template %q associated with template %q", name, t.name)
- }
- return tmpl.Execute(wr, data)
-}
-
-// Execute applies a parsed template to the specified data object,
-// and writes the output to wr.
-// If an error occurs executing the template or writing its output,
-// execution stops, but partial results may already have been written to
-// the output writer.
-// A template may be executed safely in parallel.
-func (t *Template) Execute(wr io.Writer, data interface{}) (err error) {
- defer errRecover(&err)
- value := reflect.ValueOf(data)
- state := &state{
- tmpl: t,
- wr: wr,
- vars: []variable{{"$", value}},
- }
- t.init()
- if t.Tree == nil || t.Root == nil {
- var b bytes.Buffer
- for name, tmpl := range t.tmpl {
- if tmpl.Tree == nil || tmpl.Root == nil {
- continue
- }
- if b.Len() > 0 {
- b.WriteString(", ")
- }
- fmt.Fprintf(&b, "%q", name)
- }
- var s string
- if b.Len() > 0 {
- s = "; defined templates are: " + b.String()
- }
- state.errorf("%q is an incomplete or empty template%s", t.Name(), s)
- }
- state.walk(value, t.Root)
- return
-}
-
-// Walk functions step through the major pieces of the template structure,
-// generating output as they go.
-func (s *state) walk(dot reflect.Value, node parse.Node) {
- s.at(node)
- switch node := node.(type) {
- case *parse.ActionNode:
- // Do not pop variables so they persist until next end.
- // Also, if the action declares variables, don't print the result.
- val := s.evalPipeline(dot, node.Pipe)
- if len(node.Pipe.Decl) == 0 {
- s.printValue(node, val)
- }
- case *parse.IfNode:
- s.walkIfOrWith(parse.NodeIf, dot, node.Pipe, node.List, node.ElseList)
- case *parse.ListNode:
- for _, node := range node.Nodes {
- s.walk(dot, node)
- }
- case *parse.RangeNode:
- s.walkRange(dot, node)
- case *parse.TemplateNode:
- s.walkTemplate(dot, node)
- case *parse.TextNode:
- if _, err := s.wr.Write(node.Text); err != nil {
- s.errorf("%s", err)
- }
- case *parse.WithNode:
- s.walkIfOrWith(parse.NodeWith, dot, node.Pipe, node.List, node.ElseList)
- default:
- s.errorf("unknown node: %s", node)
- }
-}
-
-// walkIfOrWith walks an 'if' or 'with' node. The two control structures
-// are identical in behavior except that 'with' sets dot.
-func (s *state) walkIfOrWith(typ parse.NodeType, dot reflect.Value, pipe *parse.PipeNode, list, elseList *parse.ListNode) {
- defer s.pop(s.mark())
- val := s.evalPipeline(dot, pipe)
- truth, ok := isTrue(val)
- if !ok {
- s.errorf("if/with can't use %v", val)
- }
- if truth {
- if typ == parse.NodeWith {
- s.walk(val, list)
- } else {
- s.walk(dot, list)
- }
- } else if elseList != nil {
- s.walk(dot, elseList)
- }
-}
-
-// isTrue reports whether the value is 'true', in the sense of not the zero of its type,
-// and whether the value has a meaningful truth value.
-func isTrue(val reflect.Value) (truth, ok bool) {
- if !val.IsValid() {
- // Something like var x interface{}, never set. It's a form of nil.
- return false, true
- }
- switch val.Kind() {
- case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
- truth = val.Len() > 0
- case reflect.Bool:
- truth = val.Bool()
- case reflect.Complex64, reflect.Complex128:
- truth = val.Complex() != 0
- case reflect.Chan, reflect.Func, reflect.Ptr, reflect.Interface:
- truth = !val.IsNil()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- truth = val.Int() != 0
- case reflect.Float32, reflect.Float64:
- truth = val.Float() != 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- truth = val.Uint() != 0
- case reflect.Struct:
- truth = true // Struct values are always true.
- default:
- return
- }
- return truth, true
-}
-
-func (s *state) walkRange(dot reflect.Value, r *parse.RangeNode) {
- s.at(r)
- defer s.pop(s.mark())
- val, _ := indirect(s.evalPipeline(dot, r.Pipe))
- // mark top of stack before any variables in the body are pushed.
- mark := s.mark()
- oneIteration := func(index, elem reflect.Value) {
- // Set top var (lexically the second if there are two) to the element.
- if len(r.Pipe.Decl) > 0 {
- s.setVar(1, elem)
- }
- // Set next var (lexically the first if there are two) to the index.
- if len(r.Pipe.Decl) > 1 {
- s.setVar(2, index)
- }
- s.walk(elem, r.List)
- s.pop(mark)
- }
- switch val.Kind() {
- case reflect.Array, reflect.Slice:
- if val.Len() == 0 {
- break
- }
- for i := 0; i < val.Len(); i++ {
- oneIteration(reflect.ValueOf(i), val.Index(i))
- }
- return
- case reflect.Map:
- if val.Len() == 0 {
- break
- }
- for _, key := range sortKeys(val.MapKeys()) {
- oneIteration(key, val.MapIndex(key))
- }
- return
- case reflect.Chan:
- if val.IsNil() {
- break
- }
- i := 0
- for ; ; i++ {
- elem, ok := val.Recv()
- if !ok {
- break
- }
- oneIteration(reflect.ValueOf(i), elem)
- }
- if i == 0 {
- break
- }
- return
- case reflect.Invalid:
- break // An invalid value is likely a nil map, etc. and acts like an empty map.
- default:
- s.errorf("range can't iterate over %v", val)
- }
- if r.ElseList != nil {
- s.walk(dot, r.ElseList)
- }
-}
-
-func (s *state) walkTemplate(dot reflect.Value, t *parse.TemplateNode) {
- s.at(t)
- tmpl := s.tmpl.tmpl[t.Name]
- if tmpl == nil {
- s.errorf("template %q not defined", t.Name)
- }
- // Variables declared by the pipeline persist.
- dot = s.evalPipeline(dot, t.Pipe)
- newState := *s
- newState.tmpl = tmpl
- // No dynamic scoping: template invocations inherit no variables.
- newState.vars = []variable{{"$", dot}}
- newState.walk(dot, tmpl.Root)
-}
-
-// Eval functions evaluate pipelines, commands, and their elements and extract
-// values from the data structure by examining fields, calling methods, and so on.
-// The printing of those values happens only through walk functions.
-
-// evalPipeline returns the value acquired by evaluating a pipeline. If the
-// pipeline has a variable declaration, the variable will be pushed on the
-// stack. Callers should therefore pop the stack after they are finished
-// executing commands depending on the pipeline value.
-func (s *state) evalPipeline(dot reflect.Value, pipe *parse.PipeNode) (value reflect.Value) {
- if pipe == nil {
- return
- }
- s.at(pipe)
- for _, cmd := range pipe.Cmds {
- value = s.evalCommand(dot, cmd, value) // previous value is this one's final arg.
- // If the object has type interface{}, dig down one level to the thing inside.
- if value.Kind() == reflect.Interface && value.Type().NumMethod() == 0 {
- value = reflect.ValueOf(value.Interface()) // lovely!
- }
- }
- for _, variable := range pipe.Decl {
- s.push(variable.Ident[0], value)
- }
- return value
-}
-
-func (s *state) notAFunction(args []parse.Node, final reflect.Value) {
- if len(args) > 1 || final.IsValid() {
- s.errorf("can't give argument to non-function %s", args[0])
- }
-}
-
-func (s *state) evalCommand(dot reflect.Value, cmd *parse.CommandNode, final reflect.Value) reflect.Value {
- firstWord := cmd.Args[0]
- switch n := firstWord.(type) {
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, cmd.Args, final)
- case *parse.ChainNode:
- return s.evalChainNode(dot, n, cmd.Args, final)
- case *parse.IdentifierNode:
- // Must be a function.
- return s.evalFunction(dot, n, cmd, cmd.Args, final)
- case *parse.PipeNode:
- // Parenthesized pipeline. The arguments are all inside the pipeline; final is ignored.
- return s.evalPipeline(dot, n)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, cmd.Args, final)
- }
- s.at(firstWord)
- s.notAFunction(cmd.Args, final)
- switch word := firstWord.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(word.True)
- case *parse.DotNode:
- return dot
- case *parse.NilNode:
- s.errorf("nil is not a command")
- case *parse.NumberNode:
- return s.idealConstant(word)
- case *parse.StringNode:
- return reflect.ValueOf(word.Text)
- }
- s.errorf("can't evaluate command %q", firstWord)
- panic("not reached")
-}
-
-// idealConstant is called to return the value of a number in a context where
-// we don't know the type. In that case, the syntax of the number tells us
-// its type, and we use Go rules to resolve. Note there is no such thing as
-// a uint ideal constant in this situation - the value must be of int type.
-func (s *state) idealConstant(constant *parse.NumberNode) reflect.Value {
- // These are ideal constants but we don't know the type
- // and we have no context. (If it was a method argument,
- // we'd know what we need.) The syntax guides us to some extent.
- s.at(constant)
- switch {
- case constant.IsComplex:
- return reflect.ValueOf(constant.Complex128) // incontrovertible.
- case constant.IsFloat && !isHexConstant(constant.Text) && strings.IndexAny(constant.Text, ".eE") >= 0:
- return reflect.ValueOf(constant.Float64)
- case constant.IsInt:
- n := int(constant.Int64)
- if int64(n) != constant.Int64 {
- s.errorf("%s overflows int", constant.Text)
- }
- return reflect.ValueOf(n)
- case constant.IsUint:
- s.errorf("%s overflows int", constant.Text)
- }
- return zero
-}
-
-func isHexConstant(s string) bool {
- return len(s) > 2 && s[0] == '0' && (s[1] == 'x' || s[1] == 'X')
-}
-
-func (s *state) evalFieldNode(dot reflect.Value, field *parse.FieldNode, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(field)
- return s.evalFieldChain(dot, dot, field, field.Ident, args, final)
-}
-
-func (s *state) evalChainNode(dot reflect.Value, chain *parse.ChainNode, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(chain)
- // (pipe).Field1.Field2 has pipe as .Node, fields as .Field. Eval the pipeline, then the fields.
- pipe := s.evalArg(dot, nil, chain.Node)
- if len(chain.Field) == 0 {
- s.errorf("internal error: no fields in evalChainNode")
- }
- return s.evalFieldChain(dot, pipe, chain, chain.Field, args, final)
-}
-
-func (s *state) evalVariableNode(dot reflect.Value, variable *parse.VariableNode, args []parse.Node, final reflect.Value) reflect.Value {
- // $x.Field has $x as the first ident, Field as the second. Eval the var, then the fields.
- s.at(variable)
- value := s.varValue(variable.Ident[0])
- if len(variable.Ident) == 1 {
- s.notAFunction(args, final)
- return value
- }
- return s.evalFieldChain(dot, value, variable, variable.Ident[1:], args, final)
-}
-
-// evalFieldChain evaluates .X.Y.Z possibly followed by arguments.
-// dot is the environment in which to evaluate arguments, while
-// receiver is the value being walked along the chain.
-func (s *state) evalFieldChain(dot, receiver reflect.Value, node parse.Node, ident []string, args []parse.Node, final reflect.Value) reflect.Value {
- n := len(ident)
- for i := 0; i < n-1; i++ {
- receiver = s.evalField(dot, ident[i], node, nil, zero, receiver)
- }
- // Now if it's a method, it gets the arguments.
- return s.evalField(dot, ident[n-1], node, args, final, receiver)
-}
-
-func (s *state) evalFunction(dot reflect.Value, node *parse.IdentifierNode, cmd parse.Node, args []parse.Node, final reflect.Value) reflect.Value {
- s.at(node)
- name := node.Ident
- function, ok := findFunction(name, s.tmpl)
- if !ok {
- s.errorf("%q is not a defined function", name)
- }
- return s.evalCall(dot, function, cmd, name, args, final)
-}
-
-// evalField evaluates an expression like (.Field) or (.Field arg1 arg2).
-// The 'final' argument represents the return value from the preceding
-// value of the pipeline, if any.
-func (s *state) evalField(dot reflect.Value, fieldName string, node parse.Node, args []parse.Node, final, receiver reflect.Value) reflect.Value {
- if !receiver.IsValid() {
- return zero
- }
- typ := receiver.Type()
- receiver, _ = indirect(receiver)
- // Unless it's an interface, need to get to a value of type *T to guarantee
- // we see all methods of T and *T.
- ptr := receiver
- if ptr.Kind() != reflect.Interface && ptr.CanAddr() {
- ptr = ptr.Addr()
- }
- if method := ptr.MethodByName(fieldName); method.IsValid() {
- return s.evalCall(dot, method, node, fieldName, args, final)
- }
- hasArgs := len(args) > 1 || final.IsValid()
- // It's not a method; must be a field of a struct or an element of a map. The receiver must not be nil.
- receiver, isNil := indirect(receiver)
- if isNil {
- s.errorf("nil pointer evaluating %s.%s", typ, fieldName)
- }
- switch receiver.Kind() {
- case reflect.Struct:
- tField, ok := receiver.Type().FieldByName(fieldName)
- if ok {
- field := receiver.FieldByIndex(tField.Index)
- if tField.PkgPath != "" { // field is unexported
- s.errorf("%s is an unexported field of struct type %s", fieldName, typ)
- }
- // If it's a function, we must call it.
- if hasArgs {
- s.errorf("%s has arguments but cannot be invoked as function", fieldName)
- }
- return field
- }
- s.errorf("%s is not a field of struct type %s", fieldName, typ)
- case reflect.Map:
- // If it's a map, attempt to use the field name as a key.
- nameVal := reflect.ValueOf(fieldName)
- if nameVal.Type().AssignableTo(receiver.Type().Key()) {
- if hasArgs {
- s.errorf("%s is not a method but has arguments", fieldName)
- }
- return receiver.MapIndex(nameVal)
- }
- }
- s.errorf("can't evaluate field %s in type %s", fieldName, typ)
- panic("not reached")
-}
-
-var (
- errorType = reflect.TypeOf((*error)(nil)).Elem()
- fmtStringerType = reflect.TypeOf((*fmt.Stringer)(nil)).Elem()
-)
-
-// evalCall executes a function or method call. If it's a method, fun already has the receiver bound, so
-// it looks just like a function call. The arg list, if non-nil, includes (in the manner of the shell), arg[0]
-// as the function itself.
-func (s *state) evalCall(dot, fun reflect.Value, node parse.Node, name string, args []parse.Node, final reflect.Value) reflect.Value {
- if args != nil {
- args = args[1:] // Zeroth arg is function name/node; not passed to function.
- }
- typ := fun.Type()
- numIn := len(args)
- if final.IsValid() {
- numIn++
- }
- numFixed := len(args)
- if typ.IsVariadic() {
- numFixed = typ.NumIn() - 1 // last arg is the variadic one.
- if numIn < numFixed {
- s.errorf("wrong number of args for %s: want at least %d got %d", name, typ.NumIn()-1, len(args))
- }
- } else if numIn < typ.NumIn()-1 || !typ.IsVariadic() && numIn != typ.NumIn() {
- s.errorf("wrong number of args for %s: want %d got %d", name, typ.NumIn(), len(args))
- }
- if !goodFunc(typ) {
- // TODO: This could still be a confusing error; maybe goodFunc should provide info.
- s.errorf("can't call method/function %q with %d results", name, typ.NumOut())
- }
- // Build the arg list.
- argv := make([]reflect.Value, numIn)
- // Args must be evaluated. Fixed args first.
- i := 0
- for ; i < numFixed && i < len(args); i++ {
- argv[i] = s.evalArg(dot, typ.In(i), args[i])
- }
- // Now the ... args.
- if typ.IsVariadic() {
- argType := typ.In(typ.NumIn() - 1).Elem() // Argument is a slice.
- for ; i < len(args); i++ {
- argv[i] = s.evalArg(dot, argType, args[i])
- }
- }
- // Add final value if necessary.
- if final.IsValid() {
- t := typ.In(typ.NumIn() - 1)
- if typ.IsVariadic() {
- t = t.Elem()
- }
- argv[i] = s.validateType(final, t)
- }
- result := fun.Call(argv)
- // If we have an error that is not nil, stop execution and return that error to the caller.
- if len(result) == 2 && !result[1].IsNil() {
- s.at(node)
- s.errorf("error calling %s: %s", name, result[1].Interface().(error))
- }
- return result[0]
-}
-
-// canBeNil reports whether an untyped nil can be assigned to the type. See reflect.Zero.
-func canBeNil(typ reflect.Type) bool {
- switch typ.Kind() {
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
- return true
- }
- return false
-}
-
-// validateType guarantees that the value is valid and assignable to the type.
-func (s *state) validateType(value reflect.Value, typ reflect.Type) reflect.Value {
- if !value.IsValid() {
- if typ == nil || canBeNil(typ) {
- // An untyped nil interface{}. Accept as a proper nil value.
- return reflect.Zero(typ)
- }
- s.errorf("invalid value; expected %s", typ)
- }
- if typ != nil && !value.Type().AssignableTo(typ) {
- if value.Kind() == reflect.Interface && !value.IsNil() {
- value = value.Elem()
- if value.Type().AssignableTo(typ) {
- return value
- }
- // fallthrough
- }
- // Does one dereference or indirection work? We could do more, as we
- // do with method receivers, but that gets messy and method receivers
- // are much more constrained, so it makes more sense there than here.
- // Besides, one is almost always all you need.
- switch {
- case value.Kind() == reflect.Ptr && value.Type().Elem().AssignableTo(typ):
- value = value.Elem()
- if !value.IsValid() {
- s.errorf("dereference of nil pointer of type %s", typ)
- }
- case reflect.PtrTo(value.Type()).AssignableTo(typ) && value.CanAddr():
- value = value.Addr()
- default:
- s.errorf("wrong type for value; expected %s; got %s", typ, value.Type())
- }
- }
- return value
-}
-
-func (s *state) evalArg(dot reflect.Value, typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- switch arg := n.(type) {
- case *parse.DotNode:
- return s.validateType(dot, typ)
- case *parse.NilNode:
- if canBeNil(typ) {
- return reflect.Zero(typ)
- }
- s.errorf("cannot assign nil to %s", typ)
- case *parse.FieldNode:
- return s.validateType(s.evalFieldNode(dot, arg, []parse.Node{n}, zero), typ)
- case *parse.VariableNode:
- return s.validateType(s.evalVariableNode(dot, arg, nil, zero), typ)
- case *parse.PipeNode:
- return s.validateType(s.evalPipeline(dot, arg), typ)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, arg, arg, nil, zero)
- case *parse.ChainNode:
- return s.validateType(s.evalChainNode(dot, arg, nil, zero), typ)
- }
- switch typ.Kind() {
- case reflect.Bool:
- return s.evalBool(typ, n)
- case reflect.Complex64, reflect.Complex128:
- return s.evalComplex(typ, n)
- case reflect.Float32, reflect.Float64:
- return s.evalFloat(typ, n)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return s.evalInteger(typ, n)
- case reflect.Interface:
- if typ.NumMethod() == 0 {
- return s.evalEmptyInterface(dot, n)
- }
- case reflect.String:
- return s.evalString(typ, n)
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return s.evalUnsignedInteger(typ, n)
- }
- s.errorf("can't handle %s for arg of type %s", n, typ)
- panic("not reached")
-}
-
-func (s *state) evalBool(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.BoolNode); ok {
- value := reflect.New(typ).Elem()
- value.SetBool(n.True)
- return value
- }
- s.errorf("expected bool; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalString(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.StringNode); ok {
- value := reflect.New(typ).Elem()
- value.SetString(n.Text)
- return value
- }
- s.errorf("expected string; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalInteger(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsInt {
- value := reflect.New(typ).Elem()
- value.SetInt(n.Int64)
- return value
- }
- s.errorf("expected integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalUnsignedInteger(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsUint {
- value := reflect.New(typ).Elem()
- value.SetUint(n.Uint64)
- return value
- }
- s.errorf("expected unsigned integer; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalFloat(typ reflect.Type, n parse.Node) reflect.Value {
- s.at(n)
- if n, ok := n.(*parse.NumberNode); ok && n.IsFloat {
- value := reflect.New(typ).Elem()
- value.SetFloat(n.Float64)
- return value
- }
- s.errorf("expected float; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalComplex(typ reflect.Type, n parse.Node) reflect.Value {
- if n, ok := n.(*parse.NumberNode); ok && n.IsComplex {
- value := reflect.New(typ).Elem()
- value.SetComplex(n.Complex128)
- return value
- }
- s.errorf("expected complex; found %s", n)
- panic("not reached")
-}
-
-func (s *state) evalEmptyInterface(dot reflect.Value, n parse.Node) reflect.Value {
- s.at(n)
- switch n := n.(type) {
- case *parse.BoolNode:
- return reflect.ValueOf(n.True)
- case *parse.DotNode:
- return dot
- case *parse.FieldNode:
- return s.evalFieldNode(dot, n, nil, zero)
- case *parse.IdentifierNode:
- return s.evalFunction(dot, n, n, nil, zero)
- case *parse.NilNode:
- // NilNode is handled in evalArg, the only place that calls here.
- s.errorf("evalEmptyInterface: nil (can't happen)")
- case *parse.NumberNode:
- return s.idealConstant(n)
- case *parse.StringNode:
- return reflect.ValueOf(n.Text)
- case *parse.VariableNode:
- return s.evalVariableNode(dot, n, nil, zero)
- case *parse.PipeNode:
- return s.evalPipeline(dot, n)
- }
- s.errorf("can't handle assignment of %s to empty interface argument", n)
- panic("not reached")
-}
-
-// indirect returns the item at the end of indirection, and a bool to indicate if it's nil.
-// We indirect through pointers and empty interfaces (only) because
-// non-empty interfaces have methods we might need.
-func indirect(v reflect.Value) (rv reflect.Value, isNil bool) {
- for ; v.Kind() == reflect.Ptr || v.Kind() == reflect.Interface; v = v.Elem() {
- if v.IsNil() {
- return v, true
- }
- if v.Kind() == reflect.Interface && v.NumMethod() > 0 {
- break
- }
- }
- return v, false
-}
-
-// printValue writes the textual representation of the value to the output of
-// the template.
-func (s *state) printValue(n parse.Node, v reflect.Value) {
- s.at(n)
- iface, ok := printableValue(v)
- if !ok {
- s.errorf("can't print %s of type %s", n, v.Type())
- }
- fmt.Fprint(s.wr, iface)
-}
-
-// printableValue returns the, possibly indirected, interface value inside v that
-// is best for a call to formatted printer.
-func printableValue(v reflect.Value) (interface{}, bool) {
- if v.Kind() == reflect.Ptr {
- v, _ = indirect(v) // fmt.Fprint handles nil.
- }
- if !v.IsValid() {
- return "", true
- }
-
- if !v.Type().Implements(errorType) && !v.Type().Implements(fmtStringerType) {
- if v.CanAddr() && (reflect.PtrTo(v.Type()).Implements(errorType) || reflect.PtrTo(v.Type()).Implements(fmtStringerType)) {
- v = v.Addr()
- } else {
- switch v.Kind() {
- case reflect.Chan, reflect.Func:
- return nil, false
- }
- }
- }
- return v.Interface(), true
-}
-
-// Types to help sort the keys in a map for reproducible output.
-
-type rvs []reflect.Value
-
-func (x rvs) Len() int { return len(x) }
-func (x rvs) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
-
-type rvInts struct{ rvs }
-
-func (x rvInts) Less(i, j int) bool { return x.rvs[i].Int() < x.rvs[j].Int() }
-
-type rvUints struct{ rvs }
-
-func (x rvUints) Less(i, j int) bool { return x.rvs[i].Uint() < x.rvs[j].Uint() }
-
-type rvFloats struct{ rvs }
-
-func (x rvFloats) Less(i, j int) bool { return x.rvs[i].Float() < x.rvs[j].Float() }
-
-type rvStrings struct{ rvs }
-
-func (x rvStrings) Less(i, j int) bool { return x.rvs[i].String() < x.rvs[j].String() }
-
-// sortKeys sorts (if it can) the slice of reflect.Values, which is a slice of map keys.
-func sortKeys(v []reflect.Value) []reflect.Value {
- if len(v) <= 1 {
- return v
- }
- switch v[0].Kind() {
- case reflect.Float32, reflect.Float64:
- sort.Sort(rvFloats{v})
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- sort.Sort(rvInts{v})
- case reflect.String:
- sort.Sort(rvStrings{v})
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- sort.Sort(rvUints{v})
- }
- return v
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/funcs.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/funcs.go
deleted file mode 100644
index 39ee5ed..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/funcs.go
+++ /dev/null
@@ -1,598 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "net/url"
- "reflect"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// FuncMap is the type of the map defining the mapping from names to functions.
-// Each function must have either a single return value, or two return values of
-// which the second has type error. In that case, if the second (error)
-// return value evaluates to non-nil during execution, execution terminates and
-// Execute returns that error.
-type FuncMap map[string]interface{}
-
-var builtins = FuncMap{
- "and": and,
- "call": call,
- "html": HTMLEscaper,
- "index": index,
- "js": JSEscaper,
- "len": length,
- "not": not,
- "or": or,
- "print": fmt.Sprint,
- "printf": fmt.Sprintf,
- "println": fmt.Sprintln,
- "urlquery": URLQueryEscaper,
-
- // Comparisons
- "eq": eq, // ==
- "ge": ge, // >=
- "gt": gt, // >
- "le": le, // <=
- "lt": lt, // <
- "ne": ne, // !=
-}
-
-var builtinFuncs = createValueFuncs(builtins)
-
-// createValueFuncs turns a FuncMap into a map[string]reflect.Value
-func createValueFuncs(funcMap FuncMap) map[string]reflect.Value {
- m := make(map[string]reflect.Value)
- addValueFuncs(m, funcMap)
- return m
-}
-
-// addValueFuncs adds to values the functions in funcs, converting them to reflect.Values.
-func addValueFuncs(out map[string]reflect.Value, in FuncMap) {
- for name, fn := range in {
- v := reflect.ValueOf(fn)
- if v.Kind() != reflect.Func {
- panic("value for " + name + " not a function")
- }
- if !goodFunc(v.Type()) {
- panic(fmt.Errorf("can't install method/function %q with %d results", name, v.Type().NumOut()))
- }
- out[name] = v
- }
-}
-
-// addFuncs adds to values the functions in funcs. It does no checking of the input -
-// call addValueFuncs first.
-func addFuncs(out, in FuncMap) {
- for name, fn := range in {
- out[name] = fn
- }
-}
-
-// goodFunc checks that the function or method has the right result signature.
-func goodFunc(typ reflect.Type) bool {
- // We allow functions with 1 result or 2 results where the second is an error.
- switch {
- case typ.NumOut() == 1:
- return true
- case typ.NumOut() == 2 && typ.Out(1) == errorType:
- return true
- }
- return false
-}
-
-// findFunction looks for a function in the template, and global map.
-func findFunction(name string, tmpl *Template) (reflect.Value, bool) {
- if tmpl != nil && tmpl.common != nil {
- if fn := tmpl.execFuncs[name]; fn.IsValid() {
- return fn, true
- }
- }
- if fn := builtinFuncs[name]; fn.IsValid() {
- return fn, true
- }
- return reflect.Value{}, false
-}
-
-// Indexing.
-
-// index returns the result of indexing its first argument by the following
-// arguments. Thus "index x 1 2 3" is, in Go syntax, x[1][2][3]. Each
-// indexed item must be a map, slice, or array.
-func index(item interface{}, indices ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(item)
- for _, i := range indices {
- index := reflect.ValueOf(i)
- var isNil bool
- if v, isNil = indirect(v); isNil {
- return nil, fmt.Errorf("index of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Slice, reflect.String:
- var x int64
- switch index.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- x = index.Int()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- x = int64(index.Uint())
- default:
- return nil, fmt.Errorf("cannot index slice/array with type %s", index.Type())
- }
- if x < 0 || x >= int64(v.Len()) {
- return nil, fmt.Errorf("index out of range: %d", x)
- }
- v = v.Index(int(x))
- case reflect.Map:
- if !index.IsValid() {
- index = reflect.Zero(v.Type().Key())
- }
- if !index.Type().AssignableTo(v.Type().Key()) {
- return nil, fmt.Errorf("%s is not index type for %s", index.Type(), v.Type())
- }
- if x := v.MapIndex(index); x.IsValid() {
- v = x
- } else {
- v = reflect.Zero(v.Type().Elem())
- }
- default:
- return nil, fmt.Errorf("can't index item of type %s", v.Type())
- }
- }
- return v.Interface(), nil
-}
-
-// Length
-
-// length returns the length of the item, with an error if it has no defined length.
-func length(item interface{}) (int, error) {
- v, isNil := indirect(reflect.ValueOf(item))
- if isNil {
- return 0, fmt.Errorf("len of nil pointer")
- }
- switch v.Kind() {
- case reflect.Array, reflect.Chan, reflect.Map, reflect.Slice, reflect.String:
- return v.Len(), nil
- }
- return 0, fmt.Errorf("len of type %s", v.Type())
-}
-
-// Function invocation
-
-// call returns the result of evaluating the first argument as a function.
-// The function must return 1 result, or 2 results, the second of which is an error.
-func call(fn interface{}, args ...interface{}) (interface{}, error) {
- v := reflect.ValueOf(fn)
- typ := v.Type()
- if typ.Kind() != reflect.Func {
- return nil, fmt.Errorf("non-function of type %s", typ)
- }
- if !goodFunc(typ) {
- return nil, fmt.Errorf("function called with %d args; should be 1 or 2", typ.NumOut())
- }
- numIn := typ.NumIn()
- var dddType reflect.Type
- if typ.IsVariadic() {
- if len(args) < numIn-1 {
- return nil, fmt.Errorf("wrong number of args: got %d want at least %d", len(args), numIn-1)
- }
- dddType = typ.In(numIn - 1).Elem()
- } else {
- if len(args) != numIn {
- return nil, fmt.Errorf("wrong number of args: got %d want %d", len(args), numIn)
- }
- }
- argv := make([]reflect.Value, len(args))
- for i, arg := range args {
- value := reflect.ValueOf(arg)
- // Compute the expected type. Clumsy because of variadics.
- var argType reflect.Type
- if !typ.IsVariadic() || i < numIn-1 {
- argType = typ.In(i)
- } else {
- argType = dddType
- }
- if !value.IsValid() && canBeNil(argType) {
- value = reflect.Zero(argType)
- }
- if !value.Type().AssignableTo(argType) {
- return nil, fmt.Errorf("arg %d has type %s; should be %s", i, value.Type(), argType)
- }
- argv[i] = value
- }
- result := v.Call(argv)
- if len(result) == 2 && !result[1].IsNil() {
- return result[0].Interface(), result[1].Interface().(error)
- }
- return result[0].Interface(), nil
-}
-
-// Boolean logic.
-
-func truth(a interface{}) bool {
- t, _ := isTrue(reflect.ValueOf(a))
- return t
-}
-
-// and computes the Boolean AND of its arguments, returning
-// the first false argument it encounters, or the last argument.
-func and(arg0 interface{}, args ...interface{}) interface{} {
- if !truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if !truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// or computes the Boolean OR of its arguments, returning
-// the first true argument it encounters, or the last argument.
-func or(arg0 interface{}, args ...interface{}) interface{} {
- if truth(arg0) {
- return arg0
- }
- for i := range args {
- arg0 = args[i]
- if truth(arg0) {
- break
- }
- }
- return arg0
-}
-
-// not returns the Boolean negation of its argument.
-func not(arg interface{}) (truth bool) {
- truth, _ = isTrue(reflect.ValueOf(arg))
- return !truth
-}
-
-// Comparison.
-
-// TODO: Perhaps allow comparison between signed and unsigned integers.
-
-var (
- errBadComparisonType = errors.New("invalid type for comparison")
- errBadComparison = errors.New("incompatible types for comparison")
- errNoComparison = errors.New("missing argument for comparison")
-)
-
-type kind int
-
-const (
- invalidKind kind = iota
- boolKind
- complexKind
- intKind
- floatKind
- integerKind
- stringKind
- uintKind
-)
-
-func basicKind(v reflect.Value) (kind, error) {
- switch v.Kind() {
- case reflect.Bool:
- return boolKind, nil
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return intKind, nil
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return uintKind, nil
- case reflect.Float32, reflect.Float64:
- return floatKind, nil
- case reflect.Complex64, reflect.Complex128:
- return complexKind, nil
- case reflect.String:
- return stringKind, nil
- }
- return invalidKind, errBadComparisonType
-}
-
-// eq evaluates the comparison a == b || a == c || ...
-func eq(arg1 interface{}, arg2 ...interface{}) (bool, error) {
- v1 := reflect.ValueOf(arg1)
- k1, err := basicKind(v1)
- if err != nil {
- return false, err
- }
- if len(arg2) == 0 {
- return false, errNoComparison
- }
- for _, arg := range arg2 {
- v2 := reflect.ValueOf(arg)
- k2, err := basicKind(v2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = v1.Int() >= 0 && uint64(v1.Int()) == v2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = v2.Int() >= 0 && v1.Uint() == uint64(v2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind:
- truth = v1.Bool() == v2.Bool()
- case complexKind:
- truth = v1.Complex() == v2.Complex()
- case floatKind:
- truth = v1.Float() == v2.Float()
- case intKind:
- truth = v1.Int() == v2.Int()
- case stringKind:
- truth = v1.String() == v2.String()
- case uintKind:
- truth = v1.Uint() == v2.Uint()
- default:
- panic("invalid kind")
- }
- }
- if truth {
- return true, nil
- }
- }
- return false, nil
-}
-
-// ne evaluates the comparison a != b.
-func ne(arg1, arg2 interface{}) (bool, error) {
- // != is the inverse of ==.
- equal, err := eq(arg1, arg2)
- return !equal, err
-}
-
-// lt evaluates the comparison a < b.
-func lt(arg1, arg2 interface{}) (bool, error) {
- v1 := reflect.ValueOf(arg1)
- k1, err := basicKind(v1)
- if err != nil {
- return false, err
- }
- v2 := reflect.ValueOf(arg2)
- k2, err := basicKind(v2)
- if err != nil {
- return false, err
- }
- truth := false
- if k1 != k2 {
- // Special case: Can compare integer values regardless of type's sign.
- switch {
- case k1 == intKind && k2 == uintKind:
- truth = v1.Int() < 0 || uint64(v1.Int()) < v2.Uint()
- case k1 == uintKind && k2 == intKind:
- truth = v2.Int() >= 0 && v1.Uint() < uint64(v2.Int())
- default:
- return false, errBadComparison
- }
- } else {
- switch k1 {
- case boolKind, complexKind:
- return false, errBadComparisonType
- case floatKind:
- truth = v1.Float() < v2.Float()
- case intKind:
- truth = v1.Int() < v2.Int()
- case stringKind:
- truth = v1.String() < v2.String()
- case uintKind:
- truth = v1.Uint() < v2.Uint()
- default:
- panic("invalid kind")
- }
- }
- return truth, nil
-}
-
-// le evaluates the comparison <= b.
-func le(arg1, arg2 interface{}) (bool, error) {
- // <= is < or ==.
- lessThan, err := lt(arg1, arg2)
- if lessThan || err != nil {
- return lessThan, err
- }
- return eq(arg1, arg2)
-}
-
-// gt evaluates the comparison a > b.
-func gt(arg1, arg2 interface{}) (bool, error) {
- // > is the inverse of <=.
- lessOrEqual, err := le(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessOrEqual, nil
-}
-
-// ge evaluates the comparison a >= b.
-func ge(arg1, arg2 interface{}) (bool, error) {
- // >= is the inverse of <.
- lessThan, err := lt(arg1, arg2)
- if err != nil {
- return false, err
- }
- return !lessThan, nil
-}
-
-// HTML escaping.
-
-var (
- htmlQuot = []byte(""") // shorter than """
- htmlApos = []byte("'") // shorter than "'" and apos was not in HTML until HTML5
- htmlAmp = []byte("&")
- htmlLt = []byte("<")
- htmlGt = []byte(">")
-)
-
-// HTMLEscape writes to w the escaped HTML equivalent of the plain text data b.
-func HTMLEscape(w io.Writer, b []byte) {
- last := 0
- for i, c := range b {
- var html []byte
- switch c {
- case '"':
- html = htmlQuot
- case '\'':
- html = htmlApos
- case '&':
- html = htmlAmp
- case '<':
- html = htmlLt
- case '>':
- html = htmlGt
- default:
- continue
- }
- w.Write(b[last:i])
- w.Write(html)
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// HTMLEscapeString returns the escaped HTML equivalent of the plain text data s.
-func HTMLEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexAny(s, `'"&<>`) < 0 {
- return s
- }
- var b bytes.Buffer
- HTMLEscape(&b, []byte(s))
- return b.String()
-}
-
-// HTMLEscaper returns the escaped HTML equivalent of the textual
-// representation of its arguments.
-func HTMLEscaper(args ...interface{}) string {
- return HTMLEscapeString(evalArgs(args))
-}
-
-// JavaScript escaping.
-
-var (
- jsLowUni = []byte(`\u00`)
- hex = []byte("0123456789ABCDEF")
-
- jsBackslash = []byte(`\\`)
- jsApos = []byte(`\'`)
- jsQuot = []byte(`\"`)
- jsLt = []byte(`\x3C`)
- jsGt = []byte(`\x3E`)
-)
-
-// JSEscape writes to w the escaped JavaScript equivalent of the plain text data b.
-func JSEscape(w io.Writer, b []byte) {
- last := 0
- for i := 0; i < len(b); i++ {
- c := b[i]
-
- if !jsIsSpecial(rune(c)) {
- // fast path: nothing to do
- continue
- }
- w.Write(b[last:i])
-
- if c < utf8.RuneSelf {
- // Quotes, slashes and angle brackets get quoted.
- // Control characters get written as \u00XX.
- switch c {
- case '\\':
- w.Write(jsBackslash)
- case '\'':
- w.Write(jsApos)
- case '"':
- w.Write(jsQuot)
- case '<':
- w.Write(jsLt)
- case '>':
- w.Write(jsGt)
- default:
- w.Write(jsLowUni)
- t, b := c>>4, c&0x0f
- w.Write(hex[t : t+1])
- w.Write(hex[b : b+1])
- }
- } else {
- // Unicode rune.
- r, size := utf8.DecodeRune(b[i:])
- if unicode.IsPrint(r) {
- w.Write(b[i : i+size])
- } else {
- fmt.Fprintf(w, "\\u%04X", r)
- }
- i += size - 1
- }
- last = i + 1
- }
- w.Write(b[last:])
-}
-
-// JSEscapeString returns the escaped JavaScript equivalent of the plain text data s.
-func JSEscapeString(s string) string {
- // Avoid allocation if we can.
- if strings.IndexFunc(s, jsIsSpecial) < 0 {
- return s
- }
- var b bytes.Buffer
- JSEscape(&b, []byte(s))
- return b.String()
-}
-
-func jsIsSpecial(r rune) bool {
- switch r {
- case '\\', '\'', '"', '<', '>':
- return true
- }
- return r < ' ' || utf8.RuneSelf <= r
-}
-
-// JSEscaper returns the escaped JavaScript equivalent of the textual
-// representation of its arguments.
-func JSEscaper(args ...interface{}) string {
- return JSEscapeString(evalArgs(args))
-}
-
-// URLQueryEscaper returns the escaped value of the textual representation of
-// its arguments in a form suitable for embedding in a URL query.
-func URLQueryEscaper(args ...interface{}) string {
- return url.QueryEscape(evalArgs(args))
-}
-
-// evalArgs formats the list of arguments into a string. It is therefore equivalent to
-// fmt.Sprint(args...)
-// except that each argument is indirected (if a pointer), as required,
-// using the same rules as the default string evaluation during template
-// execution.
-func evalArgs(args []interface{}) string {
- ok := false
- var s string
- // Fast path for simple common case.
- if len(args) == 1 {
- s, ok = args[0].(string)
- }
- if !ok {
- for i, arg := range args {
- a, ok := printableValue(reflect.ValueOf(arg))
- if ok {
- args[i] = a
- } // else left fmt do its thing
- }
- s = fmt.Sprint(args...)
- }
- return s
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/helper.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/helper.go
deleted file mode 100644
index 3636fb5..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/helper.go
+++ /dev/null
@@ -1,108 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Helper functions to make constructing templates easier.
-
-package template
-
-import (
- "fmt"
- "io/ioutil"
- "path/filepath"
-)
-
-// Functions and methods to parse templates.
-
-// Must is a helper that wraps a call to a function returning (*Template, error)
-// and panics if the error is non-nil. It is intended for use in variable
-// initializations such as
-// var t = template.Must(template.New("name").Parse("text"))
-func Must(t *Template, err error) *Template {
- if err != nil {
- panic(err)
- }
- return t
-}
-
-// ParseFiles creates a new Template and parses the template definitions from
-// the named files. The returned template's name will have the (base) name and
-// (parsed) contents of the first file. There must be at least one file.
-// If an error occurs, parsing stops and the returned *Template is nil.
-func ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(nil, filenames...)
-}
-
-// ParseFiles parses the named files and associates the resulting templates with
-// t. If an error occurs, parsing stops and the returned template is nil;
-// otherwise it is t. There must be at least one file.
-func (t *Template) ParseFiles(filenames ...string) (*Template, error) {
- return parseFiles(t, filenames...)
-}
-
-// parseFiles is the helper for the method and function. If the argument
-// template is nil, it is created from the first file.
-func parseFiles(t *Template, filenames ...string) (*Template, error) {
- if len(filenames) == 0 {
- // Not really a problem, but be consistent.
- return nil, fmt.Errorf("template: no files named in call to ParseFiles")
- }
- for _, filename := range filenames {
- b, err := ioutil.ReadFile(filename)
- if err != nil {
- return nil, err
- }
- s := string(b)
- name := filepath.Base(filename)
- // First template becomes return value if not already defined,
- // and we use that one for subsequent New calls to associate
- // all the templates together. Also, if this file has the same name
- // as t, this file becomes the contents of t, so
- // t, err := New(name).Funcs(xxx).ParseFiles(name)
- // works. Otherwise we create a new template associated with t.
- var tmpl *Template
- if t == nil {
- t = New(name)
- }
- if name == t.Name() {
- tmpl = t
- } else {
- tmpl = t.New(name)
- }
- _, err = tmpl.Parse(s)
- if err != nil {
- return nil, err
- }
- }
- return t, nil
-}
-
-// ParseGlob creates a new Template and parses the template definitions from the
-// files identified by the pattern, which must match at least one file. The
-// returned template will have the (base) name and (parsed) contents of the
-// first file matched by the pattern. ParseGlob is equivalent to calling
-// ParseFiles with the list of files matched by the pattern.
-func ParseGlob(pattern string) (*Template, error) {
- return parseGlob(nil, pattern)
-}
-
-// ParseGlob parses the template definitions in the files identified by the
-// pattern and associates the resulting templates with t. The pattern is
-// processed by filepath.Glob and must match at least one file. ParseGlob is
-// equivalent to calling t.ParseFiles with the list of files matched by the
-// pattern.
-func (t *Template) ParseGlob(pattern string) (*Template, error) {
- return parseGlob(t, pattern)
-}
-
-// parseGlob is the implementation of the function and method ParseGlob.
-func parseGlob(t *Template, pattern string) (*Template, error) {
- filenames, err := filepath.Glob(pattern)
- if err != nil {
- return nil, err
- }
- if len(filenames) == 0 {
- return nil, fmt.Errorf("template: pattern matches no files: %#q", pattern)
- }
- return parseFiles(t, filenames...)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex.go
deleted file mode 100644
index 55f1c05..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/lex.go
+++ /dev/null
@@ -1,556 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package parse
-
-import (
- "fmt"
- "strings"
- "unicode"
- "unicode/utf8"
-)
-
-// item represents a token or text string returned from the scanner.
-type item struct {
- typ itemType // The type of this item.
- pos Pos // The starting position, in bytes, of this item in the input string.
- val string // The value of this item.
-}
-
-func (i item) String() string {
- switch {
- case i.typ == itemEOF:
- return "EOF"
- case i.typ == itemError:
- return i.val
- case i.typ > itemKeyword:
- return fmt.Sprintf("<%s>", i.val)
- case len(i.val) > 10:
- return fmt.Sprintf("%.10q...", i.val)
- }
- return fmt.Sprintf("%q", i.val)
-}
-
-// itemType identifies the type of lex items.
-type itemType int
-
-const (
- itemError itemType = iota // error occurred; value is text of error
- itemBool // boolean constant
- itemChar // printable ASCII character; grab bag for comma etc.
- itemCharConstant // character constant
- itemComplex // complex constant (1+2i); imaginary is just a number
- itemColonEquals // colon-equals (':=') introducing a declaration
- itemEOF
- itemField // alphanumeric identifier starting with '.'
- itemIdentifier // alphanumeric identifier not starting with '.'
- itemLeftDelim // left action delimiter
- itemLeftParen // '(' inside action
- itemNumber // simple number, including imaginary
- itemPipe // pipe symbol
- itemRawString // raw quoted string (includes quotes)
- itemRightDelim // right action delimiter
- itemElideNewline // elide newline after right delim
- itemRightParen // ')' inside action
- itemSpace // run of spaces separating arguments
- itemString // quoted string (includes quotes)
- itemText // plain text
- itemVariable // variable starting with '$', such as '$' or '$1' or '$hello'
- // Keywords appear after all the rest.
- itemKeyword // used only to delimit the keywords
- itemDot // the cursor, spelled '.'
- itemDefine // define keyword
- itemElse // else keyword
- itemEnd // end keyword
- itemIf // if keyword
- itemNil // the untyped nil constant, easiest to treat as a keyword
- itemRange // range keyword
- itemTemplate // template keyword
- itemWith // with keyword
-)
-
-var key = map[string]itemType{
- ".": itemDot,
- "define": itemDefine,
- "else": itemElse,
- "end": itemEnd,
- "if": itemIf,
- "range": itemRange,
- "nil": itemNil,
- "template": itemTemplate,
- "with": itemWith,
-}
-
-const eof = -1
-
-// stateFn represents the state of the scanner as a function that returns the next state.
-type stateFn func(*lexer) stateFn
-
-// lexer holds the state of the scanner.
-type lexer struct {
- name string // the name of the input; used only for error reports
- input string // the string being scanned
- leftDelim string // start of action
- rightDelim string // end of action
- state stateFn // the next lexing function to enter
- pos Pos // current position in the input
- start Pos // start position of this item
- width Pos // width of last rune read from input
- lastPos Pos // position of most recent item returned by nextItem
- items chan item // channel of scanned items
- parenDepth int // nesting depth of ( ) exprs
-}
-
-// next returns the next rune in the input.
-func (l *lexer) next() rune {
- if int(l.pos) >= len(l.input) {
- l.width = 0
- return eof
- }
- r, w := utf8.DecodeRuneInString(l.input[l.pos:])
- l.width = Pos(w)
- l.pos += l.width
- return r
-}
-
-// peek returns but does not consume the next rune in the input.
-func (l *lexer) peek() rune {
- r := l.next()
- l.backup()
- return r
-}
-
-// backup steps back one rune. Can only be called once per call of next.
-func (l *lexer) backup() {
- l.pos -= l.width
-}
-
-// emit passes an item back to the client.
-func (l *lexer) emit(t itemType) {
- l.items <- item{t, l.start, l.input[l.start:l.pos]}
- l.start = l.pos
-}
-
-// ignore skips over the pending input before this point.
-func (l *lexer) ignore() {
- l.start = l.pos
-}
-
-// accept consumes the next rune if it's from the valid set.
-func (l *lexer) accept(valid string) bool {
- if strings.IndexRune(valid, l.next()) >= 0 {
- return true
- }
- l.backup()
- return false
-}
-
-// acceptRun consumes a run of runes from the valid set.
-func (l *lexer) acceptRun(valid string) {
- for strings.IndexRune(valid, l.next()) >= 0 {
- }
- l.backup()
-}
-
-// lineNumber reports which line we're on, based on the position of
-// the previous item returned by nextItem. Doing it this way
-// means we don't have to worry about peek double counting.
-func (l *lexer) lineNumber() int {
- return 1 + strings.Count(l.input[:l.lastPos], "\n")
-}
-
-// errorf returns an error token and terminates the scan by passing
-// back a nil pointer that will be the next state, terminating l.nextItem.
-func (l *lexer) errorf(format string, args ...interface{}) stateFn {
- l.items <- item{itemError, l.start, fmt.Sprintf(format, args...)}
- return nil
-}
-
-// nextItem returns the next item from the input.
-func (l *lexer) nextItem() item {
- item := <-l.items
- l.lastPos = item.pos
- return item
-}
-
-// lex creates a new scanner for the input string.
-func lex(name, input, left, right string) *lexer {
- if left == "" {
- left = leftDelim
- }
- if right == "" {
- right = rightDelim
- }
- l := &lexer{
- name: name,
- input: input,
- leftDelim: left,
- rightDelim: right,
- items: make(chan item),
- }
- go l.run()
- return l
-}
-
-// run runs the state machine for the lexer.
-func (l *lexer) run() {
- for l.state = lexText; l.state != nil; {
- l.state = l.state(l)
- }
-}
-
-// state functions
-
-const (
- leftDelim = "{{"
- rightDelim = "}}"
- leftComment = "/*"
- rightComment = "*/"
-)
-
-// lexText scans until an opening action delimiter, "{{".
-func lexText(l *lexer) stateFn {
- for {
- if strings.HasPrefix(l.input[l.pos:], l.leftDelim) {
- if l.pos > l.start {
- l.emit(itemText)
- }
- return lexLeftDelim
- }
- if l.next() == eof {
- break
- }
- }
- // Correctly reached EOF.
- if l.pos > l.start {
- l.emit(itemText)
- }
- l.emit(itemEOF)
- return nil
-}
-
-// lexLeftDelim scans the left delimiter, which is known to be present.
-func lexLeftDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.leftDelim))
- if strings.HasPrefix(l.input[l.pos:], leftComment) {
- return lexComment
- }
- l.emit(itemLeftDelim)
- l.parenDepth = 0
- return lexInsideAction
-}
-
-// lexComment scans a comment. The left comment marker is known to be present.
-func lexComment(l *lexer) stateFn {
- l.pos += Pos(len(leftComment))
- i := strings.Index(l.input[l.pos:], rightComment)
- if i < 0 {
- return l.errorf("unclosed comment")
- }
- l.pos += Pos(i + len(rightComment))
- if !strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- return l.errorf("comment ends before closing delimiter")
-
- }
- l.pos += Pos(len(l.rightDelim))
- l.ignore()
- return lexText
-}
-
-// lexRightDelim scans the right delimiter, which is known to be present.
-func lexRightDelim(l *lexer) stateFn {
- l.pos += Pos(len(l.rightDelim))
- l.emit(itemRightDelim)
- if l.peek() == '\\' {
- l.pos++
- l.emit(itemElideNewline)
- }
- return lexText
-}
-
-// lexInsideAction scans the elements inside action delimiters.
-func lexInsideAction(l *lexer) stateFn {
- // Either number, quoted string, or identifier.
- // Spaces separate arguments; runs of spaces turn into itemSpace.
- // Pipe symbols separate and are emitted.
- if strings.HasPrefix(l.input[l.pos:], l.rightDelim+"\\") || strings.HasPrefix(l.input[l.pos:], l.rightDelim) {
- if l.parenDepth == 0 {
- return lexRightDelim
- }
- return l.errorf("unclosed left paren")
- }
- switch r := l.next(); {
- case r == eof || isEndOfLine(r):
- return l.errorf("unclosed action")
- case isSpace(r):
- return lexSpace
- case r == ':':
- if l.next() != '=' {
- return l.errorf("expected :=")
- }
- l.emit(itemColonEquals)
- case r == '|':
- l.emit(itemPipe)
- case r == '"':
- return lexQuote
- case r == '`':
- return lexRawQuote
- case r == '$':
- return lexVariable
- case r == '\'':
- return lexChar
- case r == '.':
- // special look-ahead for ".field" so we don't break l.backup().
- if l.pos < Pos(len(l.input)) {
- r := l.input[l.pos]
- if r < '0' || '9' < r {
- return lexField
- }
- }
- fallthrough // '.' can start a number.
- case r == '+' || r == '-' || ('0' <= r && r <= '9'):
- l.backup()
- return lexNumber
- case isAlphaNumeric(r):
- l.backup()
- return lexIdentifier
- case r == '(':
- l.emit(itemLeftParen)
- l.parenDepth++
- return lexInsideAction
- case r == ')':
- l.emit(itemRightParen)
- l.parenDepth--
- if l.parenDepth < 0 {
- return l.errorf("unexpected right paren %#U", r)
- }
- return lexInsideAction
- case r <= unicode.MaxASCII && unicode.IsPrint(r):
- l.emit(itemChar)
- return lexInsideAction
- default:
- return l.errorf("unrecognized character in action: %#U", r)
- }
- return lexInsideAction
-}
-
-// lexSpace scans a run of space characters.
-// One space has already been seen.
-func lexSpace(l *lexer) stateFn {
- for isSpace(l.peek()) {
- l.next()
- }
- l.emit(itemSpace)
- return lexInsideAction
-}
-
-// lexIdentifier scans an alphanumeric.
-func lexIdentifier(l *lexer) stateFn {
-Loop:
- for {
- switch r := l.next(); {
- case isAlphaNumeric(r):
- // absorb.
- default:
- l.backup()
- word := l.input[l.start:l.pos]
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- switch {
- case key[word] > itemKeyword:
- l.emit(key[word])
- case word[0] == '.':
- l.emit(itemField)
- case word == "true", word == "false":
- l.emit(itemBool)
- default:
- l.emit(itemIdentifier)
- }
- break Loop
- }
- }
- return lexInsideAction
-}
-
-// lexField scans a field: .Alphanumeric.
-// The . has been scanned.
-func lexField(l *lexer) stateFn {
- return lexFieldOrVariable(l, itemField)
-}
-
-// lexVariable scans a Variable: $Alphanumeric.
-// The $ has been scanned.
-func lexVariable(l *lexer) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "$".
- l.emit(itemVariable)
- return lexInsideAction
- }
- return lexFieldOrVariable(l, itemVariable)
-}
-
-// lexVariable scans a field or variable: [.$]Alphanumeric.
-// The . or $ has been scanned.
-func lexFieldOrVariable(l *lexer, typ itemType) stateFn {
- if l.atTerminator() { // Nothing interesting follows -> "." or "$".
- if typ == itemVariable {
- l.emit(itemVariable)
- } else {
- l.emit(itemDot)
- }
- return lexInsideAction
- }
- var r rune
- for {
- r = l.next()
- if !isAlphaNumeric(r) {
- l.backup()
- break
- }
- }
- if !l.atTerminator() {
- return l.errorf("bad character %#U", r)
- }
- l.emit(typ)
- return lexInsideAction
-}
-
-// atTerminator reports whether the input is at valid termination character to
-// appear after an identifier. Breaks .X.Y into two pieces. Also catches cases
-// like "$x+2" not being acceptable without a space, in case we decide one
-// day to implement arithmetic.
-func (l *lexer) atTerminator() bool {
- r := l.peek()
- if isSpace(r) || isEndOfLine(r) {
- return true
- }
- switch r {
- case eof, '.', ',', '|', ':', ')', '(':
- return true
- }
- // Does r start the delimiter? This can be ambiguous (with delim=="//", $x/2 will
- // succeed but should fail) but only in extremely rare cases caused by willfully
- // bad choice of delimiter.
- if rd, _ := utf8.DecodeRuneInString(l.rightDelim); rd == r {
- return true
- }
- return false
-}
-
-// lexChar scans a character constant. The initial quote is already
-// scanned. Syntax checking is done by the parser.
-func lexChar(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated character constant")
- case '\'':
- break Loop
- }
- }
- l.emit(itemCharConstant)
- return lexInsideAction
-}
-
-// lexNumber scans a number: decimal, octal, hex, float, or imaginary. This
-// isn't a perfect number scanner - for instance it accepts "." and "0x0.2"
-// and "089" - but when it's wrong the input is invalid and the parser (via
-// strconv) will notice.
-func lexNumber(l *lexer) stateFn {
- if !l.scanNumber() {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- if sign := l.peek(); sign == '+' || sign == '-' {
- // Complex: 1+2i. No spaces, must end in 'i'.
- if !l.scanNumber() || l.input[l.pos-1] != 'i' {
- return l.errorf("bad number syntax: %q", l.input[l.start:l.pos])
- }
- l.emit(itemComplex)
- } else {
- l.emit(itemNumber)
- }
- return lexInsideAction
-}
-
-func (l *lexer) scanNumber() bool {
- // Optional leading sign.
- l.accept("+-")
- // Is it hex?
- digits := "0123456789"
- if l.accept("0") && l.accept("xX") {
- digits = "0123456789abcdefABCDEF"
- }
- l.acceptRun(digits)
- if l.accept(".") {
- l.acceptRun(digits)
- }
- if l.accept("eE") {
- l.accept("+-")
- l.acceptRun("0123456789")
- }
- // Is it imaginary?
- l.accept("i")
- // Next thing mustn't be alphanumeric.
- if isAlphaNumeric(l.peek()) {
- l.next()
- return false
- }
- return true
-}
-
-// lexQuote scans a quoted string.
-func lexQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case '\\':
- if r := l.next(); r != eof && r != '\n' {
- break
- }
- fallthrough
- case eof, '\n':
- return l.errorf("unterminated quoted string")
- case '"':
- break Loop
- }
- }
- l.emit(itemString)
- return lexInsideAction
-}
-
-// lexRawQuote scans a raw quoted string.
-func lexRawQuote(l *lexer) stateFn {
-Loop:
- for {
- switch l.next() {
- case eof, '\n':
- return l.errorf("unterminated raw quoted string")
- case '`':
- break Loop
- }
- }
- l.emit(itemRawString)
- return lexInsideAction
-}
-
-// isSpace reports whether r is a space character.
-func isSpace(r rune) bool {
- return r == ' ' || r == '\t'
-}
-
-// isEndOfLine reports whether r is an end-of-line character.
-func isEndOfLine(r rune) bool {
- return r == '\r' || r == '\n'
-}
-
-// isAlphaNumeric reports whether r is an alphabetic, digit, or underscore.
-func isAlphaNumeric(r rune) bool {
- return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/node.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/node.go
deleted file mode 100644
index 55c37f6..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/node.go
+++ /dev/null
@@ -1,834 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Parse nodes.
-
-package parse
-
-import (
- "bytes"
- "fmt"
- "strconv"
- "strings"
-)
-
-var textFormat = "%s" // Changed to "%q" in tests for better error messages.
-
-// A Node is an element in the parse tree. The interface is trivial.
-// The interface contains an unexported method so that only
-// types local to this package can satisfy it.
-type Node interface {
- Type() NodeType
- String() string
- // Copy does a deep copy of the Node and all its components.
- // To avoid type assertions, some XxxNodes also have specialized
- // CopyXxx methods that return *XxxNode.
- Copy() Node
- Position() Pos // byte position of start of node in full original input string
- // tree returns the containing *Tree.
- // It is unexported so all implementations of Node are in this package.
- tree() *Tree
-}
-
-// NodeType identifies the type of a parse tree node.
-type NodeType int
-
-// Pos represents a byte position in the original input text from which
-// this template was parsed.
-type Pos int
-
-func (p Pos) Position() Pos {
- return p
-}
-
-// Type returns itself and provides an easy default implementation
-// for embedding in a Node. Embedded in all non-trivial Nodes.
-func (t NodeType) Type() NodeType {
- return t
-}
-
-const (
- NodeText NodeType = iota // Plain text.
- NodeAction // A non-control action such as a field evaluation.
- NodeBool // A boolean constant.
- NodeChain // A sequence of field accesses.
- NodeCommand // An element of a pipeline.
- NodeDot // The cursor, dot.
- nodeElse // An else action. Not added to tree.
- nodeEnd // An end action. Not added to tree.
- NodeField // A field or method name.
- NodeIdentifier // An identifier; always a function name.
- NodeIf // An if action.
- NodeList // A list of Nodes.
- NodeNil // An untyped nil constant.
- NodeNumber // A numerical constant.
- NodePipe // A pipeline of commands.
- NodeRange // A range action.
- NodeString // A string constant.
- NodeTemplate // A template invocation action.
- NodeVariable // A $ variable.
- NodeWith // A with action.
-)
-
-// Nodes.
-
-// ListNode holds a sequence of nodes.
-type ListNode struct {
- NodeType
- Pos
- tr *Tree
- Nodes []Node // The element nodes in lexical order.
-}
-
-func (t *Tree) newList(pos Pos) *ListNode {
- return &ListNode{tr: t, NodeType: NodeList, Pos: pos}
-}
-
-func (l *ListNode) append(n Node) {
- l.Nodes = append(l.Nodes, n)
-}
-
-func (l *ListNode) tree() *Tree {
- return l.tr
-}
-
-func (l *ListNode) String() string {
- b := new(bytes.Buffer)
- for _, n := range l.Nodes {
- fmt.Fprint(b, n)
- }
- return b.String()
-}
-
-func (l *ListNode) CopyList() *ListNode {
- if l == nil {
- return l
- }
- n := l.tr.newList(l.Pos)
- for _, elem := range l.Nodes {
- n.append(elem.Copy())
- }
- return n
-}
-
-func (l *ListNode) Copy() Node {
- return l.CopyList()
-}
-
-// TextNode holds plain text.
-type TextNode struct {
- NodeType
- Pos
- tr *Tree
- Text []byte // The text; may span newlines.
-}
-
-func (t *Tree) newText(pos Pos, text string) *TextNode {
- return &TextNode{tr: t, NodeType: NodeText, Pos: pos, Text: []byte(text)}
-}
-
-func (t *TextNode) String() string {
- return fmt.Sprintf(textFormat, t.Text)
-}
-
-func (t *TextNode) tree() *Tree {
- return t.tr
-}
-
-func (t *TextNode) Copy() Node {
- return &TextNode{tr: t.tr, NodeType: NodeText, Pos: t.Pos, Text: append([]byte{}, t.Text...)}
-}
-
-// PipeNode holds a pipeline with optional declaration
-type PipeNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Decl []*VariableNode // Variable declarations in lexical order.
- Cmds []*CommandNode // The commands in lexical order.
-}
-
-func (t *Tree) newPipeline(pos Pos, line int, decl []*VariableNode) *PipeNode {
- return &PipeNode{tr: t, NodeType: NodePipe, Pos: pos, Line: line, Decl: decl}
-}
-
-func (p *PipeNode) append(command *CommandNode) {
- p.Cmds = append(p.Cmds, command)
-}
-
-func (p *PipeNode) String() string {
- s := ""
- if len(p.Decl) > 0 {
- for i, v := range p.Decl {
- if i > 0 {
- s += ", "
- }
- s += v.String()
- }
- s += " := "
- }
- for i, c := range p.Cmds {
- if i > 0 {
- s += " | "
- }
- s += c.String()
- }
- return s
-}
-
-func (p *PipeNode) tree() *Tree {
- return p.tr
-}
-
-func (p *PipeNode) CopyPipe() *PipeNode {
- if p == nil {
- return p
- }
- var decl []*VariableNode
- for _, d := range p.Decl {
- decl = append(decl, d.Copy().(*VariableNode))
- }
- n := p.tr.newPipeline(p.Pos, p.Line, decl)
- for _, c := range p.Cmds {
- n.append(c.Copy().(*CommandNode))
- }
- return n
-}
-
-func (p *PipeNode) Copy() Node {
- return p.CopyPipe()
-}
-
-// ActionNode holds an action (something bounded by delimiters).
-// Control actions have their own nodes; ActionNode represents simple
-// ones such as field evaluations and parenthesized pipelines.
-type ActionNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Pipe *PipeNode // The pipeline in the action.
-}
-
-func (t *Tree) newAction(pos Pos, line int, pipe *PipeNode) *ActionNode {
- return &ActionNode{tr: t, NodeType: NodeAction, Pos: pos, Line: line, Pipe: pipe}
-}
-
-func (a *ActionNode) String() string {
- return fmt.Sprintf("{{%s}}", a.Pipe)
-
-}
-
-func (a *ActionNode) tree() *Tree {
- return a.tr
-}
-
-func (a *ActionNode) Copy() Node {
- return a.tr.newAction(a.Pos, a.Line, a.Pipe.CopyPipe())
-
-}
-
-// CommandNode holds a command (a pipeline inside an evaluating action).
-type CommandNode struct {
- NodeType
- Pos
- tr *Tree
- Args []Node // Arguments in lexical order: Identifier, field, or constant.
-}
-
-func (t *Tree) newCommand(pos Pos) *CommandNode {
- return &CommandNode{tr: t, NodeType: NodeCommand, Pos: pos}
-}
-
-func (c *CommandNode) append(arg Node) {
- c.Args = append(c.Args, arg)
-}
-
-func (c *CommandNode) String() string {
- s := ""
- for i, arg := range c.Args {
- if i > 0 {
- s += " "
- }
- if arg, ok := arg.(*PipeNode); ok {
- s += "(" + arg.String() + ")"
- continue
- }
- s += arg.String()
- }
- return s
-}
-
-func (c *CommandNode) tree() *Tree {
- return c.tr
-}
-
-func (c *CommandNode) Copy() Node {
- if c == nil {
- return c
- }
- n := c.tr.newCommand(c.Pos)
- for _, c := range c.Args {
- n.append(c.Copy())
- }
- return n
-}
-
-// IdentifierNode holds an identifier.
-type IdentifierNode struct {
- NodeType
- Pos
- tr *Tree
- Ident string // The identifier's name.
-}
-
-// NewIdentifier returns a new IdentifierNode with the given identifier name.
-func NewIdentifier(ident string) *IdentifierNode {
- return &IdentifierNode{NodeType: NodeIdentifier, Ident: ident}
-}
-
-// SetPos sets the position. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetPos(pos Pos) *IdentifierNode {
- i.Pos = pos
- return i
-}
-
-// SetTree sets the parent tree for the node. NewIdentifier is a public method so we can't modify its signature.
-// Chained for convenience.
-// TODO: fix one day?
-func (i *IdentifierNode) SetTree(t *Tree) *IdentifierNode {
- i.tr = t
- return i
-}
-
-func (i *IdentifierNode) String() string {
- return i.Ident
-}
-
-func (i *IdentifierNode) tree() *Tree {
- return i.tr
-}
-
-func (i *IdentifierNode) Copy() Node {
- return NewIdentifier(i.Ident).SetTree(i.tr).SetPos(i.Pos)
-}
-
-// VariableNode holds a list of variable names, possibly with chained field
-// accesses. The dollar sign is part of the (first) name.
-type VariableNode struct {
- NodeType
- Pos
- tr *Tree
- Ident []string // Variable name and fields in lexical order.
-}
-
-func (t *Tree) newVariable(pos Pos, ident string) *VariableNode {
- return &VariableNode{tr: t, NodeType: NodeVariable, Pos: pos, Ident: strings.Split(ident, ".")}
-}
-
-func (v *VariableNode) String() string {
- s := ""
- for i, id := range v.Ident {
- if i > 0 {
- s += "."
- }
- s += id
- }
- return s
-}
-
-func (v *VariableNode) tree() *Tree {
- return v.tr
-}
-
-func (v *VariableNode) Copy() Node {
- return &VariableNode{tr: v.tr, NodeType: NodeVariable, Pos: v.Pos, Ident: append([]string{}, v.Ident...)}
-}
-
-// DotNode holds the special identifier '.'.
-type DotNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newDot(pos Pos) *DotNode {
- return &DotNode{tr: t, NodeType: NodeDot, Pos: pos}
-}
-
-func (d *DotNode) Type() NodeType {
- // Override method on embedded NodeType for API compatibility.
- // TODO: Not really a problem; could change API without effect but
- // api tool complains.
- return NodeDot
-}
-
-func (d *DotNode) String() string {
- return "."
-}
-
-func (d *DotNode) tree() *Tree {
- return d.tr
-}
-
-func (d *DotNode) Copy() Node {
- return d.tr.newDot(d.Pos)
-}
-
-// NilNode holds the special identifier 'nil' representing an untyped nil constant.
-type NilNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newNil(pos Pos) *NilNode {
- return &NilNode{tr: t, NodeType: NodeNil, Pos: pos}
-}
-
-func (n *NilNode) Type() NodeType {
- // Override method on embedded NodeType for API compatibility.
- // TODO: Not really a problem; could change API without effect but
- // api tool complains.
- return NodeNil
-}
-
-func (n *NilNode) String() string {
- return "nil"
-}
-
-func (n *NilNode) tree() *Tree {
- return n.tr
-}
-
-func (n *NilNode) Copy() Node {
- return n.tr.newNil(n.Pos)
-}
-
-// FieldNode holds a field (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The period is dropped from each ident.
-type FieldNode struct {
- NodeType
- Pos
- tr *Tree
- Ident []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newField(pos Pos, ident string) *FieldNode {
- return &FieldNode{tr: t, NodeType: NodeField, Pos: pos, Ident: strings.Split(ident[1:], ".")} // [1:] to drop leading period
-}
-
-func (f *FieldNode) String() string {
- s := ""
- for _, id := range f.Ident {
- s += "." + id
- }
- return s
-}
-
-func (f *FieldNode) tree() *Tree {
- return f.tr
-}
-
-func (f *FieldNode) Copy() Node {
- return &FieldNode{tr: f.tr, NodeType: NodeField, Pos: f.Pos, Ident: append([]string{}, f.Ident...)}
-}
-
-// ChainNode holds a term followed by a chain of field accesses (identifier starting with '.').
-// The names may be chained ('.x.y').
-// The periods are dropped from each ident.
-type ChainNode struct {
- NodeType
- Pos
- tr *Tree
- Node Node
- Field []string // The identifiers in lexical order.
-}
-
-func (t *Tree) newChain(pos Pos, node Node) *ChainNode {
- return &ChainNode{tr: t, NodeType: NodeChain, Pos: pos, Node: node}
-}
-
-// Add adds the named field (which should start with a period) to the end of the chain.
-func (c *ChainNode) Add(field string) {
- if len(field) == 0 || field[0] != '.' {
- panic("no dot in field")
- }
- field = field[1:] // Remove leading dot.
- if field == "" {
- panic("empty field")
- }
- c.Field = append(c.Field, field)
-}
-
-func (c *ChainNode) String() string {
- s := c.Node.String()
- if _, ok := c.Node.(*PipeNode); ok {
- s = "(" + s + ")"
- }
- for _, field := range c.Field {
- s += "." + field
- }
- return s
-}
-
-func (c *ChainNode) tree() *Tree {
- return c.tr
-}
-
-func (c *ChainNode) Copy() Node {
- return &ChainNode{tr: c.tr, NodeType: NodeChain, Pos: c.Pos, Node: c.Node, Field: append([]string{}, c.Field...)}
-}
-
-// BoolNode holds a boolean constant.
-type BoolNode struct {
- NodeType
- Pos
- tr *Tree
- True bool // The value of the boolean constant.
-}
-
-func (t *Tree) newBool(pos Pos, true bool) *BoolNode {
- return &BoolNode{tr: t, NodeType: NodeBool, Pos: pos, True: true}
-}
-
-func (b *BoolNode) String() string {
- if b.True {
- return "true"
- }
- return "false"
-}
-
-func (b *BoolNode) tree() *Tree {
- return b.tr
-}
-
-func (b *BoolNode) Copy() Node {
- return b.tr.newBool(b.Pos, b.True)
-}
-
-// NumberNode holds a number: signed or unsigned integer, float, or complex.
-// The value is parsed and stored under all the types that can represent the value.
-// This simulates in a small amount of code the behavior of Go's ideal constants.
-type NumberNode struct {
- NodeType
- Pos
- tr *Tree
- IsInt bool // Number has an integral value.
- IsUint bool // Number has an unsigned integral value.
- IsFloat bool // Number has a floating-point value.
- IsComplex bool // Number is complex.
- Int64 int64 // The signed integer value.
- Uint64 uint64 // The unsigned integer value.
- Float64 float64 // The floating-point value.
- Complex128 complex128 // The complex value.
- Text string // The original textual representation from the input.
-}
-
-func (t *Tree) newNumber(pos Pos, text string, typ itemType) (*NumberNode, error) {
- n := &NumberNode{tr: t, NodeType: NodeNumber, Pos: pos, Text: text}
- switch typ {
- case itemCharConstant:
- rune, _, tail, err := strconv.UnquoteChar(text[1:], text[0])
- if err != nil {
- return nil, err
- }
- if tail != "'" {
- return nil, fmt.Errorf("malformed character constant: %s", text)
- }
- n.Int64 = int64(rune)
- n.IsInt = true
- n.Uint64 = uint64(rune)
- n.IsUint = true
- n.Float64 = float64(rune) // odd but those are the rules.
- n.IsFloat = true
- return n, nil
- case itemComplex:
- // fmt.Sscan can parse the pair, so let it do the work.
- if _, err := fmt.Sscan(text, &n.Complex128); err != nil {
- return nil, err
- }
- n.IsComplex = true
- n.simplifyComplex()
- return n, nil
- }
- // Imaginary constants can only be complex unless they are zero.
- if len(text) > 0 && text[len(text)-1] == 'i' {
- f, err := strconv.ParseFloat(text[:len(text)-1], 64)
- if err == nil {
- n.IsComplex = true
- n.Complex128 = complex(0, f)
- n.simplifyComplex()
- return n, nil
- }
- }
- // Do integer test first so we get 0x123 etc.
- u, err := strconv.ParseUint(text, 0, 64) // will fail for -0; fixed below.
- if err == nil {
- n.IsUint = true
- n.Uint64 = u
- }
- i, err := strconv.ParseInt(text, 0, 64)
- if err == nil {
- n.IsInt = true
- n.Int64 = i
- if i == 0 {
- n.IsUint = true // in case of -0.
- n.Uint64 = u
- }
- }
- // If an integer extraction succeeded, promote the float.
- if n.IsInt {
- n.IsFloat = true
- n.Float64 = float64(n.Int64)
- } else if n.IsUint {
- n.IsFloat = true
- n.Float64 = float64(n.Uint64)
- } else {
- f, err := strconv.ParseFloat(text, 64)
- if err == nil {
- n.IsFloat = true
- n.Float64 = f
- // If a floating-point extraction succeeded, extract the int if needed.
- if !n.IsInt && float64(int64(f)) == f {
- n.IsInt = true
- n.Int64 = int64(f)
- }
- if !n.IsUint && float64(uint64(f)) == f {
- n.IsUint = true
- n.Uint64 = uint64(f)
- }
- }
- }
- if !n.IsInt && !n.IsUint && !n.IsFloat {
- return nil, fmt.Errorf("illegal number syntax: %q", text)
- }
- return n, nil
-}
-
-// simplifyComplex pulls out any other types that are represented by the complex number.
-// These all require that the imaginary part be zero.
-func (n *NumberNode) simplifyComplex() {
- n.IsFloat = imag(n.Complex128) == 0
- if n.IsFloat {
- n.Float64 = real(n.Complex128)
- n.IsInt = float64(int64(n.Float64)) == n.Float64
- if n.IsInt {
- n.Int64 = int64(n.Float64)
- }
- n.IsUint = float64(uint64(n.Float64)) == n.Float64
- if n.IsUint {
- n.Uint64 = uint64(n.Float64)
- }
- }
-}
-
-func (n *NumberNode) String() string {
- return n.Text
-}
-
-func (n *NumberNode) tree() *Tree {
- return n.tr
-}
-
-func (n *NumberNode) Copy() Node {
- nn := new(NumberNode)
- *nn = *n // Easy, fast, correct.
- return nn
-}
-
-// StringNode holds a string constant. The value has been "unquoted".
-type StringNode struct {
- NodeType
- Pos
- tr *Tree
- Quoted string // The original text of the string, with quotes.
- Text string // The string, after quote processing.
-}
-
-func (t *Tree) newString(pos Pos, orig, text string) *StringNode {
- return &StringNode{tr: t, NodeType: NodeString, Pos: pos, Quoted: orig, Text: text}
-}
-
-func (s *StringNode) String() string {
- return s.Quoted
-}
-
-func (s *StringNode) tree() *Tree {
- return s.tr
-}
-
-func (s *StringNode) Copy() Node {
- return s.tr.newString(s.Pos, s.Quoted, s.Text)
-}
-
-// endNode represents an {{end}} action.
-// It does not appear in the final parse tree.
-type endNode struct {
- NodeType
- Pos
- tr *Tree
-}
-
-func (t *Tree) newEnd(pos Pos) *endNode {
- return &endNode{tr: t, NodeType: nodeEnd, Pos: pos}
-}
-
-func (e *endNode) String() string {
- return "{{end}}"
-}
-
-func (e *endNode) tree() *Tree {
- return e.tr
-}
-
-func (e *endNode) Copy() Node {
- return e.tr.newEnd(e.Pos)
-}
-
-// elseNode represents an {{else}} action. Does not appear in the final tree.
-type elseNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
-}
-
-func (t *Tree) newElse(pos Pos, line int) *elseNode {
- return &elseNode{tr: t, NodeType: nodeElse, Pos: pos, Line: line}
-}
-
-func (e *elseNode) Type() NodeType {
- return nodeElse
-}
-
-func (e *elseNode) String() string {
- return "{{else}}"
-}
-
-func (e *elseNode) tree() *Tree {
- return e.tr
-}
-
-func (e *elseNode) Copy() Node {
- return e.tr.newElse(e.Pos, e.Line)
-}
-
-// BranchNode is the common representation of if, range, and with.
-type BranchNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Pipe *PipeNode // The pipeline to be evaluated.
- List *ListNode // What to execute if the value is non-empty.
- ElseList *ListNode // What to execute if the value is empty (nil if absent).
-}
-
-func (b *BranchNode) String() string {
- name := ""
- switch b.NodeType {
- case NodeIf:
- name = "if"
- case NodeRange:
- name = "range"
- case NodeWith:
- name = "with"
- default:
- panic("unknown branch type")
- }
- if b.ElseList != nil {
- return fmt.Sprintf("{{%s %s}}%s{{else}}%s{{end}}", name, b.Pipe, b.List, b.ElseList)
- }
- return fmt.Sprintf("{{%s %s}}%s{{end}}", name, b.Pipe, b.List)
-}
-
-func (b *BranchNode) tree() *Tree {
- return b.tr
-}
-
-func (b *BranchNode) Copy() Node {
- switch b.NodeType {
- case NodeIf:
- return b.tr.newIf(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- case NodeRange:
- return b.tr.newRange(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- case NodeWith:
- return b.tr.newWith(b.Pos, b.Line, b.Pipe, b.List, b.ElseList)
- default:
- panic("unknown branch type")
- }
-}
-
-// IfNode represents an {{if}} action and its commands.
-type IfNode struct {
- BranchNode
-}
-
-func (t *Tree) newIf(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *IfNode {
- return &IfNode{BranchNode{tr: t, NodeType: NodeIf, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (i *IfNode) Copy() Node {
- return i.tr.newIf(i.Pos, i.Line, i.Pipe.CopyPipe(), i.List.CopyList(), i.ElseList.CopyList())
-}
-
-// RangeNode represents a {{range}} action and its commands.
-type RangeNode struct {
- BranchNode
-}
-
-func (t *Tree) newRange(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *RangeNode {
- return &RangeNode{BranchNode{tr: t, NodeType: NodeRange, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (r *RangeNode) Copy() Node {
- return r.tr.newRange(r.Pos, r.Line, r.Pipe.CopyPipe(), r.List.CopyList(), r.ElseList.CopyList())
-}
-
-// WithNode represents a {{with}} action and its commands.
-type WithNode struct {
- BranchNode
-}
-
-func (t *Tree) newWith(pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) *WithNode {
- return &WithNode{BranchNode{tr: t, NodeType: NodeWith, Pos: pos, Line: line, Pipe: pipe, List: list, ElseList: elseList}}
-}
-
-func (w *WithNode) Copy() Node {
- return w.tr.newWith(w.Pos, w.Line, w.Pipe.CopyPipe(), w.List.CopyList(), w.ElseList.CopyList())
-}
-
-// TemplateNode represents a {{template}} action.
-type TemplateNode struct {
- NodeType
- Pos
- tr *Tree
- Line int // The line number in the input (deprecated; kept for compatibility)
- Name string // The name of the template (unquoted).
- Pipe *PipeNode // The command to evaluate as dot for the template.
-}
-
-func (t *Tree) newTemplate(pos Pos, line int, name string, pipe *PipeNode) *TemplateNode {
- return &TemplateNode{tr: t, NodeType: NodeTemplate, Pos: pos, Line: line, Name: name, Pipe: pipe}
-}
-
-func (t *TemplateNode) String() string {
- if t.Pipe == nil {
- return fmt.Sprintf("{{template %q}}", t.Name)
- }
- return fmt.Sprintf("{{template %q %s}}", t.Name, t.Pipe)
-}
-
-func (t *TemplateNode) tree() *Tree {
- return t.tr
-}
-
-func (t *TemplateNode) Copy() Node {
- return t.tr.newTemplate(t.Pos, t.Line, t.Name, t.Pipe.CopyPipe())
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse.go
deleted file mode 100644
index 0d77ade..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/parse/parse.go
+++ /dev/null
@@ -1,700 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package parse builds parse trees for templates as defined by text/template
-// and html/template. Clients should use those packages to construct templates
-// rather than this one, which provides shared internal data structures not
-// intended for general use.
-package parse
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strconv"
- "strings"
-)
-
-// Tree is the representation of a single parsed template.
-type Tree struct {
- Name string // name of the template represented by the tree.
- ParseName string // name of the top-level template during parsing, for error messages.
- Root *ListNode // top-level root of the tree.
- text string // text parsed to create the template (or its parent)
- // Parsing only; cleared after parse.
- funcs []map[string]interface{}
- lex *lexer
- token [3]item // three-token lookahead for parser.
- peekCount int
- vars []string // variables defined at the moment.
-}
-
-// Copy returns a copy of the Tree. Any parsing state is discarded.
-func (t *Tree) Copy() *Tree {
- if t == nil {
- return nil
- }
- return &Tree{
- Name: t.Name,
- ParseName: t.ParseName,
- Root: t.Root.CopyList(),
- text: t.text,
- }
-}
-
-// Parse returns a map from template name to parse.Tree, created by parsing the
-// templates described in the argument string. The top-level template will be
-// given the specified name. If an error is encountered, parsing stops and an
-// empty map is returned with the error.
-func Parse(name, text, leftDelim, rightDelim string, funcs ...map[string]interface{}) (treeSet map[string]*Tree, err error) {
- treeSet = make(map[string]*Tree)
- t := New(name)
- t.text = text
- _, err = t.Parse(text, leftDelim, rightDelim, treeSet, funcs...)
- return
-}
-
-// next returns the next token.
-func (t *Tree) next() item {
- if t.peekCount > 0 {
- t.peekCount--
- } else {
- t.token[0] = t.lex.nextItem()
- }
- return t.token[t.peekCount]
-}
-
-// backup backs the input stream up one token.
-func (t *Tree) backup() {
- t.peekCount++
-}
-
-// backup2 backs the input stream up two tokens.
-// The zeroth token is already there.
-func (t *Tree) backup2(t1 item) {
- t.token[1] = t1
- t.peekCount = 2
-}
-
-// backup3 backs the input stream up three tokens
-// The zeroth token is already there.
-func (t *Tree) backup3(t2, t1 item) { // Reverse order: we're pushing back.
- t.token[1] = t1
- t.token[2] = t2
- t.peekCount = 3
-}
-
-// peek returns but does not consume the next token.
-func (t *Tree) peek() item {
- if t.peekCount > 0 {
- return t.token[t.peekCount-1]
- }
- t.peekCount = 1
- t.token[0] = t.lex.nextItem()
- return t.token[0]
-}
-
-// nextNonSpace returns the next non-space token.
-func (t *Tree) nextNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- return token
-}
-
-// peekNonSpace returns but does not consume the next non-space token.
-func (t *Tree) peekNonSpace() (token item) {
- for {
- token = t.next()
- if token.typ != itemSpace {
- break
- }
- }
- t.backup()
- return token
-}
-
-// Parsing.
-
-// New allocates a new parse tree with the given name.
-func New(name string, funcs ...map[string]interface{}) *Tree {
- return &Tree{
- Name: name,
- funcs: funcs,
- }
-}
-
-// ErrorContext returns a textual representation of the location of the node in the input text.
-// The receiver is only used when the node does not have a pointer to the tree inside,
-// which can occur in old code.
-func (t *Tree) ErrorContext(n Node) (location, context string) {
- pos := int(n.Position())
- tree := n.tree()
- if tree == nil {
- tree = t
- }
- text := tree.text[:pos]
- byteNum := strings.LastIndex(text, "\n")
- if byteNum == -1 {
- byteNum = pos // On first line.
- } else {
- byteNum++ // After the newline.
- byteNum = pos - byteNum
- }
- lineNum := 1 + strings.Count(text, "\n")
- context = n.String()
- if len(context) > 20 {
- context = fmt.Sprintf("%.20s...", context)
- }
- return fmt.Sprintf("%s:%d:%d", tree.ParseName, lineNum, byteNum), context
-}
-
-// errorf formats the error and terminates processing.
-func (t *Tree) errorf(format string, args ...interface{}) {
- t.Root = nil
- format = fmt.Sprintf("template: %s:%d: %s", t.ParseName, t.lex.lineNumber(), format)
- panic(fmt.Errorf(format, args...))
-}
-
-// error terminates processing.
-func (t *Tree) error(err error) {
- t.errorf("%s", err)
-}
-
-// expect consumes the next token and guarantees it has the required type.
-func (t *Tree) expect(expected itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected {
- t.unexpected(token, context)
- }
- return token
-}
-
-// expectOneOf consumes the next token and guarantees it has one of the required types.
-func (t *Tree) expectOneOf(expected1, expected2 itemType, context string) item {
- token := t.nextNonSpace()
- if token.typ != expected1 && token.typ != expected2 {
- t.unexpected(token, context)
- }
- return token
-}
-
-// unexpected complains about the token and terminates processing.
-func (t *Tree) unexpected(token item, context string) {
- t.errorf("unexpected %s in %s", token, context)
-}
-
-// recover is the handler that turns panics into returns from the top level of Parse.
-func (t *Tree) recover(errp *error) {
- e := recover()
- if e != nil {
- if _, ok := e.(runtime.Error); ok {
- panic(e)
- }
- if t != nil {
- t.stopParse()
- }
- *errp = e.(error)
- }
- return
-}
-
-// startParse initializes the parser, using the lexer.
-func (t *Tree) startParse(funcs []map[string]interface{}, lex *lexer) {
- t.Root = nil
- t.lex = lex
- t.vars = []string{"$"}
- t.funcs = funcs
-}
-
-// stopParse terminates parsing.
-func (t *Tree) stopParse() {
- t.lex = nil
- t.vars = nil
- t.funcs = nil
-}
-
-// Parse parses the template definition string to construct a representation of
-// the template for execution. If either action delimiter string is empty, the
-// default ("{{" or "}}") is used. Embedded template definitions are added to
-// the treeSet map.
-func (t *Tree) Parse(text, leftDelim, rightDelim string, treeSet map[string]*Tree, funcs ...map[string]interface{}) (tree *Tree, err error) {
- defer t.recover(&err)
- t.ParseName = t.Name
- t.startParse(funcs, lex(t.Name, text, leftDelim, rightDelim))
- t.text = text
- t.parse(treeSet)
- t.add(treeSet)
- t.stopParse()
- return t, nil
-}
-
-// add adds tree to the treeSet.
-func (t *Tree) add(treeSet map[string]*Tree) {
- tree := treeSet[t.Name]
- if tree == nil || IsEmptyTree(tree.Root) {
- treeSet[t.Name] = t
- return
- }
- if !IsEmptyTree(t.Root) {
- t.errorf("template: multiple definition of template %q", t.Name)
- }
-}
-
-// IsEmptyTree reports whether this tree (node) is empty of everything but space.
-func IsEmptyTree(n Node) bool {
- switch n := n.(type) {
- case nil:
- return true
- case *ActionNode:
- case *IfNode:
- case *ListNode:
- for _, node := range n.Nodes {
- if !IsEmptyTree(node) {
- return false
- }
- }
- return true
- case *RangeNode:
- case *TemplateNode:
- case *TextNode:
- return len(bytes.TrimSpace(n.Text)) == 0
- case *WithNode:
- default:
- panic("unknown node: " + n.String())
- }
- return false
-}
-
-// parse is the top-level parser for a template, essentially the same
-// as itemList except it also parses {{define}} actions.
-// It runs to EOF.
-func (t *Tree) parse(treeSet map[string]*Tree) (next Node) {
- t.Root = t.newList(t.peek().pos)
- for t.peek().typ != itemEOF {
- if t.peek().typ == itemLeftDelim {
- delim := t.next()
- if t.nextNonSpace().typ == itemDefine {
- newT := New("definition") // name will be updated once we know it.
- newT.text = t.text
- newT.ParseName = t.ParseName
- newT.startParse(t.funcs, t.lex)
- newT.parseDefinition(treeSet)
- continue
- }
- t.backup2(delim)
- }
- n := t.textOrAction()
- if n.Type() == nodeEnd {
- t.errorf("unexpected %s", n)
- }
- t.Root.append(n)
- }
- return nil
-}
-
-// parseDefinition parses a {{define}} ... {{end}} template definition and
-// installs the definition in the treeSet map. The "define" keyword has already
-// been scanned.
-func (t *Tree) parseDefinition(treeSet map[string]*Tree) {
- const context = "define clause"
- name := t.expectOneOf(itemString, itemRawString, context)
- var err error
- t.Name, err = strconv.Unquote(name.val)
- if err != nil {
- t.error(err)
- }
- t.expect(itemRightDelim, context)
- var end Node
- t.Root, end = t.itemList()
- if end.Type() != nodeEnd {
- t.errorf("unexpected %s in %s", end, context)
- }
- t.add(treeSet)
- t.stopParse()
-}
-
-// itemList:
-// textOrAction*
-// Terminates at {{end}} or {{else}}, returned separately.
-func (t *Tree) itemList() (list *ListNode, next Node) {
- list = t.newList(t.peekNonSpace().pos)
- for t.peekNonSpace().typ != itemEOF {
- n := t.textOrAction()
- switch n.Type() {
- case nodeEnd, nodeElse:
- return list, n
- }
- list.append(n)
- }
- t.errorf("unexpected EOF")
- return
-}
-
-// textOrAction:
-// text | action
-func (t *Tree) textOrAction() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemElideNewline:
- return t.elideNewline()
- case itemText:
- return t.newText(token.pos, token.val)
- case itemLeftDelim:
- return t.action()
- default:
- t.unexpected(token, "input")
- }
- return nil
-}
-
-// elideNewline:
-// Remove newlines trailing rightDelim if \\ is present.
-func (t *Tree) elideNewline() Node {
- token := t.peek()
- if token.typ != itemText {
- t.unexpected(token, "input")
- return nil
- }
-
- t.next()
- stripped := strings.TrimLeft(token.val, "\n\r")
- diff := len(token.val) - len(stripped)
- if diff > 0 {
- // This is a bit nasty. We mutate the token in-place to remove
- // preceding newlines.
- token.pos += Pos(diff)
- token.val = stripped
- }
- return t.newText(token.pos, token.val)
-}
-
-// Action:
-// control
-// command ("|" command)*
-// Left delim is past. Now get actions.
-// First word could be a keyword such as range.
-func (t *Tree) action() (n Node) {
- switch token := t.nextNonSpace(); token.typ {
- case itemElse:
- return t.elseControl()
- case itemEnd:
- return t.endControl()
- case itemIf:
- return t.ifControl()
- case itemRange:
- return t.rangeControl()
- case itemTemplate:
- return t.templateControl()
- case itemWith:
- return t.withControl()
- }
- t.backup()
- // Do not pop variables; they persist until "end".
- return t.newAction(t.peek().pos, t.lex.lineNumber(), t.pipeline("command"))
-}
-
-// Pipeline:
-// declarations? command ('|' command)*
-func (t *Tree) pipeline(context string) (pipe *PipeNode) {
- var decl []*VariableNode
- pos := t.peekNonSpace().pos
- // Are there declarations?
- for {
- if v := t.peekNonSpace(); v.typ == itemVariable {
- t.next()
- // Since space is a token, we need 3-token look-ahead here in the worst case:
- // in "$x foo" we need to read "foo" (as opposed to ":=") to know that $x is an
- // argument variable rather than a declaration. So remember the token
- // adjacent to the variable so we can push it back if necessary.
- tokenAfterVariable := t.peek()
- if next := t.peekNonSpace(); next.typ == itemColonEquals || (next.typ == itemChar && next.val == ",") {
- t.nextNonSpace()
- variable := t.newVariable(v.pos, v.val)
- decl = append(decl, variable)
- t.vars = append(t.vars, v.val)
- if next.typ == itemChar && next.val == "," {
- if context == "range" && len(decl) < 2 {
- continue
- }
- t.errorf("too many declarations in %s", context)
- }
- } else if tokenAfterVariable.typ == itemSpace {
- t.backup3(v, tokenAfterVariable)
- } else {
- t.backup2(v)
- }
- }
- break
- }
- pipe = t.newPipeline(pos, t.lex.lineNumber(), decl)
- for {
- switch token := t.nextNonSpace(); token.typ {
- case itemRightDelim, itemRightParen:
- if len(pipe.Cmds) == 0 {
- t.errorf("missing value for %s", context)
- }
- if token.typ == itemRightParen {
- t.backup()
- }
- return
- case itemBool, itemCharConstant, itemComplex, itemDot, itemField, itemIdentifier,
- itemNumber, itemNil, itemRawString, itemString, itemVariable, itemLeftParen:
- t.backup()
- pipe.append(t.command())
- default:
- t.unexpected(token, context)
- }
- }
-}
-
-func (t *Tree) parseControl(allowElseIf bool, context string) (pos Pos, line int, pipe *PipeNode, list, elseList *ListNode) {
- defer t.popVars(len(t.vars))
- line = t.lex.lineNumber()
- pipe = t.pipeline(context)
- var next Node
- list, next = t.itemList()
- switch next.Type() {
- case nodeEnd: //done
- case nodeElse:
- if allowElseIf {
- // Special case for "else if". If the "else" is followed immediately by an "if",
- // the elseControl will have left the "if" token pending. Treat
- // {{if a}}_{{else if b}}_{{end}}
- // as
- // {{if a}}_{{else}}{{if b}}_{{end}}{{end}}.
- // To do this, parse the if as usual and stop at it {{end}}; the subsequent{{end}}
- // is assumed. This technique works even for long if-else-if chains.
- // TODO: Should we allow else-if in with and range?
- if t.peek().typ == itemIf {
- t.next() // Consume the "if" token.
- elseList = t.newList(next.Position())
- elseList.append(t.ifControl())
- // Do not consume the next item - only one {{end}} required.
- break
- }
- }
- elseList, next = t.itemList()
- if next.Type() != nodeEnd {
- t.errorf("expected end; found %s", next)
- }
- }
- return pipe.Position(), line, pipe, list, elseList
-}
-
-// If:
-// {{if pipeline}} itemList {{end}}
-// {{if pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) ifControl() Node {
- return t.newIf(t.parseControl(true, "if"))
-}
-
-// Range:
-// {{range pipeline}} itemList {{end}}
-// {{range pipeline}} itemList {{else}} itemList {{end}}
-// Range keyword is past.
-func (t *Tree) rangeControl() Node {
- return t.newRange(t.parseControl(false, "range"))
-}
-
-// With:
-// {{with pipeline}} itemList {{end}}
-// {{with pipeline}} itemList {{else}} itemList {{end}}
-// If keyword is past.
-func (t *Tree) withControl() Node {
- return t.newWith(t.parseControl(false, "with"))
-}
-
-// End:
-// {{end}}
-// End keyword is past.
-func (t *Tree) endControl() Node {
- return t.newEnd(t.expect(itemRightDelim, "end").pos)
-}
-
-// Else:
-// {{else}}
-// Else keyword is past.
-func (t *Tree) elseControl() Node {
- // Special case for "else if".
- peek := t.peekNonSpace()
- if peek.typ == itemIf {
- // We see "{{else if ... " but in effect rewrite it to {{else}}{{if ... ".
- return t.newElse(peek.pos, t.lex.lineNumber())
- }
- return t.newElse(t.expect(itemRightDelim, "else").pos, t.lex.lineNumber())
-}
-
-// Template:
-// {{template stringValue pipeline}}
-// Template keyword is past. The name must be something that can evaluate
-// to a string.
-func (t *Tree) templateControl() Node {
- var name string
- token := t.nextNonSpace()
- switch token.typ {
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- name = s
- default:
- t.unexpected(token, "template invocation")
- }
- var pipe *PipeNode
- if t.nextNonSpace().typ != itemRightDelim {
- t.backup()
- // Do not pop variables; they persist until "end".
- pipe = t.pipeline("template")
- }
- return t.newTemplate(token.pos, t.lex.lineNumber(), name, pipe)
-}
-
-// command:
-// operand (space operand)*
-// space-separated arguments up to a pipeline character or right delimiter.
-// we consume the pipe character but leave the right delim to terminate the action.
-func (t *Tree) command() *CommandNode {
- cmd := t.newCommand(t.peekNonSpace().pos)
- for {
- t.peekNonSpace() // skip leading spaces.
- operand := t.operand()
- if operand != nil {
- cmd.append(operand)
- }
- switch token := t.next(); token.typ {
- case itemSpace:
- continue
- case itemError:
- t.errorf("%s", token.val)
- case itemRightDelim, itemRightParen:
- t.backup()
- case itemPipe:
- default:
- t.errorf("unexpected %s in operand; missing space?", token)
- }
- break
- }
- if len(cmd.Args) == 0 {
- t.errorf("empty command")
- }
- return cmd
-}
-
-// operand:
-// term .Field*
-// An operand is a space-separated component of a command,
-// a term possibly followed by field accesses.
-// A nil return means the next item is not an operand.
-func (t *Tree) operand() Node {
- node := t.term()
- if node == nil {
- return nil
- }
- if t.peek().typ == itemField {
- chain := t.newChain(t.peek().pos, node)
- for t.peek().typ == itemField {
- chain.Add(t.next().val)
- }
- // Compatibility with original API: If the term is of type NodeField
- // or NodeVariable, just put more fields on the original.
- // Otherwise, keep the Chain node.
- // TODO: Switch to Chains always when we can.
- switch node.Type() {
- case NodeField:
- node = t.newField(chain.Position(), chain.String())
- case NodeVariable:
- node = t.newVariable(chain.Position(), chain.String())
- default:
- node = chain
- }
- }
- return node
-}
-
-// term:
-// literal (number, string, nil, boolean)
-// function (identifier)
-// .
-// .Field
-// $
-// '(' pipeline ')'
-// A term is a simple "expression".
-// A nil return means the next item is not a term.
-func (t *Tree) term() Node {
- switch token := t.nextNonSpace(); token.typ {
- case itemError:
- t.errorf("%s", token.val)
- case itemIdentifier:
- if !t.hasFunction(token.val) {
- t.errorf("function %q not defined", token.val)
- }
- return NewIdentifier(token.val).SetTree(t).SetPos(token.pos)
- case itemDot:
- return t.newDot(token.pos)
- case itemNil:
- return t.newNil(token.pos)
- case itemVariable:
- return t.useVar(token.pos, token.val)
- case itemField:
- return t.newField(token.pos, token.val)
- case itemBool:
- return t.newBool(token.pos, token.val == "true")
- case itemCharConstant, itemComplex, itemNumber:
- number, err := t.newNumber(token.pos, token.val, token.typ)
- if err != nil {
- t.error(err)
- }
- return number
- case itemLeftParen:
- pipe := t.pipeline("parenthesized pipeline")
- if token := t.next(); token.typ != itemRightParen {
- t.errorf("unclosed right paren: unexpected %s", token)
- }
- return pipe
- case itemString, itemRawString:
- s, err := strconv.Unquote(token.val)
- if err != nil {
- t.error(err)
- }
- return t.newString(token.pos, token.val, s)
- }
- t.backup()
- return nil
-}
-
-// hasFunction reports if a function name exists in the Tree's maps.
-func (t *Tree) hasFunction(name string) bool {
- for _, funcMap := range t.funcs {
- if funcMap == nil {
- continue
- }
- if funcMap[name] != nil {
- return true
- }
- }
- return false
-}
-
-// popVars trims the variable list to the specified length
-func (t *Tree) popVars(n int) {
- t.vars = t.vars[:n]
-}
-
-// useVar returns a node for a variable reference. It errors if the
-// variable is not defined.
-func (t *Tree) useVar(pos Pos, name string) Node {
- v := t.newVariable(pos, name)
- for _, varName := range t.vars {
- if varName == v.Ident[0] {
- return v
- }
- }
- t.errorf("undefined variable %q", v.Ident[0])
- return nil
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/template.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/template.go
deleted file mode 100644
index 447ed2a..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/template/template.go
+++ /dev/null
@@ -1,218 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package template
-
-import (
- "fmt"
- "reflect"
-
- "github.com/alecthomas/template/parse"
-)
-
-// common holds the information shared by related templates.
-type common struct {
- tmpl map[string]*Template
- // We use two maps, one for parsing and one for execution.
- // This separation makes the API cleaner since it doesn't
- // expose reflection to the client.
- parseFuncs FuncMap
- execFuncs map[string]reflect.Value
-}
-
-// Template is the representation of a parsed template. The *parse.Tree
-// field is exported only for use by html/template and should be treated
-// as unexported by all other clients.
-type Template struct {
- name string
- *parse.Tree
- *common
- leftDelim string
- rightDelim string
-}
-
-// New allocates a new template with the given name.
-func New(name string) *Template {
- return &Template{
- name: name,
- }
-}
-
-// Name returns the name of the template.
-func (t *Template) Name() string {
- return t.name
-}
-
-// New allocates a new template associated with the given one and with the same
-// delimiters. The association, which is transitive, allows one template to
-// invoke another with a {{template}} action.
-func (t *Template) New(name string) *Template {
- t.init()
- return &Template{
- name: name,
- common: t.common,
- leftDelim: t.leftDelim,
- rightDelim: t.rightDelim,
- }
-}
-
-func (t *Template) init() {
- if t.common == nil {
- t.common = new(common)
- t.tmpl = make(map[string]*Template)
- t.parseFuncs = make(FuncMap)
- t.execFuncs = make(map[string]reflect.Value)
- }
-}
-
-// Clone returns a duplicate of the template, including all associated
-// templates. The actual representation is not copied, but the name space of
-// associated templates is, so further calls to Parse in the copy will add
-// templates to the copy but not to the original. Clone can be used to prepare
-// common templates and use them with variant definitions for other templates
-// by adding the variants after the clone is made.
-func (t *Template) Clone() (*Template, error) {
- nt := t.copy(nil)
- nt.init()
- nt.tmpl[t.name] = nt
- for k, v := range t.tmpl {
- if k == t.name { // Already installed.
- continue
- }
- // The associated templates share nt's common structure.
- tmpl := v.copy(nt.common)
- nt.tmpl[k] = tmpl
- }
- for k, v := range t.parseFuncs {
- nt.parseFuncs[k] = v
- }
- for k, v := range t.execFuncs {
- nt.execFuncs[k] = v
- }
- return nt, nil
-}
-
-// copy returns a shallow copy of t, with common set to the argument.
-func (t *Template) copy(c *common) *Template {
- nt := New(t.name)
- nt.Tree = t.Tree
- nt.common = c
- nt.leftDelim = t.leftDelim
- nt.rightDelim = t.rightDelim
- return nt
-}
-
-// AddParseTree creates a new template with the name and parse tree
-// and associates it with t.
-func (t *Template) AddParseTree(name string, tree *parse.Tree) (*Template, error) {
- if t.common != nil && t.tmpl[name] != nil {
- return nil, fmt.Errorf("template: redefinition of template %q", name)
- }
- nt := t.New(name)
- nt.Tree = tree
- t.tmpl[name] = nt
- return nt, nil
-}
-
-// Templates returns a slice of the templates associated with t, including t
-// itself.
-func (t *Template) Templates() []*Template {
- if t.common == nil {
- return nil
- }
- // Return a slice so we don't expose the map.
- m := make([]*Template, 0, len(t.tmpl))
- for _, v := range t.tmpl {
- m = append(m, v)
- }
- return m
-}
-
-// Delims sets the action delimiters to the specified strings, to be used in
-// subsequent calls to Parse, ParseFiles, or ParseGlob. Nested template
-// definitions will inherit the settings. An empty delimiter stands for the
-// corresponding default: {{ or }}.
-// The return value is the template, so calls can be chained.
-func (t *Template) Delims(left, right string) *Template {
- t.leftDelim = left
- t.rightDelim = right
- return t
-}
-
-// Funcs adds the elements of the argument map to the template's function map.
-// It panics if a value in the map is not a function with appropriate return
-// type. However, it is legal to overwrite elements of the map. The return
-// value is the template, so calls can be chained.
-func (t *Template) Funcs(funcMap FuncMap) *Template {
- t.init()
- addValueFuncs(t.execFuncs, funcMap)
- addFuncs(t.parseFuncs, funcMap)
- return t
-}
-
-// Lookup returns the template with the given name that is associated with t,
-// or nil if there is no such template.
-func (t *Template) Lookup(name string) *Template {
- if t.common == nil {
- return nil
- }
- return t.tmpl[name]
-}
-
-// Parse parses a string into a template. Nested template definitions will be
-// associated with the top-level template t. Parse may be called multiple times
-// to parse definitions of templates to associate with t. It is an error if a
-// resulting template is non-empty (contains content other than template
-// definitions) and would replace a non-empty template with the same name.
-// (In multiple calls to Parse with the same receiver template, only one call
-// can contain text other than space, comments, and template definitions.)
-func (t *Template) Parse(text string) (*Template, error) {
- t.init()
- trees, err := parse.Parse(t.name, text, t.leftDelim, t.rightDelim, t.parseFuncs, builtins)
- if err != nil {
- return nil, err
- }
- // Add the newly parsed trees, including the one for t, into our common structure.
- for name, tree := range trees {
- // If the name we parsed is the name of this template, overwrite this template.
- // The associate method checks it's not a redefinition.
- tmpl := t
- if name != t.name {
- tmpl = t.New(name)
- }
- // Even if t == tmpl, we need to install it in the common.tmpl map.
- if replace, err := t.associate(tmpl, tree); err != nil {
- return nil, err
- } else if replace {
- tmpl.Tree = tree
- }
- tmpl.leftDelim = t.leftDelim
- tmpl.rightDelim = t.rightDelim
- }
- return t, nil
-}
-
-// associate installs the new template into the group of templates associated
-// with t. It is an error to reuse a name except to overwrite an empty
-// template. The two are already known to share the common structure.
-// The boolean return value reports wither to store this tree as t.Tree.
-func (t *Template) associate(new *Template, tree *parse.Tree) (bool, error) {
- if new.common != t.common {
- panic("internal error: associate not common")
- }
- name := new.name
- if old := t.tmpl[name]; old != nil {
- oldIsEmpty := parse.IsEmptyTree(old.Root)
- newIsEmpty := parse.IsEmptyTree(tree.Root)
- if newIsEmpty {
- // Whether old is empty or not, new is empty; no reason to replace old.
- return false, nil
- }
- if !oldIsEmpty {
- return false, fmt.Errorf("template: redefinition of template %q", name)
- }
- }
- t.tmpl[name] = new
- return true, nil
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/COPYING b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/COPYING
deleted file mode 100644
index 2993ec0..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/COPYING
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (C) 2014 Alec Thomas
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/README.md b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/README.md
deleted file mode 100644
index bee884e..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/README.md
+++ /dev/null
@@ -1,11 +0,0 @@
-# Units - Helpful unit multipliers and functions for Go
-
-The goal of this package is to have functionality similar to the [time](http://golang.org/pkg/time/) package.
-
-It allows for code like this:
-
-```go
-n, err := ParseBase2Bytes("1KB")
-// n == 1024
-n = units.Mebibyte * 512
-```
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/bytes.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/bytes.go
deleted file mode 100644
index eaadeb8..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/bytes.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package units
-
-// Base2Bytes is the old non-SI power-of-2 byte scale (1024 bytes in a kilobyte,
-// etc.).
-type Base2Bytes int64
-
-// Base-2 byte units.
-const (
- Kibibyte Base2Bytes = 1024
- KiB = Kibibyte
- Mebibyte = Kibibyte * 1024
- MiB = Mebibyte
- Gibibyte = Mebibyte * 1024
- GiB = Gibibyte
- Tebibyte = Gibibyte * 1024
- TiB = Tebibyte
- Pebibyte = Tebibyte * 1024
- PiB = Pebibyte
- Exbibyte = Pebibyte * 1024
- EiB = Exbibyte
-)
-
-var (
- bytesUnitMap = MakeUnitMap("iB", "B", 1024)
- oldBytesUnitMap = MakeUnitMap("B", "B", 1024)
-)
-
-// ParseBase2Bytes supports both iB and B in base-2 multipliers. That is, KB
-// and KiB are both 1024.
-func ParseBase2Bytes(s string) (Base2Bytes, error) {
- n, err := ParseUnit(s, bytesUnitMap)
- if err != nil {
- n, err = ParseUnit(s, oldBytesUnitMap)
- }
- return Base2Bytes(n), err
-}
-
-func (b Base2Bytes) String() string {
- return ToString(int64(b), 1024, "iB", "B")
-}
-
-var (
- metricBytesUnitMap = MakeUnitMap("B", "B", 1000)
-)
-
-// MetricBytes are SI byte units (1000 bytes in a kilobyte).
-type MetricBytes SI
-
-// SI base-10 byte units.
-const (
- Kilobyte MetricBytes = 1000
- KB = Kilobyte
- Megabyte = Kilobyte * 1000
- MB = Megabyte
- Gigabyte = Megabyte * 1000
- GB = Gigabyte
- Terabyte = Gigabyte * 1000
- TB = Terabyte
- Petabyte = Terabyte * 1000
- PB = Petabyte
- Exabyte = Petabyte * 1000
- EB = Exabyte
-)
-
-// ParseMetricBytes parses base-10 metric byte units. That is, KB is 1000 bytes.
-func ParseMetricBytes(s string) (MetricBytes, error) {
- n, err := ParseUnit(s, metricBytesUnitMap)
- return MetricBytes(n), err
-}
-
-func (m MetricBytes) String() string {
- return ToString(int64(m), 1000, "B", "B")
-}
-
-// ParseStrictBytes supports both iB and B suffixes for base 2 and metric,
-// respectively. That is, KiB represents 1024 and KB represents 1000.
-func ParseStrictBytes(s string) (int64, error) {
- n, err := ParseUnit(s, bytesUnitMap)
- if err != nil {
- n, err = ParseUnit(s, metricBytesUnitMap)
- }
- return int64(n), err
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/doc.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/doc.go
deleted file mode 100644
index 156ae38..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/doc.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Package units provides helpful unit multipliers and functions for Go.
-//
-// The goal of this package is to have functionality similar to the time [1] package.
-//
-//
-// [1] http://golang.org/pkg/time/
-//
-// It allows for code like this:
-//
-// n, err := ParseBase2Bytes("1KB")
-// // n == 1024
-// n = units.Mebibyte * 512
-package units
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/si.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/si.go
deleted file mode 100644
index 8234a9d..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/si.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package units
-
-// SI units.
-type SI int64
-
-// SI unit multiples.
-const (
- Kilo SI = 1000
- Mega = Kilo * 1000
- Giga = Mega * 1000
- Tera = Giga * 1000
- Peta = Tera * 1000
- Exa = Peta * 1000
-)
-
-func MakeUnitMap(suffix, shortSuffix string, scale int64) map[string]float64 {
- return map[string]float64{
- shortSuffix: 1,
- "K" + suffix: float64(scale),
- "M" + suffix: float64(scale * scale),
- "G" + suffix: float64(scale * scale * scale),
- "T" + suffix: float64(scale * scale * scale * scale),
- "P" + suffix: float64(scale * scale * scale * scale * scale),
- "E" + suffix: float64(scale * scale * scale * scale * scale * scale),
- }
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/util.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/util.go
deleted file mode 100644
index 6527e92..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/alecthomas/units/util.go
+++ /dev/null
@@ -1,138 +0,0 @@
-package units
-
-import (
- "errors"
- "fmt"
- "strings"
-)
-
-var (
- siUnits = []string{"", "K", "M", "G", "T", "P", "E"}
-)
-
-func ToString(n int64, scale int64, suffix, baseSuffix string) string {
- mn := len(siUnits)
- out := make([]string, mn)
- for i, m := range siUnits {
- if n%scale != 0 || i == 0 && n == 0 {
- s := suffix
- if i == 0 {
- s = baseSuffix
- }
- out[mn-1-i] = fmt.Sprintf("%d%s%s", n%scale, m, s)
- }
- n /= scale
- if n == 0 {
- break
- }
- }
- return strings.Join(out, "")
-}
-
-// Below code ripped straight from http://golang.org/src/pkg/time/format.go?s=33392:33438#L1123
-var errLeadingInt = errors.New("units: bad [0-9]*") // never printed
-
-// leadingInt consumes the leading [0-9]* from s.
-func leadingInt(s string) (x int64, rem string, err error) {
- i := 0
- for ; i < len(s); i++ {
- c := s[i]
- if c < '0' || c > '9' {
- break
- }
- if x >= (1<<63-10)/10 {
- // overflow
- return 0, "", errLeadingInt
- }
- x = x*10 + int64(c) - '0'
- }
- return x, s[i:], nil
-}
-
-func ParseUnit(s string, unitMap map[string]float64) (int64, error) {
- // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+
- orig := s
- f := float64(0)
- neg := false
-
- // Consume [-+]?
- if s != "" {
- c := s[0]
- if c == '-' || c == '+' {
- neg = c == '-'
- s = s[1:]
- }
- }
- // Special case: if all that is left is "0", this is zero.
- if s == "0" {
- return 0, nil
- }
- if s == "" {
- return 0, errors.New("units: invalid " + orig)
- }
- for s != "" {
- g := float64(0) // this element of the sequence
-
- var x int64
- var err error
-
- // The next character must be [0-9.]
- if !(s[0] == '.' || ('0' <= s[0] && s[0] <= '9')) {
- return 0, errors.New("units: invalid " + orig)
- }
- // Consume [0-9]*
- pl := len(s)
- x, s, err = leadingInt(s)
- if err != nil {
- return 0, errors.New("units: invalid " + orig)
- }
- g = float64(x)
- pre := pl != len(s) // whether we consumed anything before a period
-
- // Consume (\.[0-9]*)?
- post := false
- if s != "" && s[0] == '.' {
- s = s[1:]
- pl := len(s)
- x, s, err = leadingInt(s)
- if err != nil {
- return 0, errors.New("units: invalid " + orig)
- }
- scale := 1.0
- for n := pl - len(s); n > 0; n-- {
- scale *= 10
- }
- g += float64(x) / scale
- post = pl != len(s)
- }
- if !pre && !post {
- // no digits (e.g. ".s" or "-.s")
- return 0, errors.New("units: invalid " + orig)
- }
-
- // Consume unit.
- i := 0
- for ; i < len(s); i++ {
- c := s[i]
- if c == '.' || ('0' <= c && c <= '9') {
- break
- }
- }
- u := s[:i]
- s = s[i:]
- unit, ok := unitMap[u]
- if !ok {
- return 0, errors.New("units: unknown unit " + u + " in " + orig)
- }
-
- f += g * unit
- }
-
- if neg {
- f = -f
- }
- if f < float64(-1<<63) || f > float64(1<<63-1) {
- return 0, errors.New("units: overflow parsing unit")
- }
- return int64(f), nil
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/.gitignore b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/.gitignore
deleted file mode 100644
index b00e55b..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/.gitignore
+++ /dev/null
@@ -1,27 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
-
-.idea/
-*.iml
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/.travis.yml b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/.travis.yml
deleted file mode 100644
index 7942b45..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/.travis.yml
+++ /dev/null
@@ -1,24 +0,0 @@
-language: go
-
-go:
-# - 1.0 This won't work with coveralls
-# - 1.1 This won't work with coveralls
-# - 1.2 This won't work with our XML wonderfulness
- - 1.3
- - 1.4
- - 1.5
- - tip
-
-before_install:
- - go get -u -v github.com/axw/gocov/gocov
- - go get -u -v github.com/mattn/goveralls
- - go get -u -v github.com/golang/lint/golint
-
-script:
- - diff -u <(echo -n) <(gofmt -s -d ./)
- - diff -u <(echo -n) <(go vet ./...)
- - diff -u <(echo -n) <(golint ./...)
- - go test -v -race -covermode=atomic -coverprofile=coverage.out
-
-after_success:
- - goveralls -coverprofile=coverage.out -service=travis-ci -repotoken $COVERALLS_TOKEN
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/LICENSE b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/LICENSE
deleted file mode 100644
index 1a0a94d..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "{}"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright 2015 – Levi Gross
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
-
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/README.md b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/README.md
deleted file mode 100644
index 9c47bd3..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/README.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# GRequests
-A Go "clone" of the great and famous Requests library
-
-[![Build Status](https://travis-ci.org/levigross/grequests.svg?branch=master)](https://travis-ci.org/levigross/grequests) [![GoDoc](https://godoc.org/github.com/levigross/grequests?status.svg)](https://godoc.org/github.com/levigross/grequests)
-[![Coverage Status](https://coveralls.io/repos/levigross/grequests/badge.svg)](https://coveralls.io/r/levigross/grequests)
-[![Join the chat at https://gitter.im/levigross/grequests](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/levigross/grequests?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-License
-======
-
-GRequests is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text
-
-Features
-========
-
-- Asynchronous and synchronous functionality built in
-- Works with every version of Go from 1.3
-- Responses can be serialized into JSON and XML
-- Easy file uploads
-- Easy file downloads
-- Support for the following HTTP verbs `GET, HEAD, POST, PUT, DELETE, PATCH, OPTIONS`
-
-Install
-=======
-`go get -u github.com/levigross/grequests`
-
-Usage
-======
-`import "github.com/levigross/grequests"`
-
-Basic Examples
-=========
-Basic GET request:
-
-```go
-resp, err := grequests.Get("http://httpbin.org/get", nil)
-// You can modify the request by passing an optional RequestOptions struct
-
-if err != nil {
- log.Fatalln("Unable to make request: ", err)
-}
-
-fmt.Println(resp.String())
-// {
-// "args": {},
-// "headers": {
-// "Accept": "*/*",
-// "Host": "httpbin.org",
-```
-
-If an error occurs all of the other properties and methods of a `Response` will be `nil`
-
-Quirks
-=======
-## Request Quirks
-
-When passing parameters to be added to a URL, if the URL has existing parameters that *_contradict_* with what has been passed within `Params` – `Params` will be the "source of authority" and overwrite the contradicting URL parameter.
-
-Lets see how it works...
-
-```go
-ro := &RequestOptions{
- Params: map[string]string{"Hello": "Goodbye"},
-}
-Get("http://httpbin.org/get?Hello=World", ro)
-// The URL is now http://httpbin.org/get?Hello=Goodbye
-```
-
-## Response Quirks
-
-Order matters! This is because `grequests.Response` is implemented as an `io.ReadCloser` which proxies the *http.Response.Body* `io.ReadCloser` interface. It also includes an internal buffer for use in `Response.String()` and `Response.Bytes()`.
-
-Here are a list of methods that consume the *http.Response.Body* `io.ReadCloser` interface.
-
-- Response.JSON
-- Response.XML
-- Response.DownloadToFile
-- Response.Close
-- Response.Read
-
-The following methods make use of an internal byte buffer
-
-- Response.String
-- Response.Bytes
-
-In the code below, once the file is downloaded – the `Response` struct no longer has access to the request bytes
-
-```go
-response := Get("http://some-wonderful-file.txt", nil)
-
-if err := response.DownloadToFile("randomFile"); err != nil {
- log.Println("Unable to download file: ", err)
-}
-
-// At this point the .String and .Bytes method will return empty responses
-
-response.Bytes() == nil // true
-response.String() == "" // true
-
-```
-
-But if we were to call `response.Bytes()` or `response.String()` first, every operation will succeed until the internal buffer is cleared:
-
-```go
-response := Get("http://some-wonderful-file.txt", nil)
-
-// This call to .Bytes caches the request bytes in an internal byte buffer – which can be used again and again until it is cleared
-response.Bytes() == `file-bytes`
-response.String() == "file-string"
-
-// This will work because it will use the internal byte buffer
-if err := resp.DownloadToFile("randomFile"); err != nil {
- log.Println("Unable to download file: ", err)
-}
-
-// Now if we clear the internal buffer....
-response.ClearInternalBuffer()
-
-// At this point the .String and .Bytes method will return empty responses
-
-response.Bytes() == nil // true
-response.String() == "" // true
-```
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/base.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/base.go
deleted file mode 100644
index a517184..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/base.go
+++ /dev/null
@@ -1,58 +0,0 @@
-// Package grequests implements a friendly API over Go's existing net/http library
-package grequests
-
-// Get takes 2 parameters and returns a Response Struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-func Get(url string, ro *RequestOptions) (*Response, error) {
- return doRegularRequest("GET", url, ro)
-}
-
-// Put takes 2 parameters and returns a Response struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-func Put(url string, ro *RequestOptions) (*Response, error) {
- return doRegularRequest("PUT", url, ro)
-}
-
-// Patch takes 2 parameters and returns a Response struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-func Patch(url string, ro *RequestOptions) (*Response, error) {
- return doRegularRequest("PATCH", url, ro)
-}
-
-// Delete takes 2 parameters and returns a Response struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-func Delete(url string, ro *RequestOptions) (*Response, error) {
- return doRegularRequest("DELETE", url, ro)
-}
-
-// Post takes 2 parameters and returns a Response channel. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-func Post(url string, ro *RequestOptions) (*Response, error) {
- return doRegularRequest("POST", url, ro)
-}
-
-// Head takes 2 parameters and returns a Response channel. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-func Head(url string, ro *RequestOptions) (*Response, error) {
- return doRegularRequest("HEAD", url, ro)
-}
-
-// Options takes 2 parameters and returns a Response struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-func Options(url string, ro *RequestOptions) (*Response, error) {
- return doRegularRequest("OPTIONS", url, ro)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/circle.yml b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/circle.yml
deleted file mode 100644
index a98278c..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/circle.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-dependencies:
- pre:
- - go get github.com/axw/gocov/gocov
- - go get github.com/mattn/goveralls
- - go get github.com/golang/lint
-test:
- override:
- - diff -u <(echo -n) <(gofmt -s -d ./)
- - diff -u <(echo -n) <(go vet ./...)
- - diff -u <(echo -n) <(golint ./...)
- - go test -v -race ./...
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/file_upload.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/file_upload.go
deleted file mode 100644
index 05a341a..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/file_upload.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package grequests
-
-import (
- "errors"
- "io"
- "os"
- "path/filepath"
-)
-
-// FileUpload is a struct that is used to specify the file that a User
-// wishes to upload.
-type FileUpload struct {
- // Filename is the name of the file that you wish to upload. We use this to guess the mimetype as well as pass it onto the server
- FileName string
-
- // FileContents is happy as long as you pass it a io.ReadCloser (which most file use anyways)
- FileContents io.ReadCloser
-}
-
-// FileUploadFromDisk allows you to create a FileUpload struct slice by just specifying a location on the disk
-func FileUploadFromDisk(fileName string) ([]FileUpload, error) {
- fd, err := os.Open(fileName)
-
- if err != nil {
- return nil, err
- }
-
- return []FileUpload{{FileContents: fd, FileName: fileName}}, nil
-
-}
-
-// FileUploadFromGlob allows you to create a FileUpload struct slice by just specifying a glob location on the disk
-// this function will gloss over all errors in the files and only upload the files that don't return errors from the glob
-func FileUploadFromGlob(fileSystemGlob string) ([]FileUpload, error) {
- files, err := filepath.Glob(fileSystemGlob)
-
- if err != nil {
- return nil, err
- }
-
- if len(files) == 0 {
- return nil, errors.New("grequests: No files have been returned in the glob")
- }
-
- filesToUpload := make([]FileUpload, 0, len(files))
-
- for _, f := range files {
- if s, err := os.Stat(f); err != nil || s.IsDir() {
- continue
- }
-
- // ignoring error because I can stat the file
- fd, _ := os.Open(f)
-
- filesToUpload = append(filesToUpload, FileUpload{FileContents: fd, FileName: filepath.Base(fd.Name())})
-
- }
-
- return filesToUpload, nil
-
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/request.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/request.go
deleted file mode 100644
index d08fc8e..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/request.go
+++ /dev/null
@@ -1,480 +0,0 @@
-package grequests
-
-import (
- "bytes"
- "crypto/tls"
- "encoding/json"
- "encoding/xml"
- "errors"
- "io"
- "mime"
- "mime/multipart"
- "net"
- "net/http"
- "net/http/cookiejar"
- "net/url"
- "strconv"
- "strings"
- "time"
-
- "golang.org/x/net/publicsuffix"
-)
-
-// RequestOptions is the location that of where the data
-type RequestOptions struct {
-
- // Data is a map of key values that will eventually convert into the
- // query string of a GET request or the body of a POST request.
- Data map[string]string
-
- // Params is a map of query strings that may be used within a GET request
- Params map[string]string
-
- // Files is where you can include files to upload. The use of this data
- // structure is limited to POST requests
- Files []FileUpload
-
- // JSON can be used when you wish to send JSON within the request body
- JSON interface{}
-
- // XML can be used if you wish to send XML within the request body
- XML interface{}
-
- // Headers if you want to add custom HTTP headers to the request,
- // this is your friend
- Headers map[string]string
-
- // InsecureSkipVerify is a flag that specifies if we should validate the
- // server's TLS certificate. It should be noted that Go's TLS verify mechanism
- // doesn't validate if a certificate has been revoked
- InsecureSkipVerify bool
-
- // DisableCompression will disable gzip compression on requests
- DisableCompression bool
-
- // UserAgent allows you to set an arbitrary custom user agent
- UserAgent string
-
- // Auth allows you to specify a user name and password that you wish to
- // use when requesting the URL. It will use basic HTTP authentication
- // formatting the username and password in base64 the format is:
- // []string{username, password}
- Auth []string
-
- // IsAjax is a flag that can be set to make the request appear
- // to be generated by browser Javascript
- IsAjax bool
-
- // Cookies is an array of `http.Cookie` that allows you to attach
- // cookies to your request
- Cookies []http.Cookie
-
- // UseCookieJar will create a custom HTTP client that will
- // process and store HTTP cookies when they are sent down
- UseCookieJar bool
-
- // Proxies is a map in the following format
- // *protocol* => proxy address e.g http => http://127.0.0.1:8080
- Proxies map[string]*url.URL
-
- // TLSHandshakeTimeout specifies the maximum amount of time waiting to
- // wait for a TLS handshake. Zero means no timeout.
- TLSHandshakeTimeout time.Duration
-
- // DialTimeout is the maximum amount of time a dial will wait for
- // a connect to complete.
- DialTimeout time.Duration
-
- // KeepAlive specifies the keep-alive period for an active
- // network connection. If zero, keep-alive are not enabled.
- DialKeepAlive time.Duration
-
- // RequestTimeout is the maximum amount of time a whole request(include dial / request / redirect)
- // will wait.
- RequestTimeout time.Duration
-
- // HTTPClient can be provided if you wish to supply a custom HTTP client
- // this is useful if you want to use an OAUTH client with your request.
- HTTPClient *http.Client
-
- // RedirectLocationTrusted is a flag that will enable all headers to be
- // forwarded to the redirect location. Otherwise, the headers specified in
- // `SensitiveHTTPHeaders` will be removed from the request.
- RedirectLocationTrusted bool
-
- // SensitiveHTTPHeaders is a map of sensitive HTTP headers that a user
- // doesn't want passed on a redirect.
- SensitiveHTTPHeaders map[string]struct{}
-
- // RedirectLimit is the acceptable amount of redirects that we should expect
- // before returning an error be default this is set to 30. You can change this
- // globally by modifying the `RedirectLimit` variable.
- RedirectLimit int
-}
-
-func doRegularRequest(requestVerb, url string, ro *RequestOptions) (*Response, error) {
- return buildResponse(buildRequest(requestVerb, url, ro, nil))
-}
-
-func doSessionRequest(requestVerb, url string, ro *RequestOptions, httpClient *http.Client) (*Response, error) {
- return buildResponse(buildRequest(requestVerb, url, ro, httpClient))
-}
-
-// buildRequest is where most of the magic happens for request processing
-func buildRequest(httpMethod, url string, ro *RequestOptions, httpClient *http.Client) (*http.Response, error) {
- if ro == nil {
- ro = &RequestOptions{}
- }
- // Create our own HTTP client
-
- if httpClient == nil {
- httpClient = BuildHTTPClient(*ro)
- }
-
- // Build our URL
- var err error
-
- if len(ro.Params) != 0 {
- if url, err = buildURLParams(url, ro.Params); err != nil {
- return nil, err
- }
- }
-
- // Build the request
- req, err := buildHTTPRequest(httpMethod, url, ro)
-
- if err != nil {
- return nil, err
- }
-
- // Do we need to add any HTTP headers or Basic Auth?
- addHTTPHeaders(ro, req)
- addCookies(ro, req)
- addRedirectFunctionality(httpClient, ro)
-
- return httpClient.Do(req)
-}
-
-func buildHTTPRequest(httpMethod, userURL string, ro *RequestOptions) (*http.Request, error) {
- if ro.JSON != nil {
- return createBasicJSONRequest(httpMethod, userURL, ro)
- }
-
- if ro.XML != nil {
- return createBasicXMLRequest(httpMethod, userURL, ro)
- }
-
- if ro.Files != nil {
- return createFileUploadRequest(httpMethod, userURL, ro)
- }
-
- if ro.Data != nil {
- return createBasicRequest(httpMethod, userURL, ro)
- }
-
- return http.NewRequest(httpMethod, userURL, nil)
-}
-
-func createFileUploadRequest(httpMethod, userURL string, ro *RequestOptions) (*http.Request, error) {
- if httpMethod == "POST" {
- return createMultiPartPostRequest(httpMethod, userURL, ro)
- }
-
- // This may be a PUT or PATCH request so we will just put the raw
- // io.ReadCloser in the request body
- // and guess the MIME type from the file name
-
- // At the moment, we will only support 1 file upload as a time
- // when uploading using PUT or PATCH
-
- req, err := http.NewRequest(httpMethod, userURL, ro.Files[0].FileContents)
-
- if err != nil {
- return nil, err
- }
-
- req.Header.Set("Content-Type", mime.TypeByExtension(ro.Files[0].FileName))
-
- return req, nil
-
-}
-
-func createBasicXMLRequest(httpMethod, userURL string, ro *RequestOptions) (*http.Request, error) {
- tempBuffer := &bytes.Buffer{}
-
- switch ro.XML.(type) {
- case string:
- tempBuffer.WriteString(ro.XML.(string))
- case []byte:
- tempBuffer.Write(ro.XML.([]byte))
- default:
- if err := xml.NewEncoder(tempBuffer).Encode(ro.XML); err != nil {
- return nil, err
- }
- }
-
- req, err := http.NewRequest(httpMethod, userURL, tempBuffer)
- if err != nil {
- return nil, err
- }
-
- req.Header.Set("Content-Type", "application/xml")
-
- return req, nil
-
-}
-func createMultiPartPostRequest(httpMethod, userURL string, ro *RequestOptions) (*http.Request, error) {
- requestBody := &bytes.Buffer{}
-
- multipartWriter := multipart.NewWriter(requestBody)
-
- for i, f := range ro.Files {
-
- if f.FileContents == nil {
- return nil, errors.New("grequests: Pointer FileContents cannot be nil")
- }
-
- fileName := "file"
-
- if len(ro.Files) > 1 {
- fileName = strings.Join([]string{"file", strconv.Itoa(i + 1)}, "")
- }
-
- writer, err := multipartWriter.CreateFormFile(fileName, f.FileName)
-
- if err != nil {
- return nil, err
- }
-
- if _, err = io.Copy(writer, f.FileContents); err != nil && err != io.EOF {
- return nil, err
- }
-
- f.FileContents.Close()
-
- }
-
- // Populate the other parts of the form (if there are any)
- for key, value := range ro.Data {
- multipartWriter.WriteField(key, value)
- }
-
- if err := multipartWriter.Close(); err != nil {
- return nil, err
- }
-
- req, err := http.NewRequest(httpMethod, userURL, requestBody)
-
- if err != nil {
- return nil, err
- }
-
- req.Header.Add("Content-Type", multipartWriter.FormDataContentType())
-
- return req, err
-}
-
-func createBasicJSONRequest(httpMethod, userURL string, ro *RequestOptions) (*http.Request, error) {
-
- tempBuffer := &bytes.Buffer{}
- switch ro.JSON.(type) {
- case string:
- tempBuffer.WriteString(ro.JSON.(string))
- case []byte:
- tempBuffer.Write(ro.JSON.([]byte))
- default:
- if err := json.NewEncoder(tempBuffer).Encode(ro.JSON); err != nil {
- return nil, err
- }
- }
-
- req, err := http.NewRequest(httpMethod, userURL, tempBuffer)
- if err != nil {
- return nil, err
- }
-
- req.Header.Set("Content-Type", "application/json")
-
- return req, nil
-
-}
-func createBasicRequest(httpMethod, userURL string, ro *RequestOptions) (*http.Request, error) {
-
- req, err := http.NewRequest(httpMethod, userURL, strings.NewReader(encodePostValues(ro.Data)))
-
- if err != nil {
- return nil, err
- }
-
- // The content type must be set to a regular form
- req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
-
- return req, nil
-}
-
-func encodePostValues(postValues map[string]string) string {
- urlValues := &url.Values{}
-
- for key, value := range postValues {
- urlValues.Set(key, value)
- }
-
- return urlValues.Encode() // This will sort all of the string values
-}
-
-// proxySettings will default to the default proxy settings if none are provided
-// if settings are provided – they will override the environment variables
-func (ro RequestOptions) proxySettings(req *http.Request) (*url.URL, error) {
- // No proxies – lets use the default
- if len(ro.Proxies) == 0 {
- return http.ProxyFromEnvironment(req)
- }
-
- // There was a proxy specified – do we support the protocol?
- if _, ok := ro.Proxies[req.URL.Scheme]; ok {
- return ro.Proxies[req.URL.Scheme], nil
- }
-
- // Proxies were specified but not for any protocol that we use
- return http.ProxyFromEnvironment(req)
-
-}
-
-// dontUseDefaultClient will tell the "client creator" if a custom client is needed
-// it checks the following items (and will create a custom client of these are)
-// true
-// 1. Do we want to accept invalid SSL certificates?
-// 2. Do we want to disable compression?
-// 3. Do we want a custom proxy?
-// 4. Do we want to change the default timeout for TLS Handshake?
-// 5. Do we want to change the default request timeout?
-// 6. Do we want to change the default connection timeout?
-// 7. Do you want to use the http.Client's cookieJar?
-func (ro RequestOptions) dontUseDefaultClient() bool {
- return ro.InsecureSkipVerify == true ||
- ro.DisableCompression == true ||
- len(ro.Proxies) != 0 ||
- ro.TLSHandshakeTimeout != 0 ||
- ro.DialTimeout != 0 ||
- ro.DialKeepAlive != 0 ||
- len(ro.Cookies) != 0 ||
- ro.UseCookieJar != false ||
- ro.RequestTimeout != 0
-}
-
-// BuildHTTPClient is a function that will return a custom HTTP client based on the request options provided
-// the check is in UseDefaultClient
-func BuildHTTPClient(ro RequestOptions) *http.Client {
-
- if ro.HTTPClient != nil {
- return ro.HTTPClient
- }
-
- // Does the user want to change the defaults?
- if !ro.dontUseDefaultClient() {
- return http.DefaultClient
- }
-
- // Using the user config for tls timeout or default
- if ro.TLSHandshakeTimeout == 0 {
- ro.TLSHandshakeTimeout = tlsHandshakeTimeout
- }
-
- // Using the user config for dial timeout or default
- if ro.DialTimeout == 0 {
- ro.DialTimeout = dialTimeout
- }
-
- // Using the user config for dial keep alive or default
- if ro.DialKeepAlive == 0 {
- ro.DialKeepAlive = dialKeepAlive
- }
-
- if ro.RequestTimeout == 0 {
- ro.RequestTimeout = requestTimeout
- }
-
- var cookieJar http.CookieJar
-
- if ro.UseCookieJar {
- // The function does not return an error ever... so we are just ignoring it
- cookieJar, _ = cookiejar.New(&cookiejar.Options{PublicSuffixList: publicsuffix.List})
- }
-
- return &http.Client{
- Jar: cookieJar,
- Transport: createHTTPTransport(ro),
- Timeout: ro.RequestTimeout,
- }
-}
-
-func createHTTPTransport(ro RequestOptions) *http.Transport {
- ourHTTPTransport := &http.Transport{
- // These are borrowed from the default transporter
- Proxy: ro.proxySettings,
- Dial: (&net.Dialer{
- Timeout: ro.DialTimeout,
- KeepAlive: ro.DialKeepAlive,
- }).Dial,
- TLSHandshakeTimeout: ro.TLSHandshakeTimeout,
-
- // Here comes the user settings
- TLSClientConfig: &tls.Config{InsecureSkipVerify: ro.InsecureSkipVerify},
- DisableCompression: ro.DisableCompression,
- }
- EnsureTransporterFinalized(ourHTTPTransport)
- return ourHTTPTransport
-}
-
-// buildURLParams returns a URL with all of the params
-// Note: This function will override current URL params if they contradict what is provided in the map
-// That is what the "magic" is on the last line
-func buildURLParams(userURL string, params map[string]string) (string, error) {
- parsedURL, err := url.Parse(userURL)
-
- if err != nil {
- return "", err
- }
-
- parsedQuery, err := url.ParseQuery(parsedURL.RawQuery)
-
- for key, value := range params {
- parsedQuery.Set(key, value)
- }
-
- return strings.Join(
- []string{strings.Replace(parsedURL.String(),
- "?"+parsedURL.RawQuery, "", -1),
- parsedQuery.Encode()},
- "?"), nil
-}
-
-// addHTTPHeaders adds any additional HTTP headers that need to be added are added here including:
-// 1. Custom User agent
-// 2. Authorization Headers
-// 3. Any other header requested
-func addHTTPHeaders(ro *RequestOptions, req *http.Request) {
- for key, value := range ro.Headers {
- req.Header.Set(key, value)
- }
-
- if ro.UserAgent != "" {
- req.Header.Set("User-Agent", ro.UserAgent)
- } else {
- req.Header.Set("User-Agent", localUserAgent)
- }
-
- if ro.Auth != nil {
- req.SetBasicAuth(ro.Auth[0], ro.Auth[1])
- }
-
- if ro.IsAjax == true {
- req.Header.Set("X-Requested-With", "XMLHttpRequest")
- }
-}
-
-func addCookies(ro *RequestOptions, req *http.Request) {
- for _, c := range ro.Cookies {
- req.AddCookie(&c)
- }
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/response.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/response.go
deleted file mode 100644
index 3983d22..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/response.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package grequests
-
-import (
- "bytes"
- "encoding/json"
- "encoding/xml"
- "io"
- "net/http"
- "os"
-)
-
-// Response is what is returned to a user when they fire off a request
-type Response struct {
-
- // Ok is a boolean flag that validates that the server returned a 2xx code
- Ok bool
-
- // This is the Go error flag – if something went wrong within the request, this flag will be set.
- Error error
-
- // We want to abstract (at least at the moment) the Go http.Response object away. So we are going to make use of it
- // internal but not give the user access
- RawResponse *http.Response
-
- // StatusCode is the HTTP Status Code returned by the HTTP Response. Taken from resp.StatusCode
- StatusCode int
-
- // Header is a net/http/Header structure
- Header http.Header
-
- internalByteBuffer *bytes.Buffer
-}
-
-func buildResponse(resp *http.Response, err error) (*Response, error) {
- // If the connection didn't succeed we just return a blank response
- if err != nil {
- return &Response{Error: err}, err
- }
-
- goodResp := &Response{
- // If your code is within the 2xx range – the response is considered `Ok`
- Ok: resp.StatusCode >= 200 && resp.StatusCode < 300,
- Error: nil,
- RawResponse: resp,
- StatusCode: resp.StatusCode,
- Header: resp.Header,
- internalByteBuffer: bytes.NewBuffer([]byte{}),
- }
- EnsureResponseFinalized(goodResp)
- return goodResp, nil
-}
-
-// Read is part of our ability to support io.ReadCloser if someone wants to make use of the raw body
-func (r *Response) Read(p []byte) (n int, err error) {
-
- if r.Error != nil {
- return -1, r.Error
- }
-
- return r.RawResponse.Body.Read(p)
-}
-
-// Close is part of our ability to support io.ReadCloser if someone wants to make use of the raw body
-func (r *Response) Close() error {
-
- if r.Error != nil {
- return r.Error
- }
-
- return r.RawResponse.Body.Close()
-}
-
-// DownloadToFile allows you to download the contents of the response to a file
-func (r *Response) DownloadToFile(fileName string) error {
-
- if r.Error != nil {
- return r.Error
- }
-
- fd, err := os.Create(fileName)
-
- if err != nil {
- return err
- }
-
- defer r.Close() // This is a noop if we use the internal ByteBuffer
- defer fd.Close()
-
- if _, err := io.Copy(fd, r.getInternalReader()); err != nil && err != io.EOF {
- return err
- }
-
- return nil
-}
-
-// getInternalReader because we implement io.ReadCloser and optionally hold a large buffer of the response (created by
-// the user's request)
-func (r *Response) getInternalReader() io.Reader {
-
- if r.internalByteBuffer.Len() != 0 {
- return r.internalByteBuffer
- }
- return r
-}
-
-// XML is a method that will populate a struct that is provided `userStruct` with the XML returned within the
-// response body
-func (r *Response) XML(userStruct interface{}, charsetReader XMLCharDecoder) error {
-
- if r.Error != nil {
- return r.Error
- }
-
- xmlDecoder := xml.NewDecoder(r.getInternalReader())
-
- if charsetReader != nil {
- xmlDecoder.CharsetReader = charsetReader
- }
-
- defer r.Close()
-
- if err := xmlDecoder.Decode(&userStruct); err != nil && err != io.EOF {
- return err
- }
-
- return nil
-}
-
-// JSON is a method that will populate a struct that is provided `userStruct` with the JSON returned within the
-// response body
-func (r *Response) JSON(userStruct interface{}) error {
-
- if r.Error != nil {
- return r.Error
- }
-
- jsonDecoder := json.NewDecoder(r.getInternalReader())
- defer r.Close()
-
- if err := jsonDecoder.Decode(&userStruct); err != nil && err != io.EOF {
- return err
- }
-
- return nil
-}
-
-// createResponseBytesBuffer is a utility method that will populate the internal byte reader – this is largely used for .String()
-// and .Bytes()
-func (r *Response) populateResponseByteBuffer() {
-
- // Have I done this already?
- if r.internalByteBuffer.Len() != 0 {
- return
- }
-
- defer r.Close()
-
- // Is there any content?
- if r.RawResponse.ContentLength == 0 {
- return
- }
-
- // Did the server tell us how big the response is going to be?
- if r.RawResponse.ContentLength > 0 {
- r.internalByteBuffer.Grow(int(r.RawResponse.ContentLength))
- }
-
- if _, err := io.Copy(r.internalByteBuffer, r); err != nil && err != io.EOF {
- r.Error = err
- r.RawResponse.Body.Close()
- }
-
-}
-
-// Bytes returns the response as a byte array
-func (r *Response) Bytes() []byte {
-
- if r.Error != nil {
- return nil
- }
-
- r.populateResponseByteBuffer()
-
- // Are we still empty?
- if r.internalByteBuffer.Len() == 0 {
- return nil
- }
- return r.internalByteBuffer.Bytes()
-
-}
-
-// String returns the response as a string
-func (r *Response) String() string {
- if r.Error != nil {
- return ""
- }
-
- r.populateResponseByteBuffer()
-
- return r.internalByteBuffer.String()
-}
-
-// ClearInternalBuffer is a function that will clear the internal buffer that we use to hold the .String() and .Bytes()
-// data. Once you have used these functions – you may want to free up the memory.
-func (r *Response) ClearInternalBuffer() {
-
- if r.Error != nil {
- return // This is a noop as we will be dereferencing a null pointer
- }
-
- r.internalByteBuffer.Reset()
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/session.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/session.go
deleted file mode 100644
index 279fbb6..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/session.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package grequests
-
-import "net/http"
-
-// Session allows a user to make use of persistent cookies in between
-// HTTP requests
-type Session struct {
-
- // HTTPClient is the client that we will use to request the resources
- HTTPClient *http.Client
-}
-
-// NewSession returns a session struct which enables can be used to maintain establish a persistent state with the
-// server
-// This function will set UseCookieJar to true as that is the purpose of using the session
-func NewSession(ro *RequestOptions) *Session {
- if ro == nil {
- ro = &RequestOptions{}
- }
-
- ro.UseCookieJar = true
-
- return &Session{HTTPClient: BuildHTTPClient(*ro)}
-}
-
-// Get takes 2 parameters and returns a Response Struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-// A new session is created by calling NewSession with a request options struct
-func (s *Session) Get(url string, ro *RequestOptions) (*Response, error) {
- return doSessionRequest("GET", url, ro, s.HTTPClient)
-}
-
-// Put takes 2 parameters and returns a Response struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-// A new session is created by calling NewSession with a request options struct
-func (s *Session) Put(url string, ro *RequestOptions) (*Response, error) {
- return doSessionRequest("PUT", url, ro, s.HTTPClient)
-}
-
-// Patch takes 2 parameters and returns a Response struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-// A new session is created by calling NewSession with a request options struct
-func (s *Session) Patch(url string, ro *RequestOptions) (*Response, error) {
- return doSessionRequest("PATCH", url, ro, s.HTTPClient)
-}
-
-// Delete takes 2 parameters and returns a Response struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-// A new session is created by calling NewSession with a request options struct
-func (s *Session) Delete(url string, ro *RequestOptions) (*Response, error) {
- return doSessionRequest("DELETE", url, ro, s.HTTPClient)
-}
-
-// Post takes 2 parameters and returns a Response channel. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-// A new session is created by calling NewSession with a request options struct
-func (s *Session) Post(url string, ro *RequestOptions) (*Response, error) {
- return doSessionRequest("POST", url, ro, s.HTTPClient)
-}
-
-// Head takes 2 parameters and returns a Response channel. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-// A new session is created by calling NewSession with a request options struct
-func (s *Session) Head(url string, ro *RequestOptions) (*Response, error) {
- return doSessionRequest("HEAD", url, ro, s.HTTPClient)
-}
-
-// Options takes 2 parameters and returns a Response struct. These two options are:
-// 1. A URL
-// 2. A RequestOptions struct
-// If you do not intend to use the `RequestOptions` you can just pass nil
-// A new session is created by calling NewSession with a request options struct
-func (s *Session) Options(url string, ro *RequestOptions) (*Response, error) {
- return doSessionRequest("OPTIONS", url, ro, s.HTTPClient)
-}
-
-// CloseIdleConnections closes the idle connections that a session client may make use of
-func (s *Session) CloseIdleConnections() {
- s.HTTPClient.Transport.(*http.Transport).CloseIdleConnections()
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/utils.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/utils.go
deleted file mode 100644
index 03eab03..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/levigross/grequests/utils.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package grequests
-
-import (
- "errors"
- "io"
- "net/http"
- "runtime"
- "time"
-)
-
-const (
- localUserAgent = "GRequests/0.7"
-
- // Default value for net.Dialer Timeout
- dialTimeout = 30 * time.Second
-
- // Default value for net.Dialer KeepAlive
- dialKeepAlive = 30 * time.Second
-
- // Default value for http.Transport TLSHandshakeTimeout
- tlsHandshakeTimeout = 10 * time.Second
-
- // Default value for Request Timeout
- requestTimeout = 90 * time.Second
-)
-
-var (
- // ErrRedirectLimitExceeded is the error returned when the request responded
- // with too many redirects
- ErrRedirectLimitExceeded = errors.New("grequests: Request exceeded redirect count")
-
- // RedirectLimit is a tunable variable that specifies how many times we can
- // redirect in response to a redirect. This is the global variable, if you
- // wish to set this on a request by request basis, set it within the
- // `RequestOptions` structure
- RedirectLimit = 30
-
- // SensitiveHTTPHeaders is a map of sensitive HTTP headers that a user
- // doesn't want passed on a redirect. This is the global variable, if you
- // wish to set this on a request by request basis, set it within the
- // `RequestOptions` structure
- SensitiveHTTPHeaders = map[string]struct{}{
- "WWW-Authenticate": {},
- "Authorization": {},
- "Proxy-Authorization": {},
- }
-)
-
-// XMLCharDecoder is a helper type that takes a stream of bytes (not encoded in
-// UTF-8) and returns a reader that encodes the bytes into UTF-8. This is done
-// because Go's XML library only supports XML encoded in UTF-8
-type XMLCharDecoder func(charset string, input io.Reader) (io.Reader, error)
-
-func addRedirectFunctionality(client *http.Client, ro *RequestOptions) {
- client.CheckRedirect = func(req *http.Request, via []*http.Request) error {
- if ro.RedirectLimit == 0 {
- ro.RedirectLimit = RedirectLimit
- }
-
- if len(via) >= ro.RedirectLimit {
- return ErrRedirectLimitExceeded
- }
-
- if ro.SensitiveHTTPHeaders == nil {
- ro.SensitiveHTTPHeaders = SensitiveHTTPHeaders
- }
-
- for k, vv := range via[0].Header {
- // Is this a sensitive header?
- if _, found := ro.SensitiveHTTPHeaders[k]; found && !ro.RedirectLocationTrusted {
- continue
- }
-
- for _, v := range vv {
- req.Header.Add(k, v)
- }
- }
-
- return nil
- }
-}
-
-// EnsureTransporterFinalized will ensure that when the HTTP client is GCed
-// the runtime will close the idle connections (so that they won't leak)
-// this function was adopted from Hashicorp's go-cleanhttp package
-func EnsureTransporterFinalized(httpTransport *http.Transport) {
- runtime.SetFinalizer(&httpTransport, func(transportInt **http.Transport) {
- (*transportInt).CloseIdleConnections()
- })
-}
-
-// EnsureResponseFinalized will ensure that when the Response is GCed
-// the request body is closed so we aren't leaking fds
-func EnsureResponseFinalized(httpResp *Response) {
- runtime.SetFinalizer(&httpResp, func(httpResponseInt **Response) {
- (*httpResponseInt).RawResponse.Body.Close()
- })
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/.travis.yml b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/.travis.yml
deleted file mode 100644
index 70e012b..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/.travis.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-language: go
-
-go:
- - 1.0
- - 1.1
- - tip
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/CONTRIBUTORS b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/CONTRIBUTORS
deleted file mode 100644
index 958416e..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/CONTRIBUTORS
+++ /dev/null
@@ -1,5 +0,0 @@
-Alec Thomas
-Guilhem Lettron
-Ivan Daniluk
-Nimi Wariboko Jr
-Róbert Selvek
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/LICENSE b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/LICENSE
deleted file mode 100644
index f1f6cfc..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2013 Örjan Persson. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/README.md b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/README.md
deleted file mode 100644
index 65177d1..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/README.md
+++ /dev/null
@@ -1,89 +0,0 @@
-## Golang logging library
-
-[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/op/go-logging) [![build](https://img.shields.io/travis/op/go-logging.svg?style=flat)](https://travis-ci.org/op/go-logging)
-
-Package logging implements a logging infrastructure for Go. Its output format
-is customizable and supports different logging backends like syslog, file and
-memory. Multiple backends can be utilized with different log levels per backend
-and logger.
-
-## Example
-
-Let's have a look at an [example](examples/example.go) which demonstrates most
-of the features found in this library.
-
-[![Example Output](examples/example.png)](examples/example.go)
-
-```go
-package main
-
-import (
- "os"
-
- "github.com/op/go-logging"
-)
-
-var log = logging.MustGetLogger("example")
-
-// Example format string. Everything except the message has a custom color
-// which is dependent on the log level. Many fields have a custom output
-// formatting too, eg. the time returns the hour down to the milli second.
-var format = logging.MustStringFormatter(
- `%{color}%{time:15:04:05.000} %{shortfunc} â–¶ %{level:.4s} %{id:03x}%{color:reset} %{message}`,
-)
-
-// Password is just an example type implementing the Redactor interface. Any
-// time this is logged, the Redacted() function will be called.
-type Password string
-
-func (p Password) Redacted() interface{} {
- return logging.Redact(string(p))
-}
-
-func main() {
- // For demo purposes, create two backend for os.Stderr.
- backend1 := logging.NewLogBackend(os.Stderr, "", 0)
- backend2 := logging.NewLogBackend(os.Stderr, "", 0)
-
- // For messages written to backend2 we want to add some additional
- // information to the output, including the used log level and the name of
- // the function.
- backend2Formatter := logging.NewBackendFormatter(backend2, format)
-
- // Only errors and more severe messages should be sent to backend1
- backend1Leveled := logging.AddModuleLevel(backend1)
- backend1Leveled.SetLevel(logging.ERROR, "")
-
- // Set the backends to be used.
- logging.SetBackend(backend1Leveled, backend2Formatter)
-
- log.Debugf("debug %s", Password("secret"))
- log.Info("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("err")
- log.Critical("crit")
-}
-```
-
-## Installing
-
-### Using *go get*
-
- $ go get github.com/op/go-logging
-
-After this command *go-logging* is ready to use. Its source will be in:
-
- $GOROOT/src/pkg/github.com/op/go-logging
-
-You can use `go get -u` to update the package.
-
-## Documentation
-
-For docs, see http://godoc.org/github.com/op/go-logging or run:
-
- $ godoc github.com/op/go-logging
-
-## Additional resources
-
-* [wslog](https://godoc.org/github.com/cryptix/go/logging/wslog) -- exposes log messages through a WebSocket.
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/backend.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/backend.go
deleted file mode 100644
index 74d9201..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/backend.go
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logging
-
-// defaultBackend is the backend used for all logging calls.
-var defaultBackend LeveledBackend
-
-// Backend is the interface which a log backend need to implement to be able to
-// be used as a logging backend.
-type Backend interface {
- Log(Level, int, *Record) error
-}
-
-// SetBackend replaces the backend currently set with the given new logging
-// backend.
-func SetBackend(backends ...Backend) LeveledBackend {
- var backend Backend
- if len(backends) == 1 {
- backend = backends[0]
- } else {
- backend = MultiLogger(backends...)
- }
-
- defaultBackend = AddModuleLevel(backend)
- return defaultBackend
-}
-
-// SetLevel sets the logging level for the specified module. The module
-// corresponds to the string specified in GetLogger.
-func SetLevel(level Level, module string) {
- defaultBackend.SetLevel(level, module)
-}
-
-// GetLevel returns the logging level for the specified module.
-func GetLevel(module string) Level {
- return defaultBackend.GetLevel(module)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/examples/example.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/examples/example.go
deleted file mode 100644
index 9f4ddee..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/examples/example.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package main
-
-import (
- "os"
-
- "github.com/op/go-logging"
-)
-
-var log = logging.MustGetLogger("example")
-
-// Example format string. Everything except the message has a custom color
-// which is dependent on the log level. Many fields have a custom output
-// formatting too, eg. the time returns the hour down to the milli second.
-var format = logging.MustStringFormatter(
- `%{color}%{time:15:04:05.000} %{shortfunc} â–¶ %{level:.4s} %{id:03x}%{color:reset} %{message}`,
-)
-
-// Password is just an example type implementing the Redactor interface. Any
-// time this is logged, the Redacted() function will be called.
-type Password string
-
-func (p Password) Redacted() interface{} {
- return logging.Redact(string(p))
-}
-
-func main() {
- // For demo purposes, create two backend for os.Stderr.
- backend1 := logging.NewLogBackend(os.Stderr, "", 0)
- backend2 := logging.NewLogBackend(os.Stderr, "", 0)
-
- // For messages written to backend2 we want to add some additional
- // information to the output, including the used log level and the name of
- // the function.
- backend2Formatter := logging.NewBackendFormatter(backend2, format)
-
- // Only errors and more severe messages should be sent to backend1
- backend1Leveled := logging.AddModuleLevel(backend1)
- backend1Leveled.SetLevel(logging.ERROR, "")
-
- // Set the backends to be used.
- logging.SetBackend(backend1Leveled, backend2Formatter)
-
- log.Debugf("debug %s", Password("secret"))
- log.Info("info")
- log.Notice("notice")
- log.Warning("warning")
- log.Error("err")
- log.Critical("crit")
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/examples/example.png b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/examples/example.png
deleted file mode 100644
index ff3392b..0000000
Binary files a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/examples/example.png and /dev/null differ
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/format.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/format.go
deleted file mode 100644
index 0fb5a4f..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/format.go
+++ /dev/null
@@ -1,365 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logging
-
-import (
- "bytes"
- "errors"
- "fmt"
- "io"
- "os"
- "path"
- "path/filepath"
- "regexp"
- "runtime"
- "strings"
- "sync"
- "time"
-)
-
-// TODO see Formatter interface in fmt/print.go
-// TODO try text/template, maybe it have enough performance
-// TODO other template systems?
-// TODO make it possible to specify formats per backend?
-type fmtVerb int
-
-const (
- fmtVerbTime fmtVerb = iota
- fmtVerbLevel
- fmtVerbID
- fmtVerbPid
- fmtVerbProgram
- fmtVerbModule
- fmtVerbMessage
- fmtVerbLongfile
- fmtVerbShortfile
- fmtVerbLongpkg
- fmtVerbShortpkg
- fmtVerbLongfunc
- fmtVerbShortfunc
- fmtVerbLevelColor
-
- // Keep last, there are no match for these below.
- fmtVerbUnknown
- fmtVerbStatic
-)
-
-var fmtVerbs = []string{
- "time",
- "level",
- "id",
- "pid",
- "program",
- "module",
- "message",
- "longfile",
- "shortfile",
- "longpkg",
- "shortpkg",
- "longfunc",
- "shortfunc",
- "color",
-}
-
-const rfc3339Milli = "2006-01-02T15:04:05.999Z07:00"
-
-var defaultVerbsLayout = []string{
- rfc3339Milli,
- "s",
- "d",
- "d",
- "s",
- "s",
- "s",
- "s",
- "s",
- "s",
- "s",
- "s",
- "s",
- "",
-}
-
-var (
- pid = os.Getpid()
- program = filepath.Base(os.Args[0])
-)
-
-func getFmtVerbByName(name string) fmtVerb {
- for i, verb := range fmtVerbs {
- if name == verb {
- return fmtVerb(i)
- }
- }
- return fmtVerbUnknown
-}
-
-// Formatter is the required interface for a custom log record formatter.
-type Formatter interface {
- Format(calldepth int, r *Record, w io.Writer) error
-}
-
-// formatter is used by all backends unless otherwise overriden.
-var formatter struct {
- sync.RWMutex
- def Formatter
-}
-
-func getFormatter() Formatter {
- formatter.RLock()
- defer formatter.RUnlock()
- return formatter.def
-}
-
-var (
- // DefaultFormatter is the default formatter used and is only the message.
- DefaultFormatter = MustStringFormatter("%{message}")
-
- // GlogFormatter mimics the glog format
- GlogFormatter = MustStringFormatter("%{level:.1s}%{time:0102 15:04:05.999999} %{pid} %{shortfile}] %{message}")
-)
-
-// SetFormatter sets the default formatter for all new backends. A backend will
-// fetch this value once it is needed to format a record. Note that backends
-// will cache the formatter after the first point. For now, make sure to set
-// the formatter before logging.
-func SetFormatter(f Formatter) {
- formatter.Lock()
- defer formatter.Unlock()
- formatter.def = f
-}
-
-var formatRe = regexp.MustCompile(`%{([a-z]+)(?::(.*?[^\\]))?}`)
-
-type part struct {
- verb fmtVerb
- layout string
-}
-
-// stringFormatter contains a list of parts which explains how to build the
-// formatted string passed on to the logging backend.
-type stringFormatter struct {
- parts []part
-}
-
-// NewStringFormatter returns a new Formatter which outputs the log record as a
-// string based on the 'verbs' specified in the format string.
-//
-// The verbs:
-//
-// General:
-// %{id} Sequence number for log message (uint64).
-// %{pid} Process id (int)
-// %{time} Time when log occurred (time.Time)
-// %{level} Log level (Level)
-// %{module} Module (string)
-// %{program} Basename of os.Args[0] (string)
-// %{message} Message (string)
-// %{longfile} Full file name and line number: /a/b/c/d.go:23
-// %{shortfile} Final file name element and line number: d.go:23
-// %{color} ANSI color based on log level
-//
-// For normal types, the output can be customized by using the 'verbs' defined
-// in the fmt package, eg. '%{id:04d}' to make the id output be '%04d' as the
-// format string.
-//
-// For time.Time, use the same layout as time.Format to change the time format
-// when output, eg "2006-01-02T15:04:05.999Z-07:00".
-//
-// For the 'color' verb, the output can be adjusted to either use bold colors,
-// i.e., '%{color:bold}' or to reset the ANSI attributes, i.e.,
-// '%{color:reset}' Note that if you use the color verb explicitly, be sure to
-// reset it or else the color state will persist past your log message. e.g.,
-// "%{color:bold}%{time:15:04:05} %{level:-8s}%{color:reset} %{message}" will
-// just colorize the time and level, leaving the message uncolored.
-//
-// Colors on Windows is unfortunately not supported right now and is currently
-// a no-op.
-//
-// There's also a couple of experimental 'verbs'. These are exposed to get
-// feedback and needs a bit of tinkering. Hence, they might change in the
-// future.
-//
-// Experimental:
-// %{longpkg} Full package path, eg. github.com/go-logging
-// %{shortpkg} Base package path, eg. go-logging
-// %{longfunc} Full function name, eg. littleEndian.PutUint32
-// %{shortfunc} Base function name, eg. PutUint32
-func NewStringFormatter(format string) (Formatter, error) {
- var fmter = &stringFormatter{}
-
- // Find the boundaries of all %{vars}
- matches := formatRe.FindAllStringSubmatchIndex(format, -1)
- if matches == nil {
- return nil, errors.New("logger: invalid log format: " + format)
- }
-
- // Collect all variables and static text for the format
- prev := 0
- for _, m := range matches {
- start, end := m[0], m[1]
- if start > prev {
- fmter.add(fmtVerbStatic, format[prev:start])
- }
-
- name := format[m[2]:m[3]]
- verb := getFmtVerbByName(name)
- if verb == fmtVerbUnknown {
- return nil, errors.New("logger: unknown variable: " + name)
- }
-
- // Handle layout customizations or use the default. If this is not for the
- // time or color formatting, we need to prefix with %.
- layout := defaultVerbsLayout[verb]
- if m[4] != -1 {
- layout = format[m[4]:m[5]]
- }
- if verb != fmtVerbTime && verb != fmtVerbLevelColor {
- layout = "%" + layout
- }
-
- fmter.add(verb, layout)
- prev = end
- }
- end := format[prev:]
- if end != "" {
- fmter.add(fmtVerbStatic, end)
- }
-
- // Make a test run to make sure we can format it correctly.
- t, err := time.Parse(time.RFC3339, "2010-02-04T21:00:57-08:00")
- if err != nil {
- panic(err)
- }
- r := &Record{
- Id: 12345,
- Time: t,
- Module: "logger",
- fmt: "hello %s",
- args: []interface{}{"go"},
- }
- if err := fmter.Format(0, r, &bytes.Buffer{}); err != nil {
- return nil, err
- }
-
- return fmter, nil
-}
-
-// MustStringFormatter is equivalent to NewStringFormatter with a call to panic
-// on error.
-func MustStringFormatter(format string) Formatter {
- f, err := NewStringFormatter(format)
- if err != nil {
- panic("Failed to initialized string formatter: " + err.Error())
- }
- return f
-}
-
-func (f *stringFormatter) add(verb fmtVerb, layout string) {
- f.parts = append(f.parts, part{verb, layout})
-}
-
-func (f *stringFormatter) Format(calldepth int, r *Record, output io.Writer) error {
- for _, part := range f.parts {
- if part.verb == fmtVerbStatic {
- output.Write([]byte(part.layout))
- } else if part.verb == fmtVerbTime {
- output.Write([]byte(r.Time.Format(part.layout)))
- } else if part.verb == fmtVerbLevelColor {
- doFmtVerbLevelColor(part.layout, r.Level, output)
- } else {
- var v interface{}
- switch part.verb {
- case fmtVerbLevel:
- v = r.Level
- break
- case fmtVerbID:
- v = r.Id
- break
- case fmtVerbPid:
- v = pid
- break
- case fmtVerbProgram:
- v = program
- break
- case fmtVerbModule:
- v = r.Module
- break
- case fmtVerbMessage:
- v = r.Message()
- break
- case fmtVerbLongfile, fmtVerbShortfile:
- _, file, line, ok := runtime.Caller(calldepth + 1)
- if !ok {
- file = "???"
- line = 0
- } else if part.verb == fmtVerbShortfile {
- file = filepath.Base(file)
- }
- v = fmt.Sprintf("%s:%d", file, line)
- case fmtVerbLongfunc, fmtVerbShortfunc,
- fmtVerbLongpkg, fmtVerbShortpkg:
- // TODO cache pc
- v = "???"
- if pc, _, _, ok := runtime.Caller(calldepth + 1); ok {
- if f := runtime.FuncForPC(pc); f != nil {
- v = formatFuncName(part.verb, f.Name())
- }
- }
- default:
- panic("unhandled format part")
- }
- fmt.Fprintf(output, part.layout, v)
- }
- }
- return nil
-}
-
-// formatFuncName tries to extract certain part of the runtime formatted
-// function name to some pre-defined variation.
-//
-// This function is known to not work properly if the package path or name
-// contains a dot.
-func formatFuncName(v fmtVerb, f string) string {
- i := strings.LastIndex(f, "/")
- j := strings.Index(f[i+1:], ".")
- if j < 1 {
- return "???"
- }
- pkg, fun := f[:i+j+1], f[i+j+2:]
- switch v {
- case fmtVerbLongpkg:
- return pkg
- case fmtVerbShortpkg:
- return path.Base(pkg)
- case fmtVerbLongfunc:
- return fun
- case fmtVerbShortfunc:
- i = strings.LastIndex(fun, ".")
- return fun[i+1:]
- }
- panic("unexpected func formatter")
-}
-
-// backendFormatter combines a backend with a specific formatter making it
-// possible to have different log formats for different backends.
-type backendFormatter struct {
- b Backend
- f Formatter
-}
-
-// NewBackendFormatter creates a new backend which makes all records that
-// passes through it beeing formatted by the specific formatter.
-func NewBackendFormatter(b Backend, f Formatter) Backend {
- return &backendFormatter{b, f}
-}
-
-// Log implements the Log function required by the Backend interface.
-func (bf *backendFormatter) Log(level Level, calldepth int, r *Record) error {
- // Make a shallow copy of the record and replace any formatter
- r2 := *r
- r2.formatter = bf.f
- return bf.b.Log(level, calldepth+1, &r2)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/level.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/level.go
deleted file mode 100644
index 98dd191..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/level.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logging
-
-import (
- "errors"
- "strings"
- "sync"
-)
-
-// ErrInvalidLogLevel is used when an invalid log level has been used.
-var ErrInvalidLogLevel = errors.New("logger: invalid log level")
-
-// Level defines all available log levels for log messages.
-type Level int
-
-// Log levels.
-const (
- CRITICAL Level = iota
- ERROR
- WARNING
- NOTICE
- INFO
- DEBUG
-)
-
-var levelNames = []string{
- "CRITICAL",
- "ERROR",
- "WARNING",
- "NOTICE",
- "INFO",
- "DEBUG",
-}
-
-// String returns the string representation of a logging level.
-func (p Level) String() string {
- return levelNames[p]
-}
-
-// LogLevel returns the log level from a string representation.
-func LogLevel(level string) (Level, error) {
- for i, name := range levelNames {
- if strings.EqualFold(name, level) {
- return Level(i), nil
- }
- }
- return ERROR, ErrInvalidLogLevel
-}
-
-// Leveled interface is the interface required to be able to add leveled
-// logging.
-type Leveled interface {
- GetLevel(string) Level
- SetLevel(Level, string)
- IsEnabledFor(Level, string) bool
-}
-
-// LeveledBackend is a log backend with additional knobs for setting levels on
-// individual modules to different levels.
-type LeveledBackend interface {
- Backend
- Leveled
-}
-
-type moduleLeveled struct {
- levels map[string]Level
- backend Backend
- formatter Formatter
- once sync.Once
-}
-
-// AddModuleLevel wraps a log backend with knobs to have different log levels
-// for different modules.
-func AddModuleLevel(backend Backend) LeveledBackend {
- var leveled LeveledBackend
- var ok bool
- if leveled, ok = backend.(LeveledBackend); !ok {
- leveled = &moduleLeveled{
- levels: make(map[string]Level),
- backend: backend,
- }
- }
- return leveled
-}
-
-// GetLevel returns the log level for the given module.
-func (l *moduleLeveled) GetLevel(module string) Level {
- level, exists := l.levels[module]
- if exists == false {
- level, exists = l.levels[""]
- // no configuration exists, default to debug
- if exists == false {
- level = DEBUG
- }
- }
- return level
-}
-
-// SetLevel sets the log level for the given module.
-func (l *moduleLeveled) SetLevel(level Level, module string) {
- l.levels[module] = level
-}
-
-// IsEnabledFor will return true if logging is enabled for the given module.
-func (l *moduleLeveled) IsEnabledFor(level Level, module string) bool {
- return level <= l.GetLevel(module)
-}
-
-func (l *moduleLeveled) Log(level Level, calldepth int, rec *Record) (err error) {
- if l.IsEnabledFor(level, rec.Module) {
- // TODO get rid of traces of formatter here. BackendFormatter should be used.
- rec.formatter = l.getFormatterAndCacheCurrent()
- err = l.backend.Log(level, calldepth+1, rec)
- }
- return
-}
-
-func (l *moduleLeveled) getFormatterAndCacheCurrent() Formatter {
- l.once.Do(func() {
- if l.formatter == nil {
- l.formatter = getFormatter()
- }
- })
- return l.formatter
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/log_nix.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/log_nix.go
deleted file mode 100644
index f06a871..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/log_nix.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// +build !windows
-
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logging
-
-import (
- "bytes"
- "fmt"
- "io"
- "log"
-)
-
-type color int
-
-const (
- colorBlack = iota + 30
- colorRed
- colorGreen
- colorYellow
- colorBlue
- colorMagenta
- colorCyan
- colorWhite
-)
-
-var (
- colors = []string{
- CRITICAL: colorSeq(colorMagenta),
- ERROR: colorSeq(colorRed),
- WARNING: colorSeq(colorYellow),
- NOTICE: colorSeq(colorGreen),
- DEBUG: colorSeq(colorCyan),
- }
- boldcolors = []string{
- CRITICAL: colorSeqBold(colorMagenta),
- ERROR: colorSeqBold(colorRed),
- WARNING: colorSeqBold(colorYellow),
- NOTICE: colorSeqBold(colorGreen),
- DEBUG: colorSeqBold(colorCyan),
- }
-)
-
-// LogBackend utilizes the standard log module.
-type LogBackend struct {
- Logger *log.Logger
- Color bool
-}
-
-// NewLogBackend creates a new LogBackend.
-func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend {
- return &LogBackend{Logger: log.New(out, prefix, flag)}
-}
-
-// Log implements the Backend interface.
-func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error {
- if b.Color {
- buf := &bytes.Buffer{}
- buf.Write([]byte(colors[level]))
- buf.Write([]byte(rec.Formatted(calldepth + 1)))
- buf.Write([]byte("\033[0m"))
- // For some reason, the Go logger arbitrarily decided "2" was the correct
- // call depth...
- return b.Logger.Output(calldepth+2, buf.String())
- }
-
- return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1))
-}
-
-func colorSeq(color color) string {
- return fmt.Sprintf("\033[%dm", int(color))
-}
-
-func colorSeqBold(color color) string {
- return fmt.Sprintf("\033[%d;1m", int(color))
-}
-
-func doFmtVerbLevelColor(layout string, level Level, output io.Writer) {
- if layout == "bold" {
- output.Write([]byte(boldcolors[level]))
- } else if layout == "reset" {
- output.Write([]byte("\033[0m"))
- } else {
- output.Write([]byte(colors[level]))
- }
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/log_windows.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/log_windows.go
deleted file mode 100644
index b8dc92c..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/log_windows.go
+++ /dev/null
@@ -1,107 +0,0 @@
-// +build windows
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logging
-
-import (
- "bytes"
- "io"
- "log"
- "syscall"
-)
-
-var (
- kernel32DLL = syscall.NewLazyDLL("kernel32.dll")
- setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute")
-)
-
-// Character attributes
-// Note:
-// -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan).
-// Clearing all foreground or background colors results in black; setting all creates white.
-// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes.
-const (
- fgBlack = 0x0000
- fgBlue = 0x0001
- fgGreen = 0x0002
- fgCyan = 0x0003
- fgRed = 0x0004
- fgMagenta = 0x0005
- fgYellow = 0x0006
- fgWhite = 0x0007
- fgIntensity = 0x0008
- fgMask = 0x000F
-)
-
-var (
- colors = []uint16{
- INFO: fgWhite,
- CRITICAL: fgMagenta,
- ERROR: fgRed,
- WARNING: fgYellow,
- NOTICE: fgGreen,
- DEBUG: fgCyan,
- }
- boldcolors = []uint16{
- INFO: fgWhite | fgIntensity,
- CRITICAL: fgMagenta | fgIntensity,
- ERROR: fgRed | fgIntensity,
- WARNING: fgYellow | fgIntensity,
- NOTICE: fgGreen | fgIntensity,
- DEBUG: fgCyan | fgIntensity,
- }
-)
-
-type file interface {
- Fd() uintptr
-}
-
-// LogBackend utilizes the standard log module.
-type LogBackend struct {
- Logger *log.Logger
- Color bool
-
- // f is set to a non-nil value if the underlying writer which logs writes to
- // implements the file interface. This makes us able to colorise the output.
- f file
-}
-
-// NewLogBackend creates a new LogBackend.
-func NewLogBackend(out io.Writer, prefix string, flag int) *LogBackend {
- b := &LogBackend{Logger: log.New(out, prefix, flag)}
-
- // Unfortunately, the API used only takes an io.Writer where the Windows API
- // need the actual fd to change colors.
- if f, ok := out.(file); ok {
- b.f = f
- }
-
- return b
-}
-
-func (b *LogBackend) Log(level Level, calldepth int, rec *Record) error {
- if b.Color && b.f != nil {
- buf := &bytes.Buffer{}
- setConsoleTextAttribute(b.f, colors[level])
- buf.Write([]byte(rec.Formatted(calldepth + 1)))
- err := b.Logger.Output(calldepth+2, buf.String())
- setConsoleTextAttribute(b.f, fgWhite)
- return err
- }
- return b.Logger.Output(calldepth+2, rec.Formatted(calldepth+1))
-}
-
-// setConsoleTextAttribute sets the attributes of characters written to the
-// console screen buffer by the WriteFile or WriteConsole function.
-// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx.
-func setConsoleTextAttribute(f file, attribute uint16) bool {
- ok, _, _ := setConsoleTextAttributeProc.Call(f.Fd(), uintptr(attribute), 0)
- return ok != 0
-}
-
-func doFmtVerbLevelColor(layout string, level Level, output io.Writer) {
- // TODO not supported on Windows since the io.Writer here is actually a
- // bytes.Buffer.
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/logger.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/logger.go
deleted file mode 100644
index b430124..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/logger.go
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package logging implements a logging infrastructure for Go. It supports
-// different logging backends like syslog, file and memory. Multiple backends
-// can be utilized with different log levels per backend and logger.
-package logging
-
-import (
- "bytes"
- "fmt"
- "log"
- "os"
- "strings"
- "sync/atomic"
- "time"
-)
-
-// Redactor is an interface for types that may contain sensitive information
-// (like passwords), which shouldn't be printed to the log. The idea was found
-// in relog as part of the vitness project.
-type Redactor interface {
- Redacted() interface{}
-}
-
-// Redact returns a string of * having the same length as s.
-func Redact(s string) string {
- return strings.Repeat("*", len(s))
-}
-
-var (
- // Sequence number is incremented and utilized for all log records created.
- sequenceNo uint64
-
- // timeNow is a customizable for testing purposes.
- timeNow = time.Now
-)
-
-// Record represents a log record and contains the timestamp when the record
-// was created, an increasing id, filename and line and finally the actual
-// formatted log line.
-type Record struct {
- Id uint64
- Time time.Time
- Module string
- Level Level
-
- // message is kept as a pointer to have shallow copies update this once
- // needed.
- message *string
- args []interface{}
- fmt string
- formatter Formatter
- formatted string
-}
-
-// Formatted returns the formatted log record string.
-func (r *Record) Formatted(calldepth int) string {
- if r.formatted == "" {
- var buf bytes.Buffer
- r.formatter.Format(calldepth+1, r, &buf)
- r.formatted = buf.String()
- }
- return r.formatted
-}
-
-// Message returns the log record message.
-func (r *Record) Message() string {
- if r.message == nil {
- // Redact the arguments that implements the Redactor interface
- for i, arg := range r.args {
- if redactor, ok := arg.(Redactor); ok == true {
- r.args[i] = redactor.Redacted()
- }
- }
- msg := fmt.Sprintf(r.fmt, r.args...)
- r.message = &msg
- }
- return *r.message
-}
-
-// Logger is the actual logger which creates log records based on the functions
-// called and passes them to the underlying logging backend.
-type Logger struct {
- Module string
- backend LeveledBackend
- haveBackend bool
-
- // ExtraCallDepth can be used to add additional call depth when getting the
- // calling function. This is normally used when wrapping a logger.
- ExtraCalldepth int
-}
-
-// SetBackend overrides any previously defined backend for this logger.
-func (l *Logger) SetBackend(backend LeveledBackend) {
- l.backend = backend
- l.haveBackend = true
-}
-
-// TODO call NewLogger and remove MustGetLogger?
-
-// GetLogger creates and returns a Logger object based on the module name.
-func GetLogger(module string) (*Logger, error) {
- return &Logger{Module: module}, nil
-}
-
-// MustGetLogger is like GetLogger but panics if the logger can't be created.
-// It simplifies safe initialization of a global logger for eg. a package.
-func MustGetLogger(module string) *Logger {
- logger, err := GetLogger(module)
- if err != nil {
- panic("logger: " + module + ": " + err.Error())
- }
- return logger
-}
-
-// Reset restores the internal state of the logging library.
-func Reset() {
- // TODO make a global Init() method to be less magic? or make it such that
- // if there's no backends at all configured, we could use some tricks to
- // automatically setup backends based if we have a TTY or not.
- sequenceNo = 0
- b := SetBackend(NewLogBackend(os.Stderr, "", log.LstdFlags))
- b.SetLevel(DEBUG, "")
- SetFormatter(DefaultFormatter)
- timeNow = time.Now
-}
-
-// IsEnabledFor returns true if the logger is enabled for the given level.
-func (l *Logger) IsEnabledFor(level Level) bool {
- return defaultBackend.IsEnabledFor(level, l.Module)
-}
-
-func (l *Logger) log(lvl Level, format string, args ...interface{}) {
- if !l.IsEnabledFor(lvl) {
- return
- }
-
- // Create the logging record and pass it in to the backend
- record := &Record{
- Id: atomic.AddUint64(&sequenceNo, 1),
- Time: timeNow(),
- Module: l.Module,
- Level: lvl,
- fmt: format,
- args: args,
- }
-
- // TODO use channels to fan out the records to all backends?
- // TODO in case of errors, do something (tricky)
-
- // calldepth=2 brings the stack up to the caller of the level
- // methods, Info(), Fatal(), etc.
- // ExtraCallDepth allows this to be extended further up the stack in case we
- // are wrapping these methods, eg. to expose them package level
- if l.haveBackend {
- l.backend.Log(lvl, 2+l.ExtraCalldepth, record)
- return
- }
-
- defaultBackend.Log(lvl, 2+l.ExtraCalldepth, record)
-}
-
-// Fatal is equivalent to l.Critical(fmt.Sprint()) followed by a call to os.Exit(1).
-func (l *Logger) Fatal(args ...interface{}) {
- s := fmt.Sprint(args...)
- l.log(CRITICAL, "%s", s)
- os.Exit(1)
-}
-
-// Fatalf is equivalent to l.Critical followed by a call to os.Exit(1).
-func (l *Logger) Fatalf(format string, args ...interface{}) {
- l.log(CRITICAL, format, args...)
- os.Exit(1)
-}
-
-// Panic is equivalent to l.Critical(fmt.Sprint()) followed by a call to panic().
-func (l *Logger) Panic(args ...interface{}) {
- s := fmt.Sprint(args...)
- l.log(CRITICAL, "%s", s)
- panic(s)
-}
-
-// Panicf is equivalent to l.Critical followed by a call to panic().
-func (l *Logger) Panicf(format string, args ...interface{}) {
- s := fmt.Sprintf(format, args...)
- l.log(CRITICAL, "%s", s)
- panic(s)
-}
-
-// Critical logs a message using CRITICAL as log level.
-func (l *Logger) Critical(format string, args ...interface{}) {
- l.log(CRITICAL, format, args...)
-}
-
-// Error logs a message using ERROR as log level.
-func (l *Logger) Error(format string, args ...interface{}) {
- l.log(ERROR, format, args...)
-}
-
-// Errorf logs a message using ERROR as log level.
-func (l *Logger) Errorf(format string, args ...interface{}) {
- l.log(ERROR, format, args...)
-}
-
-// Warning logs a message using WARNING as log level.
-func (l *Logger) Warning(format string, args ...interface{}) {
- l.log(WARNING, format, args...)
-}
-
-// Warningf logs a message using WARNING as log level.
-func (l *Logger) Warningf(format string, args ...interface{}) {
- l.log(WARNING, format, args...)
-}
-
-// Notice logs a message using NOTICE as log level.
-func (l *Logger) Notice(format string, args ...interface{}) {
- l.log(NOTICE, format, args...)
-}
-
-// Noticef logs a message using NOTICE as log level.
-func (l *Logger) Noticef(format string, args ...interface{}) {
- l.log(NOTICE, format, args...)
-}
-
-// Info logs a message using INFO as log level.
-func (l *Logger) Info(format string, args ...interface{}) {
- l.log(INFO, format, args...)
-}
-
-// Infof logs a message using INFO as log level.
-func (l *Logger) Infof(format string, args ...interface{}) {
- l.log(INFO, format, args...)
-}
-
-// Debug logs a message using DEBUG as log level.
-func (l *Logger) Debug(format string, args ...interface{}) {
- l.log(DEBUG, format, args...)
-}
-
-// Debugf logs a message using DEBUG as log level.
-func (l *Logger) Debugf(format string, args ...interface{}) {
- l.log(DEBUG, format, args...)
-}
-
-func init() {
- Reset()
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/memory.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/memory.go
deleted file mode 100644
index 8d5152c..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/memory.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine
-
-package logging
-
-import (
- "sync"
- "sync/atomic"
- "time"
- "unsafe"
-)
-
-// TODO pick one of the memory backends and stick with it or share interface.
-
-// InitForTesting is a convenient method when using logging in a test. Once
-// called, the time will be frozen to January 1, 1970 UTC.
-func InitForTesting(level Level) *MemoryBackend {
- Reset()
-
- memoryBackend := NewMemoryBackend(10240)
-
- leveledBackend := AddModuleLevel(memoryBackend)
- leveledBackend.SetLevel(level, "")
- SetBackend(leveledBackend)
-
- timeNow = func() time.Time {
- return time.Unix(0, 0).UTC()
- }
- return memoryBackend
-}
-
-// Node is a record node pointing to an optional next node.
-type node struct {
- next *node
- Record *Record
-}
-
-// Next returns the next record node. If there's no node available, it will
-// return nil.
-func (n *node) Next() *node {
- return n.next
-}
-
-// MemoryBackend is a simple memory based logging backend that will not produce
-// any output but merly keep records, up to the given size, in memory.
-type MemoryBackend struct {
- size int32
- maxSize int32
- head, tail unsafe.Pointer
-}
-
-// NewMemoryBackend creates a simple in-memory logging backend.
-func NewMemoryBackend(size int) *MemoryBackend {
- return &MemoryBackend{maxSize: int32(size)}
-}
-
-// Log implements the Log method required by Backend.
-func (b *MemoryBackend) Log(level Level, calldepth int, rec *Record) error {
- var size int32
-
- n := &node{Record: rec}
- np := unsafe.Pointer(n)
-
- // Add the record to the tail. If there's no records available, tail and
- // head will both be nil. When we successfully set the tail and the previous
- // value was nil, it's safe to set the head to the current value too.
- for {
- tailp := b.tail
- swapped := atomic.CompareAndSwapPointer(
- &b.tail,
- tailp,
- np,
- )
- if swapped == true {
- if tailp == nil {
- b.head = np
- } else {
- (*node)(tailp).next = n
- }
- size = atomic.AddInt32(&b.size, 1)
- break
- }
- }
-
- // Since one record was added, we might have overflowed the list. Remove
- // a record if that is the case. The size will fluctate a bit, but
- // eventual consistent.
- if b.maxSize > 0 && size > b.maxSize {
- for {
- headp := b.head
- head := (*node)(b.head)
- if head.next == nil {
- break
- }
- swapped := atomic.CompareAndSwapPointer(
- &b.head,
- headp,
- unsafe.Pointer(head.next),
- )
- if swapped == true {
- atomic.AddInt32(&b.size, -1)
- break
- }
- }
- }
- return nil
-}
-
-// Head returns the oldest record node kept in memory. It can be used to
-// iterate over records, one by one, up to the last record.
-//
-// Note: new records can get added while iterating. Hence the number of records
-// iterated over might be larger than the maximum size.
-func (b *MemoryBackend) Head() *node {
- return (*node)(b.head)
-}
-
-type event int
-
-const (
- eventFlush event = iota
- eventStop
-)
-
-// ChannelMemoryBackend is very similar to the MemoryBackend, except that it
-// internally utilizes a channel.
-type ChannelMemoryBackend struct {
- maxSize int
- size int
- incoming chan *Record
- events chan event
- mu sync.Mutex
- running bool
- flushWg sync.WaitGroup
- stopWg sync.WaitGroup
- head, tail *node
-}
-
-// NewChannelMemoryBackend creates a simple in-memory logging backend which
-// utilizes a go channel for communication.
-//
-// Start will automatically be called by this function.
-func NewChannelMemoryBackend(size int) *ChannelMemoryBackend {
- backend := &ChannelMemoryBackend{
- maxSize: size,
- incoming: make(chan *Record, 1024),
- events: make(chan event),
- }
- backend.Start()
- return backend
-}
-
-// Start launches the internal goroutine which starts processing data from the
-// input channel.
-func (b *ChannelMemoryBackend) Start() {
- b.mu.Lock()
- defer b.mu.Unlock()
-
- // Launch the goroutine unless it's already running.
- if b.running != true {
- b.running = true
- b.stopWg.Add(1)
- go b.process()
- }
-}
-
-func (b *ChannelMemoryBackend) process() {
- defer b.stopWg.Done()
- for {
- select {
- case rec := <-b.incoming:
- b.insertRecord(rec)
- case e := <-b.events:
- switch e {
- case eventStop:
- return
- case eventFlush:
- for len(b.incoming) > 0 {
- b.insertRecord(<-b.incoming)
- }
- b.flushWg.Done()
- }
- }
- }
-}
-
-func (b *ChannelMemoryBackend) insertRecord(rec *Record) {
- prev := b.tail
- b.tail = &node{Record: rec}
- if prev == nil {
- b.head = b.tail
- } else {
- prev.next = b.tail
- }
-
- if b.maxSize > 0 && b.size >= b.maxSize {
- b.head = b.head.next
- } else {
- b.size++
- }
-}
-
-// Flush waits until all records in the buffered channel have been processed.
-func (b *ChannelMemoryBackend) Flush() {
- b.flushWg.Add(1)
- b.events <- eventFlush
- b.flushWg.Wait()
-}
-
-// Stop signals the internal goroutine to exit and waits until it have.
-func (b *ChannelMemoryBackend) Stop() {
- b.mu.Lock()
- if b.running == true {
- b.running = false
- b.events <- eventStop
- }
- b.mu.Unlock()
- b.stopWg.Wait()
-}
-
-// Log implements the Log method required by Backend.
-func (b *ChannelMemoryBackend) Log(level Level, calldepth int, rec *Record) error {
- b.incoming <- rec
- return nil
-}
-
-// Head returns the oldest record node kept in memory. It can be used to
-// iterate over records, one by one, up to the last record.
-//
-// Note: new records can get added while iterating. Hence the number of records
-// iterated over might be larger than the maximum size.
-func (b *ChannelMemoryBackend) Head() *node {
- return b.head
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/multi.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/multi.go
deleted file mode 100644
index 3731653..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/multi.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logging
-
-// TODO remove Level stuff from the multi logger. Do one thing.
-
-// multiLogger is a log multiplexer which can be used to utilize multiple log
-// backends at once.
-type multiLogger struct {
- backends []LeveledBackend
-}
-
-// MultiLogger creates a logger which contain multiple loggers.
-func MultiLogger(backends ...Backend) LeveledBackend {
- var leveledBackends []LeveledBackend
- for _, backend := range backends {
- leveledBackends = append(leveledBackends, AddModuleLevel(backend))
- }
- return &multiLogger{leveledBackends}
-}
-
-// Log passes the log record to all backends.
-func (b *multiLogger) Log(level Level, calldepth int, rec *Record) (err error) {
- for _, backend := range b.backends {
- if backend.IsEnabledFor(level, rec.Module) {
- // Shallow copy of the record for the formatted cache on Record and get the
- // record formatter from the backend.
- r2 := *rec
- if e := backend.Log(level, calldepth+1, &r2); e != nil {
- err = e
- }
- }
- }
- return
-}
-
-// GetLevel returns the highest level enabled by all backends.
-func (b *multiLogger) GetLevel(module string) Level {
- var level Level
- for _, backend := range b.backends {
- if backendLevel := backend.GetLevel(module); backendLevel > level {
- level = backendLevel
- }
- }
- return level
-}
-
-// SetLevel propagates the same level to all backends.
-func (b *multiLogger) SetLevel(level Level, module string) {
- for _, backend := range b.backends {
- backend.SetLevel(level, module)
- }
-}
-
-// IsEnabledFor returns true if any of the backends are enabled for it.
-func (b *multiLogger) IsEnabledFor(level Level, module string) bool {
- for _, backend := range b.backends {
- if backend.IsEnabledFor(level, module) {
- return true
- }
- }
- return false
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/syslog.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/syslog.go
deleted file mode 100644
index 4faa531..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/syslog.go
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//+build !windows,!plan9
-
-package logging
-
-import "log/syslog"
-
-// SyslogBackend is a simple logger to syslog backend. It automatically maps
-// the internal log levels to appropriate syslog log levels.
-type SyslogBackend struct {
- Writer *syslog.Writer
-}
-
-// NewSyslogBackend connects to the syslog daemon using UNIX sockets with the
-// given prefix. If prefix is not given, the prefix will be derived from the
-// launched command.
-func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) {
- var w *syslog.Writer
- w, err = syslog.New(syslog.LOG_CRIT, prefix)
- return &SyslogBackend{w}, err
-}
-
-// NewSyslogBackendPriority is the same as NewSyslogBackend, but with custom
-// syslog priority, like syslog.LOG_LOCAL3|syslog.LOG_DEBUG etc.
-func NewSyslogBackendPriority(prefix string, priority syslog.Priority) (b *SyslogBackend, err error) {
- var w *syslog.Writer
- w, err = syslog.New(priority, prefix)
- return &SyslogBackend{w}, err
-}
-
-// Log implements the Backend interface.
-func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error {
- line := rec.Formatted(calldepth + 1)
- switch level {
- case CRITICAL:
- return b.Writer.Crit(line)
- case ERROR:
- return b.Writer.Err(line)
- case WARNING:
- return b.Writer.Warning(line)
- case NOTICE:
- return b.Writer.Notice(line)
- case INFO:
- return b.Writer.Info(line)
- case DEBUG:
- return b.Writer.Debug(line)
- default:
- }
- panic("unhandled log level")
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/syslog_fallback.go b/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/syslog_fallback.go
deleted file mode 100644
index 91bc18d..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/github.com/op/go-logging/syslog_fallback.go
+++ /dev/null
@@ -1,28 +0,0 @@
-// Copyright 2013, Örjan Persson. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//+build windows plan9
-
-package logging
-
-import (
- "fmt"
-)
-
-type Priority int
-
-type SyslogBackend struct {
-}
-
-func NewSyslogBackend(prefix string) (b *SyslogBackend, err error) {
- return nil, fmt.Errorf("Platform does not support syslog")
-}
-
-func NewSyslogBackendPriority(prefix string, priority Priority) (b *SyslogBackend, err error) {
- return nil, fmt.Errorf("Platform does not support syslog")
-}
-
-func (b *SyslogBackend) Log(level Level, calldepth int, rec *Record) error {
- return fmt.Errorf("Platform does not support syslog")
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/LICENSE b/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/LICENSE
deleted file mode 100644
index 6a66aea..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/PATENTS b/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/PATENTS
deleted file mode 100644
index 7330990..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/publicsuffix/gen.go b/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/publicsuffix/gen.go
deleted file mode 100644
index ee2598c..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/publicsuffix/gen.go
+++ /dev/null
@@ -1,608 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build ignore
-
-package main
-
-// This program generates table.go and table_test.go.
-// Invoke as:
-//
-// go run gen.go -version "xxx" >table.go
-// go run gen.go -version "xxx" -test >table_test.go
-//
-// The version is derived from information found at
-// https://github.com/publicsuffix/list/commits/master/public_suffix_list.dat
-//
-// To fetch a particular git revision, such as 5c70ccd250, pass
-// -url "https://raw.githubusercontent.com/publicsuffix/list/5c70ccd250/public_suffix_list.dat"
-
-import (
- "bufio"
- "bytes"
- "flag"
- "fmt"
- "go/format"
- "io"
- "net/http"
- "os"
- "regexp"
- "sort"
- "strings"
-
- "golang.org/x/net/idna"
-)
-
-const (
- nodesBitsChildren = 9
- nodesBitsICANN = 1
- nodesBitsTextOffset = 15
- nodesBitsTextLength = 6
-
- childrenBitsWildcard = 1
- childrenBitsNodeType = 2
- childrenBitsHi = 14
- childrenBitsLo = 14
-)
-
-var (
- maxChildren int
- maxTextOffset int
- maxTextLength int
- maxHi uint32
- maxLo uint32
-)
-
-func max(a, b int) int {
- if a < b {
- return b
- }
- return a
-}
-
-func u32max(a, b uint32) uint32 {
- if a < b {
- return b
- }
- return a
-}
-
-const (
- nodeTypeNormal = 0
- nodeTypeException = 1
- nodeTypeParentOnly = 2
- numNodeType = 3
-)
-
-func nodeTypeStr(n int) string {
- switch n {
- case nodeTypeNormal:
- return "+"
- case nodeTypeException:
- return "!"
- case nodeTypeParentOnly:
- return "o"
- }
- panic("unreachable")
-}
-
-var (
- labelEncoding = map[string]uint32{}
- labelsList = []string{}
- labelsMap = map[string]bool{}
- rules = []string{}
-
- // validSuffix is used to check that the entries in the public suffix list
- // are in canonical form (after Punycode encoding). Specifically, capital
- // letters are not allowed.
- validSuffix = regexp.MustCompile(`^[a-z0-9_\!\*\-\.]+$`)
-
- crush = flag.Bool("crush", true, "make the generated node text as small as possible")
- subset = flag.Bool("subset", false, "generate only a subset of the full table, for debugging")
- url = flag.String("url",
- "https://publicsuffix.org/list/effective_tld_names.dat",
- "URL of the publicsuffix.org list. If empty, stdin is read instead")
- v = flag.Bool("v", false, "verbose output (to stderr)")
- version = flag.String("version", "", "the effective_tld_names.dat version")
- test = flag.Bool("test", false, "generate table_test.go")
-)
-
-func main() {
- if err := main1(); err != nil {
- fmt.Fprintln(os.Stderr, err)
- os.Exit(1)
- }
-}
-
-func main1() error {
- flag.Parse()
- if nodesBitsTextLength+nodesBitsTextOffset+nodesBitsICANN+nodesBitsChildren > 32 {
- return fmt.Errorf("not enough bits to encode the nodes table")
- }
- if childrenBitsLo+childrenBitsHi+childrenBitsNodeType+childrenBitsWildcard > 32 {
- return fmt.Errorf("not enough bits to encode the children table")
- }
- if *version == "" {
- return fmt.Errorf("-version was not specified")
- }
- var r io.Reader = os.Stdin
- if *url != "" {
- res, err := http.Get(*url)
- if err != nil {
- return err
- }
- if res.StatusCode != http.StatusOK {
- return fmt.Errorf("bad GET status for %s: %d", *url, res.Status)
- }
- r = res.Body
- defer res.Body.Close()
- }
-
- var root node
- icann := false
- buf := new(bytes.Buffer)
- br := bufio.NewReader(r)
- for {
- s, err := br.ReadString('\n')
- if err != nil {
- if err == io.EOF {
- break
- }
- return err
- }
- s = strings.TrimSpace(s)
- if strings.Contains(s, "BEGIN ICANN DOMAINS") {
- icann = true
- continue
- }
- if strings.Contains(s, "END ICANN DOMAINS") {
- icann = false
- continue
- }
- if s == "" || strings.HasPrefix(s, "//") {
- continue
- }
- s, err = idna.ToASCII(s)
- if err != nil {
- return err
- }
- if !validSuffix.MatchString(s) {
- return fmt.Errorf("bad publicsuffix.org list data: %q", s)
- }
-
- if *subset {
- switch {
- case s == "ac.jp" || strings.HasSuffix(s, ".ac.jp"):
- case s == "ak.us" || strings.HasSuffix(s, ".ak.us"):
- case s == "ao" || strings.HasSuffix(s, ".ao"):
- case s == "ar" || strings.HasSuffix(s, ".ar"):
- case s == "arpa" || strings.HasSuffix(s, ".arpa"):
- case s == "cy" || strings.HasSuffix(s, ".cy"):
- case s == "dyndns.org" || strings.HasSuffix(s, ".dyndns.org"):
- case s == "jp":
- case s == "kobe.jp" || strings.HasSuffix(s, ".kobe.jp"):
- case s == "kyoto.jp" || strings.HasSuffix(s, ".kyoto.jp"):
- case s == "om" || strings.HasSuffix(s, ".om"):
- case s == "uk" || strings.HasSuffix(s, ".uk"):
- case s == "uk.com" || strings.HasSuffix(s, ".uk.com"):
- case s == "tw" || strings.HasSuffix(s, ".tw"):
- case s == "zw" || strings.HasSuffix(s, ".zw"):
- case s == "xn--p1ai" || strings.HasSuffix(s, ".xn--p1ai"):
- // xn--p1ai is Russian-Cyrillic "рф".
- default:
- continue
- }
- }
-
- rules = append(rules, s)
-
- nt, wildcard := nodeTypeNormal, false
- switch {
- case strings.HasPrefix(s, "*."):
- s, nt = s[2:], nodeTypeParentOnly
- wildcard = true
- case strings.HasPrefix(s, "!"):
- s, nt = s[1:], nodeTypeException
- }
- labels := strings.Split(s, ".")
- for n, i := &root, len(labels)-1; i >= 0; i-- {
- label := labels[i]
- n = n.child(label)
- if i == 0 {
- if nt != nodeTypeParentOnly && n.nodeType == nodeTypeParentOnly {
- n.nodeType = nt
- }
- n.icann = n.icann && icann
- n.wildcard = n.wildcard || wildcard
- }
- labelsMap[label] = true
- }
- }
- labelsList = make([]string, 0, len(labelsMap))
- for label := range labelsMap {
- labelsList = append(labelsList, label)
- }
- sort.Strings(labelsList)
-
- p := printReal
- if *test {
- p = printTest
- }
- if err := p(buf, &root); err != nil {
- return err
- }
-
- b, err := format.Source(buf.Bytes())
- if err != nil {
- return err
- }
- _, err = os.Stdout.Write(b)
- return err
-}
-
-func printTest(w io.Writer, n *node) error {
- fmt.Fprintf(w, "// generated by go run gen.go; DO NOT EDIT\n\n")
- fmt.Fprintf(w, "package publicsuffix\n\nvar rules = [...]string{\n")
- for _, rule := range rules {
- fmt.Fprintf(w, "%q,\n", rule)
- }
- fmt.Fprintf(w, "}\n\nvar nodeLabels = [...]string{\n")
- if err := n.walk(w, printNodeLabel); err != nil {
- return err
- }
- fmt.Fprintf(w, "}\n")
- return nil
-}
-
-func printReal(w io.Writer, n *node) error {
- const header = `// generated by go run gen.go; DO NOT EDIT
-
-package publicsuffix
-
-const version = %q
-
-const (
- nodesBitsChildren = %d
- nodesBitsICANN = %d
- nodesBitsTextOffset = %d
- nodesBitsTextLength = %d
-
- childrenBitsWildcard = %d
- childrenBitsNodeType = %d
- childrenBitsHi = %d
- childrenBitsLo = %d
-)
-
-const (
- nodeTypeNormal = %d
- nodeTypeException = %d
- nodeTypeParentOnly = %d
-)
-
-// numTLD is the number of top level domains.
-const numTLD = %d
-
-`
- fmt.Fprintf(w, header, *version,
- nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength,
- childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo,
- nodeTypeNormal, nodeTypeException, nodeTypeParentOnly, len(n.children))
-
- text := makeText()
- if text == "" {
- return fmt.Errorf("internal error: makeText returned no text")
- }
- for _, label := range labelsList {
- offset, length := strings.Index(text, label), len(label)
- if offset < 0 {
- return fmt.Errorf("internal error: could not find %q in text %q", label, text)
- }
- maxTextOffset, maxTextLength = max(maxTextOffset, offset), max(maxTextLength, length)
- if offset >= 1<= 1< 64 {
- n, plus = 64, " +"
- }
- fmt.Fprintf(w, "%q%s\n", text[:n], plus)
- text = text[n:]
- }
-
- n.walk(w, assignIndexes)
-
- fmt.Fprintf(w, `
-
-// nodes is the list of nodes. Each node is represented as a uint32, which
-// encodes the node's children, wildcard bit and node type (as an index into
-// the children array), ICANN bit and text.
-//
-// In the //-comment after each node's data, the nodes indexes of the children
-// are formatted as (n0x1234-n0x1256), with * denoting the wildcard bit. The
-// nodeType is printed as + for normal, ! for exception, and o for parent-only
-// nodes that have children but don't match a domain label in their own right.
-// An I denotes an ICANN domain.
-//
-// The layout within the uint32, from MSB to LSB, is:
-// [%2d bits] unused
-// [%2d bits] children index
-// [%2d bits] ICANN bit
-// [%2d bits] text index
-// [%2d bits] text length
-var nodes = [...]uint32{
-`,
- 32-nodesBitsChildren-nodesBitsICANN-nodesBitsTextOffset-nodesBitsTextLength,
- nodesBitsChildren, nodesBitsICANN, nodesBitsTextOffset, nodesBitsTextLength)
- if err := n.walk(w, printNode); err != nil {
- return err
- }
- fmt.Fprintf(w, `}
-
-// children is the list of nodes' children, the parent's wildcard bit and the
-// parent's node type. If a node has no children then their children index
-// will be in the range [0, 6), depending on the wildcard bit and node type.
-//
-// The layout within the uint32, from MSB to LSB, is:
-// [%2d bits] unused
-// [%2d bits] wildcard bit
-// [%2d bits] node type
-// [%2d bits] high nodes index (exclusive) of children
-// [%2d bits] low nodes index (inclusive) of children
-var children=[...]uint32{
-`,
- 32-childrenBitsWildcard-childrenBitsNodeType-childrenBitsHi-childrenBitsLo,
- childrenBitsWildcard, childrenBitsNodeType, childrenBitsHi, childrenBitsLo)
- for i, c := range childrenEncoding {
- s := "---------------"
- lo := c & (1<> childrenBitsLo) & (1<>(childrenBitsLo+childrenBitsHi)) & (1<>(childrenBitsLo+childrenBitsHi+childrenBitsNodeType) != 0
- fmt.Fprintf(w, "0x%08x, // c0x%04x (%s)%s %s\n",
- c, i, s, wildcardStr(wildcard), nodeTypeStr(nodeType))
- }
- fmt.Fprintf(w, "}\n\n")
- fmt.Fprintf(w, "// max children %d (capacity %d)\n", maxChildren, 1<= 1<= 1<= 1< 0 && ss[0] == "" {
- ss = ss[1:]
- }
-
- // Join strings where one suffix matches another prefix.
- for {
- // Find best i, j, k such that ss[i][len-k:] == ss[j][:k],
- // maximizing overlap length k.
- besti := -1
- bestj := -1
- bestk := 0
- for i, s := range ss {
- if s == "" {
- continue
- }
- for j, t := range ss {
- if i == j {
- continue
- }
- for k := bestk + 1; k <= len(s) && k <= len(t); k++ {
- if s[len(s)-k:] == t[:k] {
- besti = i
- bestj = j
- bestk = k
- }
- }
- }
- }
- if bestk > 0 {
- if *v {
- fmt.Fprintf(os.Stderr, "%d-length overlap at (%4d,%4d) out of (%4d,%4d): %q and %q\n",
- bestk, besti, bestj, len(ss), len(ss), ss[besti], ss[bestj])
- }
- ss[besti] += ss[bestj][bestk:]
- ss[bestj] = ""
- continue
- }
- break
- }
-
- text := strings.Join(ss, "")
- if *v {
- fmt.Fprintf(os.Stderr, "crushed %d bytes to become %d bytes\n", beforeLength, len(text))
- }
- return text
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/publicsuffix/list.go b/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/publicsuffix/list.go
deleted file mode 100644
index 5e41a65..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/golang.org/x/net/publicsuffix/list.go
+++ /dev/null
@@ -1,133 +0,0 @@
-// Copyright 2012 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package publicsuffix provides a public suffix list based on data from
-// http://publicsuffix.org/. A public suffix is one under which Internet users
-// can directly register names.
-package publicsuffix
-
-// TODO: specify case sensitivity and leading/trailing dot behavior for
-// func PublicSuffix and func EffectiveTLDPlusOne.
-
-import (
- "fmt"
- "net/http/cookiejar"
- "strings"
-)
-
-// List implements the cookiejar.PublicSuffixList interface by calling the
-// PublicSuffix function.
-var List cookiejar.PublicSuffixList = list{}
-
-type list struct{}
-
-func (list) PublicSuffix(domain string) string {
- ps, _ := PublicSuffix(domain)
- return ps
-}
-
-func (list) String() string {
- return version
-}
-
-// PublicSuffix returns the public suffix of the domain using a copy of the
-// publicsuffix.org database compiled into the library.
-//
-// icann is whether the public suffix is managed by the Internet Corporation
-// for Assigned Names and Numbers. If not, the public suffix is privately
-// managed. For example, foo.org and foo.co.uk are ICANN domains,
-// foo.dyndns.org and foo.blogspot.co.uk are private domains.
-//
-// Use cases for distinguishing ICANN domains like foo.com from private
-// domains like foo.appspot.com can be found at
-// https://wiki.mozilla.org/Public_Suffix_List/Use_Cases
-func PublicSuffix(domain string) (publicSuffix string, icann bool) {
- lo, hi := uint32(0), uint32(numTLD)
- s, suffix, wildcard := domain, len(domain), false
-loop:
- for {
- dot := strings.LastIndex(s, ".")
- if wildcard {
- suffix = 1 + dot
- }
- if lo == hi {
- break
- }
- f := find(s[1+dot:], lo, hi)
- if f == notFound {
- break
- }
-
- u := nodes[f] >> (nodesBitsTextOffset + nodesBitsTextLength)
- icann = u&(1<>= nodesBitsICANN
- u = children[u&(1<>= childrenBitsLo
- hi = u & (1<>= childrenBitsHi
- switch u & (1<>= childrenBitsNodeType
- wildcard = u&(1<>= nodesBitsTextLength
- offset := x & (1<
-
-- [Overview](#overview)
-- [Features](#features)
-- [User-visible changes between v1 and v2](#user-visible-changes-between-v1-and-v2)
- - [Flags can be used at any point after their definition.](#flags-can-be-used-at-any-point-after-their-definition)
- - [Short flags can be combined with their parameters](#short-flags-can-be-combined-with-their-parameters)
-- [API changes between v1 and v2](#api-changes-between-v1-and-v2)
-- [Versions](#versions)
- - [V2 is the current stable version](#v2-is-the-current-stable-version)
- - [V1 is the OLD stable version](#v1-is-the-old-stable-version)
-- [Change History](#change-history)
-- [Examples](#examples)
- - [Simple Example](#simple-example)
- - [Complex Example](#complex-example)
-- [Reference Documentation](#reference-documentation)
- - [Displaying errors and usage information](#displaying-errors-and-usage-information)
- - [Sub-commands](#sub-commands)
- - [Custom Parsers](#custom-parsers)
- - [Repeatable flags](#repeatable-flags)
- - [Boolean Values](#boolean-values)
- - [Default Values](#default-values)
- - [Place-holders in Help](#place-holders-in-help)
- - [Consuming all remaining arguments](#consuming-all-remaining-arguments)
- - [Supporting -h for help](#supporting--h-for-help)
- - [Custom help](#custom-help)
-
-
-
-## Overview
-
-Kingpin is a [fluent-style](http://en.wikipedia.org/wiki/Fluent_interface),
-type-safe command-line parser. It supports flags, nested commands, and
-positional arguments.
-
-Install it with:
-
- $ go get gopkg.in/alecthomas/kingpin.v2
-
-It looks like this:
-
-```go
-var (
- verbose = kingpin.Flag("verbose", "Verbose mode.").Short('v').Bool()
- name = kingpin.Arg("name", "Name of user.").Required().String()
-)
-
-func main() {
- kingpin.Parse()
- fmt.Printf("%v, %s\n", *verbose, *name)
-}
-```
-
-More [examples](https://github.com/alecthomas/kingpin/tree/master/examples) are available.
-
-Second to parsing, providing the user with useful help is probably the most
-important thing a command-line parser does. Kingpin tries to provide detailed
-contextual help if `--help` is encountered at any point in the command line
-(excluding after `--`).
-
-## Features
-
-- Help output that isn't as ugly as sin.
-- Fully [customisable help](#custom-help), via Go templates.
-- Parsed, type-safe flags (`kingpin.Flag("f", "help").Int()`)
-- Parsed, type-safe positional arguments (`kingpin.Arg("a", "help").Int()`).
-- Parsed, type-safe, arbitrarily deep commands (`kingpin.Command("c", "help")`).
-- Support for required flags and required positional arguments (`kingpin.Flag("f", "").Required().Int()`).
-- Support for arbitrarily nested default commands (`command.Default()`).
-- Callbacks per command, flag and argument (`kingpin.Command("c", "").Action(myAction)`).
-- POSIX-style short flag combining (`-a -b` -> `-ab`).
-- Short-flag+parameter combining (`-a parm` -> `-aparm`).
-- Read command-line from files (`@`).
-- Automatically generate man pages (`--man-page`).
-
-## User-visible changes between v1 and v2
-
-### Flags can be used at any point after their definition.
-
-Flags can be specified at any point after their definition, not just
-*immediately after their associated command*. From the chat example below, the
-following used to be required:
-
-```
-$ chat --server=chat.server.com:8080 post --image=~/Downloads/owls.jpg pics
-```
-
-But the following will now work:
-
-```
-$ chat post --server=chat.server.com:8080 --image=~/Downloads/owls.jpg pics
-```
-
-### Short flags can be combined with their parameters
-
-Previously, if a short flag was used, any argument to that flag would have to
-be separated by a space. That is no longer the case.
-
-## API changes between v1 and v2
-
-- `ParseWithFileExpansion()` is gone. The new parser directly supports expanding `@`.
-- Added `FatalUsage()` and `FatalUsageContext()` for displaying an error + usage and terminating.
-- `Dispatch()` renamed to `Action()`.
-- Added `ParseContext()` for parsing a command line into its intermediate context form without executing.
-- Added `Terminate()` function to override the termination function.
-- Added `UsageForContextWithTemplate()` for printing usage via a custom template.
-- Added `UsageTemplate()` for overriding the default template to use. Two templates are included:
- 1. `DefaultUsageTemplate` - default template.
- 2. `CompactUsageTemplate` - compact command template for larger applications.
-
-## Versions
-
-Kingpin uses [gopkg.in](https://gopkg.in/alecthomas/kingpin) for versioning.
-
-The current stable version is [gopkg.in/alecthomas/kingpin.v2](https://gopkg.in/alecthomas/kingpin.v2). The previous version, [gopkg.in/alecthomas/kingpin.v1](https://gopkg.in/alecthomas/kingpin.v1), is deprecated and in maintenance mode.
-
-### [V2](https://gopkg.in/alecthomas/kingpin.v2) is the current stable version
-
-Installation:
-
-```sh
-$ go get gopkg.in/alecthomas/kingpin.v2
-```
-
-### [V1](https://gopkg.in/alecthomas/kingpin.v1) is the OLD stable version
-
-Installation:
-
-```sh
-$ go get gopkg.in/alecthomas/kingpin.v1
-```
-
-## Change History
-
-- *2015-09-19* -- Stable v2.1.0 release.
- - Added `command.Default()` to specify a default command to use if no other
- command matches. This allows for convenient user shortcuts.
- - Exposed `HelpFlag` and `VersionFlag` for further cusomisation.
- - `Action()` and `PreAction()` added and both now support an arbitrary
- number of callbacks.
- - `kingpin.SeparateOptionalFlagsUsageTemplate`.
- - `--help-long` and `--help-man` (hidden by default) flags.
- - Flags are "interspersed" by default, but can be disabled with `app.Interspersed(false)`.
- - Added flags for all simple builtin types (int8, uint16, etc.) and slice variants.
- - Use `app.Writer(os.Writer)` to specify the default writer for all output functions.
- - Dropped `os.Writer` prefix from all printf-like functions.
-
-- *2015-05-22* -- Stable v2.0.0 release.
- - Initial stable release of v2.0.0.
- - Fully supports interspersed flags, commands and arguments.
- - Flags can be present at any point after their logical definition.
- - Application.Parse() terminates if commands are present and a command is not parsed.
- - Dispatch() -> Action().
- - Actions are dispatched after all values are populated.
- - Override termination function (defaults to os.Exit).
- - Override output stream (defaults to os.Stderr).
- - Templatised usage help, with default and compact templates.
- - Make error/usage functions more consistent.
- - Support argument expansion from files by default (with @).
- - Fully public data model is available via .Model().
- - Parser has been completely refactored.
- - Parsing and execution has been split into distinct stages.
- - Use `go generate` to generate repeated flags.
- - Support combined short-flag+argument: -fARG.
-
-- *2015-01-23* -- Stable v1.3.4 release.
- - Support "--" for separating flags from positional arguments.
- - Support loading flags from files (ParseWithFileExpansion()). Use @FILE as an argument.
- - Add post-app and post-cmd validation hooks. This allows arbitrary validation to be added.
- - A bunch of improvements to help usage and formatting.
- - Support arbitrarily nested sub-commands.
-
-- *2014-07-08* -- Stable v1.2.0 release.
- - Pass any value through to `Strings()` when final argument.
- Allows for values that look like flags to be processed.
- - Allow `--help` to be used with commands.
- - Support `Hidden()` flags.
- - Parser for [units.Base2Bytes](https://github.com/alecthomas/units)
- type. Allows for flags like `--ram=512MB` or `--ram=1GB`.
- - Add an `Enum()` value, allowing only one of a set of values
- to be selected. eg. `Flag(...).Enum("debug", "info", "warning")`.
-
-- *2014-06-27* -- Stable v1.1.0 release.
- - Bug fixes.
- - Always return an error (rather than panicing) when misconfigured.
- - `OpenFile(flag, perm)` value type added, for finer control over opening files.
- - Significantly improved usage formatting.
-
-- *2014-06-19* -- Stable v1.0.0 release.
- - Support [cumulative positional](#consuming-all-remaining-arguments) arguments.
- - Return error rather than panic when there are fatal errors not caught by
- the type system. eg. when a default value is invalid.
- - Use gokpg.in.
-
-- *2014-06-10* -- Place-holder streamlining.
- - Renamed `MetaVar` to `PlaceHolder`.
- - Removed `MetaVarFromDefault`. Kingpin now uses [heuristics](#place-holders-in-help)
- to determine what to display.
-
-## Examples
-
-### Simple Example
-
-Kingpin can be used for simple flag+arg applications like so:
-
-```
-$ ping --help
-usage: ping [] []
-
-Flags:
- --debug Enable debug mode.
- --help Show help.
- -t, --timeout=5s Timeout waiting for ping.
-
-Args:
- IP address to ping.
- [] Number of packets to send
-$ ping 1.2.3.4 5
-Would ping: 1.2.3.4 with timeout 5s and count 0
-```
-
-From the following source:
-
-```go
-package main
-
-import (
- "fmt"
-
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-var (
- debug = kingpin.Flag("debug", "Enable debug mode.").Bool()
- timeout = kingpin.Flag("timeout", "Timeout waiting for ping.").Default("5s").OverrideDefaultFromEnvar("PING_TIMEOUT").Short('t').Duration()
- ip = kingpin.Arg("ip", "IP address to ping.").Required().IP()
- count = kingpin.Arg("count", "Number of packets to send").Int()
-)
-
-func main() {
- kingpin.Version("0.0.1")
- kingpin.Parse()
- fmt.Printf("Would ping: %s with timeout %s and count %d", *ip, *timeout, *count)
-}
-```
-
-### Complex Example
-
-Kingpin can also produce complex command-line applications with global flags,
-subcommands, and per-subcommand flags, like this:
-
-```
-$ chat --help
-usage: chat [] [] [ ...]
-
-A command-line chat application.
-
-Flags:
- --help Show help.
- --debug Enable debug mode.
- --server=127.0.0.1 Server address.
-
-Commands:
- help []
- Show help for a command.
-
- register
- Register a new user.
-
- post [] []
- Post a message to a channel.
-
-$ chat help post
-usage: chat [] post [] []
-
-Post a message to a channel.
-
-Flags:
- --image=IMAGE Image to post.
-
-Args:
- Channel to post to.
- [] Text to post.
-
-$ chat post --image=~/Downloads/owls.jpg pics
-...
-```
-
-From this code:
-
-```go
-package main
-
-import (
- "os"
- "strings"
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-var (
- app = kingpin.New("chat", "A command-line chat application.")
- debug = app.Flag("debug", "Enable debug mode.").Bool()
- serverIP = app.Flag("server", "Server address.").Default("127.0.0.1").IP()
-
- register = app.Command("register", "Register a new user.")
- registerNick = register.Arg("nick", "Nickname for user.").Required().String()
- registerName = register.Arg("name", "Name of user.").Required().String()
-
- post = app.Command("post", "Post a message to a channel.")
- postImage = post.Flag("image", "Image to post.").File()
- postChannel = post.Arg("channel", "Channel to post to.").Required().String()
- postText = post.Arg("text", "Text to post.").Strings()
-)
-
-func main() {
- switch kingpin.MustParse(app.Parse(os.Args[1:])) {
- // Register user
- case register.FullCommand():
- println(*registerNick)
-
- // Post message
- case post.FullCommand():
- if *postImage != nil {
- }
- text := strings.Join(*postText, " ")
- println("Post:", text)
- }
-}
-```
-
-## Reference Documentation
-
-### Displaying errors and usage information
-
-Kingpin exports a set of functions to provide consistent errors and usage
-information to the user.
-
-Error messages look something like this:
-
- : error:
-
-The functions on `Application` are:
-
-Function | Purpose
----------|--------------
-`Errorf(format, args)` | Display a printf formatted error to the user.
-`Fatalf(format, args)` | As with Errorf, but also call the termination handler.
-`FatalUsage(format, args)` | As with Fatalf, but also print contextual usage information.
-`FatalUsageContext(context, format, args)` | As with Fatalf, but also print contextual usage information from a `ParseContext`.
-`FatalIfError(err, format, args)` | Conditionally print an error prefixed with format+args, then call the termination handler
-
-There are equivalent global functions in the kingpin namespace for the default
-`kingpin.CommandLine` instance.
-
-### Sub-commands
-
-Kingpin supports nested sub-commands, with separate flag and positional
-arguments per sub-command. Note that positional arguments may only occur after
-sub-commands.
-
-For example:
-
-```go
-var (
- deleteCommand = kingpin.Command("delete", "Delete an object.")
- deleteUserCommand = deleteCommand.Command("user", "Delete a user.")
- deleteUserUIDFlag = deleteUserCommand.Flag("uid", "Delete user by UID rather than username.")
- deleteUserUsername = deleteUserCommand.Arg("username", "Username to delete.")
- deletePostCommand = deleteCommand.Command("post", "Delete a post.")
-)
-
-func main() {
- switch kingpin.Parse() {
- case "delete user":
- case "delete post":
- }
-}
-```
-
-### Custom Parsers
-
-Kingpin supports both flag and positional argument parsers for converting to
-Go types. For example, some included parsers are `Int()`, `Float()`,
-`Duration()` and `ExistingFile()`.
-
-Parsers conform to Go's [`flag.Value`](http://godoc.org/flag#Value)
-interface, so any existing implementations will work.
-
-For example, a parser for accumulating HTTP header values might look like this:
-
-```go
-type HTTPHeaderValue http.Header
-
-func (h *HTTPHeaderValue) Set(value string) error {
- parts := strings.SplitN(value, ":", 2)
- if len(parts) != 2 {
- return fmt.Errorf("expected HEADER:VALUE got '%s'", value)
- }
- (*http.Header)(h).Add(parts[0], parts[1])
- return nil
-}
-
-func (h *HTTPHeaderValue) String() string {
- return ""
-}
-```
-
-As a convenience, I would recommend something like this:
-
-```go
-func HTTPHeader(s Settings) (target *http.Header) {
- target = new(http.Header)
- s.SetValue((*HTTPHeaderValue)(target))
- return
-}
-```
-
-You would use it like so:
-
-```go
-headers = HTTPHeader(kingpin.Flag("header", "Add a HTTP header to the request.").Short('H'))
-```
-
-### Repeatable flags
-
-Depending on the `Value` they hold, some flags may be repeated. The
-`IsCumulative() bool` function on `Value` tells if it's safe to call `Set()`
-multiple times or if an error should be raised if several values are passed.
-
-The built-in `Value`s returning slices and maps, as well as `Counter` are
-examples of `Value`s that make a flag repeatable.
-
-### Boolean values
-
-Boolean values are uniquely managed by Kingpin. Each boolean flag will have a negative complement:
-`--` and `--no-`.
-
-### Default Values
-
-The default value is the zero value for a type. This can be overridden with
-the `Default(value...)` function on flags and arguments. This function accepts
-one or several strings, which are parsed by the value itself, so they *must*
-be compliant with the format expected.
-
-### Place-holders in Help
-
-The place-holder value for a flag is the value used in the help to describe
-the value of a non-boolean flag.
-
-The value provided to PlaceHolder() is used if provided, then the value
-provided by Default() if provided, then finally the capitalised flag name is
-used.
-
-Here are some examples of flags with various permutations:
-
- --name=NAME // Flag(...).String()
- --name="Harry" // Flag(...).Default("Harry").String()
- --name=FULL-NAME // flag(...).PlaceHolder("FULL-NAME").Default("Harry").String()
-
-### Consuming all remaining arguments
-
-A common command-line idiom is to use all remaining arguments for some
-purpose. eg. The following command accepts an arbitrary number of
-IP addresses as positional arguments:
-
- ./cmd ping 10.1.1.1 192.168.1.1
-
-Such arguments are similar to [repeatable flags](#repeatable-flags), but for
-arguments. Therefore they use the same `IsCumulative() bool` function on the
-underlying `Value`, so the built-in `Value`s for which the `Set()` function
-can be called several times will consume multiple arguments.
-
-To implement the above example with a custom `Value`, we might do something
-like this:
-
-```go
-type ipList []net.IP
-
-func (i *ipList) Set(value string) error {
- if ip := net.ParseIP(value); ip == nil {
- return fmt.Errorf("'%s' is not an IP address", value)
- } else {
- *i = append(*i, ip)
- return nil
- }
-}
-
-func (i *ipList) String() string {
- return ""
-}
-
-func (i *ipList) IsCumulative() bool {
- return true
-}
-
-func IPList(s Settings) (target *[]net.IP) {
- target = new([]net.IP)
- s.SetValue((*ipList)(target))
- return
-}
-```
-
-And use it like so:
-
-```go
-ips := IPList(kingpin.Arg("ips", "IP addresses to ping."))
-```
-
-### Supporting -h for help
-
-`kingpin.CommandLine.HelpFlag.Short('h')`
-
-### Custom help
-
-Kingpin v2 supports templatised help using the text/template library (actually, [a fork](https://github.com/alecthomas/template)).
-
-You can specify the template to use with the [Application.UsageTemplate()](http://godoc.org/gopkg.in/alecthomas/kingpin.v2#Application.UsageTemplate) function.
-
-There are four included templates: `kingpin.DefaultUsageTemplate` is the default,
-`kingpin.CompactUsageTemplate` provides a more compact representation for more complex command-line structures,
-`kingpin.SeparateOptionalFlagsUsageTemplate` looks like the default template, but splits required
-and optional command flags into separate lists, and `kingpin.ManPageTemplate` is used to generate man pages.
-
-See the above templates for examples of usage, and the the function [UsageForContextWithTemplate()](https://github.com/alecthomas/kingpin/blob/master/usage.go#L198) method for details on the context.
-
-#### Default help template
-
-```
-$ go run ./examples/curl/curl.go --help
-usage: curl [] [ ...]
-
-An example implementation of curl.
-
-Flags:
- --help Show help.
- -t, --timeout=5s Set connection timeout.
- -H, --headers=HEADER=VALUE
- Add HTTP headers to the request.
-
-Commands:
- help [...]
- Show help.
-
- get url
- Retrieve a URL.
-
- get file
- Retrieve a file.
-
- post []
- POST a resource.
-```
-
-#### Compact help template
-
-```
-$ go run ./examples/curl/curl.go --help
-usage: curl [] [ ...]
-
-An example implementation of curl.
-
-Flags:
- --help Show help.
- -t, --timeout=5s Set connection timeout.
- -H, --headers=HEADER=VALUE
- Add HTTP headers to the request.
-
-Commands:
- help [...]
- get []
- url
- file
- post []
-```
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/actions.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/actions.go
deleted file mode 100644
index 72d6cbd..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/actions.go
+++ /dev/null
@@ -1,42 +0,0 @@
-package kingpin
-
-// Action callback executed at various stages after all values are populated.
-// The application, commands, arguments and flags all have corresponding
-// actions.
-type Action func(*ParseContext) error
-
-type actionMixin struct {
- actions []Action
- preActions []Action
-}
-
-type actionApplier interface {
- applyActions(*ParseContext) error
- applyPreActions(*ParseContext) error
-}
-
-func (a *actionMixin) addAction(action Action) {
- a.actions = append(a.actions, action)
-}
-
-func (a *actionMixin) addPreAction(action Action) {
- a.preActions = append(a.preActions, action)
-}
-
-func (a *actionMixin) applyActions(context *ParseContext) error {
- for _, action := range a.actions {
- if err := action(context); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (a *actionMixin) applyPreActions(context *ParseContext) error {
- for _, preAction := range a.preActions {
- if err := preAction(context); err != nil {
- return err
- }
- }
- return nil
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/app.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/app.go
deleted file mode 100644
index 2e2c5a0..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/app.go
+++ /dev/null
@@ -1,569 +0,0 @@
-package kingpin
-
-import (
- "fmt"
- "io"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- ErrCommandNotSpecified = fmt.Errorf("command not specified")
-)
-
-var (
- envarTransformRegexp = regexp.MustCompile(`[^a-zA-Z_]+`)
-)
-
-type ApplicationValidator func(*Application) error
-
-// An Application contains the definitions of flags, arguments and commands
-// for an application.
-type Application struct {
- *flagGroup
- *argGroup
- *cmdGroup
- actionMixin
- initialized bool
-
- Name string
- Help string
-
- author string
- version string
- writer io.Writer // Destination for usage and errors.
- usageTemplate string
- validator ApplicationValidator
- terminate func(status int) // See Terminate()
- noInterspersed bool // can flags be interspersed with args (or must they come first)
- defaultEnvars bool
-
- // Help flag. Exposed for user customisation.
- HelpFlag *FlagClause
- // Help command. Exposed for user customisation. May be nil.
- HelpCommand *CmdClause
- // Version flag. Exposed for user customisation. May be nil.
- VersionFlag *FlagClause
-}
-
-// New creates a new Kingpin application instance.
-func New(name, help string) *Application {
- a := &Application{
- flagGroup: newFlagGroup(),
- argGroup: newArgGroup(),
- Name: name,
- Help: help,
- writer: os.Stderr,
- usageTemplate: DefaultUsageTemplate,
- terminate: os.Exit,
- }
- a.cmdGroup = newCmdGroup(a)
- a.HelpFlag = a.Flag("help", "Show context-sensitive help (also try --help-long and --help-man).")
- a.HelpFlag.Bool()
- a.Flag("help-long", "Generate long help.").Hidden().PreAction(a.generateLongHelp).Bool()
- a.Flag("help-man", "Generate a man page.").Hidden().PreAction(a.generateManPage).Bool()
- return a
-}
-
-func (a *Application) generateLongHelp(c *ParseContext) error {
- a.Writer(os.Stdout)
- if err := a.UsageForContextWithTemplate(c, 2, LongHelpTemplate); err != nil {
- return err
- }
- a.terminate(0)
- return nil
-}
-
-func (a *Application) generateManPage(c *ParseContext) error {
- a.Writer(os.Stdout)
- if err := a.UsageForContextWithTemplate(c, 2, ManPageTemplate); err != nil {
- return err
- }
- a.terminate(0)
- return nil
-}
-
-// DefaultEnvars configures all flags (that do not already have an associated
-// envar) to use a default environment variable in the form "_".
-//
-// For example, if the application is named "foo" and a flag is named "bar-
-// waz" the environment variable: "FOO_BAR_WAZ".
-func (a *Application) DefaultEnvars() *Application {
- a.defaultEnvars = true
- return a
-}
-
-// Terminate specifies the termination handler. Defaults to os.Exit(status).
-// If nil is passed, a no-op function will be used.
-func (a *Application) Terminate(terminate func(int)) *Application {
- if terminate == nil {
- terminate = func(int) {}
- }
- a.terminate = terminate
- return a
-}
-
-// Specify the writer to use for usage and errors. Defaults to os.Stderr.
-func (a *Application) Writer(w io.Writer) *Application {
- a.writer = w
- return a
-}
-
-// UsageTemplate specifies the text template to use when displaying usage
-// information. The default is UsageTemplate.
-func (a *Application) UsageTemplate(template string) *Application {
- a.usageTemplate = template
- return a
-}
-
-// Validate sets a validation function to run when parsing.
-func (a *Application) Validate(validator ApplicationValidator) *Application {
- a.validator = validator
- return a
-}
-
-// ParseContext parses the given command line and returns the fully populated
-// ParseContext.
-func (a *Application) ParseContext(args []string) (*ParseContext, error) {
- return a.parseContext(false, args)
-}
-
-func (a *Application) parseContext(ignoreDefault bool, args []string) (*ParseContext, error) {
- if err := a.init(); err != nil {
- return nil, err
- }
- context := tokenize(args, ignoreDefault)
- err := parse(context, a)
- return context, err
-}
-
-// Parse parses command-line arguments. It returns the selected command and an
-// error. The selected command will be a space separated subcommand, if
-// subcommands have been configured.
-//
-// This will populate all flag and argument values, call all callbacks, and so
-// on.
-func (a *Application) Parse(args []string) (command string, err error) {
- context, err := a.ParseContext(args)
- if err != nil {
- return "", err
- }
- a.maybeHelp(context)
- if !context.EOL() {
- return "", fmt.Errorf("unexpected argument '%s'", context.Peek())
- }
- command, err = a.execute(context)
- if err == ErrCommandNotSpecified {
- a.writeUsage(context, nil)
- }
- return command, err
-}
-
-func (a *Application) writeUsage(context *ParseContext, err error) {
- if err != nil {
- a.Errorf("%s", err)
- }
- if err := a.UsageForContext(context); err != nil {
- panic(err)
- }
- a.terminate(1)
-}
-
-func (a *Application) maybeHelp(context *ParseContext) {
- for _, element := range context.Elements {
- if flag, ok := element.Clause.(*FlagClause); ok && flag == a.HelpFlag {
- a.writeUsage(context, nil)
- }
- }
-}
-
-// findCommandFromArgs finds a command (if any) from the given command line arguments.
-func (a *Application) findCommandFromArgs(args []string) (command string, err error) {
- if err := a.init(); err != nil {
- return "", err
- }
- context := tokenize(args, false)
- if _, err := a.parse(context); err != nil {
- return "", err
- }
- return a.findCommandFromContext(context), nil
-}
-
-// findCommandFromContext finds a command (if any) from a parsed context.
-func (a *Application) findCommandFromContext(context *ParseContext) string {
- commands := []string{}
- for _, element := range context.Elements {
- if c, ok := element.Clause.(*CmdClause); ok {
- commands = append(commands, c.name)
- }
- }
- return strings.Join(commands, " ")
-}
-
-// Version adds a --version flag for displaying the application version.
-func (a *Application) Version(version string) *Application {
- a.version = version
- a.VersionFlag = a.Flag("version", "Show application version.").PreAction(func(*ParseContext) error {
- fmt.Fprintln(a.writer, version)
- a.terminate(0)
- return nil
- })
- a.VersionFlag.Bool()
- return a
-}
-
-func (a *Application) Author(author string) *Application {
- a.author = author
- return a
-}
-
-// Action callback to call when all values are populated and parsing is
-// complete, but before any command, flag or argument actions.
-//
-// All Action() callbacks are called in the order they are encountered on the
-// command line.
-func (a *Application) Action(action Action) *Application {
- a.addAction(action)
- return a
-}
-
-// Action called after parsing completes but before validation and execution.
-func (a *Application) PreAction(action Action) *Application {
- a.addPreAction(action)
- return a
-}
-
-// Command adds a new top-level command.
-func (a *Application) Command(name, help string) *CmdClause {
- return a.addCommand(name, help)
-}
-
-// Interspersed control if flags can be interspersed with positional arguments
-//
-// true (the default) means that they can, false means that all the flags must appear before the first positional arguments.
-func (a *Application) Interspersed(interspersed bool) *Application {
- a.noInterspersed = !interspersed
- return a
-}
-
-func (a *Application) defaultEnvarPrefix() string {
- if a.defaultEnvars {
- return a.Name
- }
- return ""
-}
-
-func (a *Application) init() error {
- if a.initialized {
- return nil
- }
- if a.cmdGroup.have() && a.argGroup.have() {
- return fmt.Errorf("can't mix top-level Arg()s with Command()s")
- }
-
- // If we have subcommands, add a help command at the top-level.
- if a.cmdGroup.have() {
- var command []string
- a.HelpCommand = a.Command("help", "Show help.").PreAction(func(context *ParseContext) error {
- a.Usage(command)
- a.terminate(0)
- return nil
- })
- a.HelpCommand.Arg("command", "Show help on command.").StringsVar(&command)
- // Make help first command.
- l := len(a.commandOrder)
- a.commandOrder = append(a.commandOrder[l-1:l], a.commandOrder[:l-1]...)
- }
-
- if err := a.flagGroup.init(a.defaultEnvarPrefix()); err != nil {
- return err
- }
- if err := a.cmdGroup.init(); err != nil {
- return err
- }
- if err := a.argGroup.init(); err != nil {
- return err
- }
- for _, cmd := range a.commands {
- if err := cmd.init(); err != nil {
- return err
- }
- }
- flagGroups := []*flagGroup{a.flagGroup}
- for _, cmd := range a.commandOrder {
- if err := checkDuplicateFlags(cmd, flagGroups); err != nil {
- return err
- }
- }
- a.initialized = true
- return nil
-}
-
-// Recursively check commands for duplicate flags.
-func checkDuplicateFlags(current *CmdClause, flagGroups []*flagGroup) error {
- // Check for duplicates.
- for _, flags := range flagGroups {
- for _, flag := range current.flagOrder {
- if flag.shorthand != 0 {
- if _, ok := flags.short[string(flag.shorthand)]; ok {
- return fmt.Errorf("duplicate short flag -%c", flag.shorthand)
- }
- }
- if _, ok := flags.long[flag.name]; ok {
- return fmt.Errorf("duplicate long flag --%s", flag.name)
- }
- }
- }
- flagGroups = append(flagGroups, current.flagGroup)
- // Check subcommands.
- for _, subcmd := range current.commandOrder {
- if err := checkDuplicateFlags(subcmd, flagGroups); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (a *Application) execute(context *ParseContext) (string, error) {
- var err error
- selected := []string{}
-
- if err = a.setDefaults(context); err != nil {
- return "", err
- }
-
- selected, err = a.setValues(context)
- if err != nil {
- return "", err
- }
-
- if err = a.applyPreActions(context); err != nil {
- return "", err
- }
-
- if err = a.validateRequired(context); err != nil {
- return "", err
- }
-
- if err = a.applyValidators(context); err != nil {
- return "", err
- }
-
- if err = a.applyActions(context); err != nil {
- return "", err
- }
-
- command := strings.Join(selected, " ")
- if command == "" && a.cmdGroup.have() {
- return "", ErrCommandNotSpecified
- }
- return command, err
-}
-
-func (a *Application) setDefaults(context *ParseContext) error {
- flagElements := map[string]*ParseElement{}
- for _, element := range context.Elements {
- if flag, ok := element.Clause.(*FlagClause); ok {
- flagElements[flag.name] = element
- }
- }
-
- argElements := map[string]*ParseElement{}
- for _, element := range context.Elements {
- if arg, ok := element.Clause.(*ArgClause); ok {
- argElements[arg.name] = element
- }
- }
-
- // Check required flags and set defaults.
- for _, flag := range context.flags.long {
- if flagElements[flag.name] == nil {
- if err := flag.setDefault(); err != nil {
- return err
- }
- }
- }
-
- for _, arg := range context.arguments.args {
- if argElements[arg.name] == nil {
- // Set defaults, if any.
- for _, defaultValue := range arg.defaultValues {
- if err := arg.value.Set(defaultValue); err != nil {
- return err
- }
- }
- }
- }
-
- return nil
-}
-
-func (a *Application) validateRequired(context *ParseContext) error {
- flagElements := map[string]*ParseElement{}
- for _, element := range context.Elements {
- if flag, ok := element.Clause.(*FlagClause); ok {
- flagElements[flag.name] = element
- }
- }
-
- argElements := map[string]*ParseElement{}
- for _, element := range context.Elements {
- if arg, ok := element.Clause.(*ArgClause); ok {
- argElements[arg.name] = element
- }
- }
-
- // Check required flags and set defaults.
- for _, flag := range context.flags.long {
- if flagElements[flag.name] == nil {
- // Check required flags were provided.
- if flag.needsValue() {
- return fmt.Errorf("required flag --%s not provided", flag.name)
- }
- }
- }
-
- for _, arg := range context.arguments.args {
- if argElements[arg.name] == nil {
- if arg.required {
- return fmt.Errorf("required argument '%s' not provided", arg.name)
- }
- }
- }
- return nil
-}
-
-func (a *Application) setValues(context *ParseContext) (selected []string, err error) {
- // Set all arg and flag values.
- var (
- lastCmd *CmdClause
- flagSet = map[string]struct{}{}
- )
- for _, element := range context.Elements {
- switch clause := element.Clause.(type) {
- case *FlagClause:
- if _, ok := flagSet[clause.name]; ok {
- if v, ok := clause.value.(repeatableFlag); !ok || !v.IsCumulative() {
- return nil, fmt.Errorf("flag '%s' cannot be repeated", clause.name)
- }
- }
- if err = clause.value.Set(*element.Value); err != nil {
- return
- }
- flagSet[clause.name] = struct{}{}
-
- case *ArgClause:
- if err = clause.value.Set(*element.Value); err != nil {
- return
- }
-
- case *CmdClause:
- if clause.validator != nil {
- if err = clause.validator(clause); err != nil {
- return
- }
- }
- selected = append(selected, clause.name)
- lastCmd = clause
- }
- }
-
- if lastCmd != nil && len(lastCmd.commands) > 0 {
- return nil, fmt.Errorf("must select a subcommand of '%s'", lastCmd.FullCommand())
- }
-
- return
-}
-
-func (a *Application) applyValidators(context *ParseContext) (err error) {
- // Call command validation functions.
- for _, element := range context.Elements {
- if cmd, ok := element.Clause.(*CmdClause); ok && cmd.validator != nil {
- if err = cmd.validator(cmd); err != nil {
- return err
- }
- }
- }
-
- if a.validator != nil {
- err = a.validator(a)
- }
- return err
-}
-
-func (a *Application) applyPreActions(context *ParseContext) error {
- if err := a.actionMixin.applyPreActions(context); err != nil {
- return err
- }
- // Dispatch to actions.
- for _, element := range context.Elements {
- if applier, ok := element.Clause.(actionApplier); ok {
- if err := applier.applyPreActions(context); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (a *Application) applyActions(context *ParseContext) error {
- if err := a.actionMixin.applyActions(context); err != nil {
- return err
- }
- // Dispatch to actions.
- for _, element := range context.Elements {
- if applier, ok := element.Clause.(actionApplier); ok {
- if err := applier.applyActions(context); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-// Errorf prints an error message to w in the format ": error: ".
-func (a *Application) Errorf(format string, args ...interface{}) {
- fmt.Fprintf(a.writer, a.Name+": error: "+format+"\n", args...)
-}
-
-// Fatalf writes a formatted error to w then terminates with exit status 1.
-func (a *Application) Fatalf(format string, args ...interface{}) {
- a.Errorf(format, args...)
- a.terminate(1)
-}
-
-// FatalUsage prints an error message followed by usage information, then
-// exits with a non-zero status.
-func (a *Application) FatalUsage(format string, args ...interface{}) {
- a.Errorf(format, args...)
- a.Usage([]string{})
- a.terminate(1)
-}
-
-// FatalUsageContext writes a printf formatted error message to w, then usage
-// information for the given ParseContext, before exiting.
-func (a *Application) FatalUsageContext(context *ParseContext, format string, args ...interface{}) {
- a.Errorf(format, args...)
- if err := a.UsageForContext(context); err != nil {
- panic(err)
- }
- a.terminate(1)
-}
-
-// FatalIfError prints an error and exits if err is not nil. The error is printed
-// with the given formatted string, if any.
-func (a *Application) FatalIfError(err error, format string, args ...interface{}) {
- if err != nil {
- prefix := ""
- if format != "" {
- prefix = fmt.Sprintf(format, args...) + ": "
- }
- a.Errorf(prefix+"%s", err)
- a.terminate(1)
- }
-}
-
-func envarTransform(name string) string {
- return strings.ToUpper(envarTransformRegexp.ReplaceAllString(name, "_"))
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/args.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/args.go
deleted file mode 100644
index 9dd0c0e..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/args.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package kingpin
-
-import "fmt"
-
-type argGroup struct {
- args []*ArgClause
-}
-
-func newArgGroup() *argGroup {
- return &argGroup{}
-}
-
-func (a *argGroup) have() bool {
- return len(a.args) > 0
-}
-
-// GetArg gets an argument definition.
-//
-// This allows existing arguments to be modified after definition but before parsing. Useful for
-// modular applications.
-func (a *argGroup) GetArg(name string) *ArgClause {
- for _, arg := range a.args {
- if arg.name == name {
- return arg
- }
- }
- return nil
-}
-
-func (a *argGroup) Arg(name, help string) *ArgClause {
- arg := newArg(name, help)
- a.args = append(a.args, arg)
- return arg
-}
-
-func (a *argGroup) init() error {
- required := 0
- seen := map[string]struct{}{}
- previousArgMustBeLast := false
- for i, arg := range a.args {
- if previousArgMustBeLast {
- return fmt.Errorf("Args() can't be followed by another argument '%s'", arg.name)
- }
- if arg.consumesRemainder() {
- previousArgMustBeLast = true
- }
- if _, ok := seen[arg.name]; ok {
- return fmt.Errorf("duplicate argument '%s'", arg.name)
- }
- seen[arg.name] = struct{}{}
- if arg.required && required != i {
- return fmt.Errorf("required arguments found after non-required")
- }
- if arg.required {
- required++
- }
- if err := arg.init(); err != nil {
- return err
- }
- }
- return nil
-}
-
-type ArgClause struct {
- actionMixin
- parserMixin
- name string
- help string
- defaultValues []string
- required bool
-}
-
-func newArg(name, help string) *ArgClause {
- a := &ArgClause{
- name: name,
- help: help,
- }
- return a
-}
-
-func (a *ArgClause) consumesRemainder() bool {
- if r, ok := a.value.(remainderArg); ok {
- return r.IsCumulative()
- }
- return false
-}
-
-// Required arguments must be input by the user. They can not have a Default() value provided.
-func (a *ArgClause) Required() *ArgClause {
- a.required = true
- return a
-}
-
-// Default values for this argument. They *must* be parseable by the value of the argument.
-func (a *ArgClause) Default(values ...string) *ArgClause {
- a.defaultValues = values
- return a
-}
-
-func (a *ArgClause) Action(action Action) *ArgClause {
- a.addAction(action)
- return a
-}
-
-func (a *ArgClause) PreAction(action Action) *ArgClause {
- a.addPreAction(action)
- return a
-}
-
-func (a *ArgClause) init() error {
- if a.required && len(a.defaultValues) > 0 {
- return fmt.Errorf("required argument '%s' with unusable default value", a.name)
- }
- if a.value == nil {
- return fmt.Errorf("no parser defined for arg '%s'", a.name)
- }
- return nil
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/cmd.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/cmd.go
deleted file mode 100644
index 9bbc793..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/cmd.go
+++ /dev/null
@@ -1,182 +0,0 @@
-package kingpin
-
-import (
- "fmt"
- "strings"
-)
-
-type cmdGroup struct {
- app *Application
- parent *CmdClause
- commands map[string]*CmdClause
- commandOrder []*CmdClause
-}
-
-func (c *cmdGroup) defaultSubcommand() *CmdClause {
- for _, cmd := range c.commandOrder {
- if cmd.isDefault {
- return cmd
- }
- }
- return nil
-}
-
-// GetArg gets a command definition.
-//
-// This allows existing commands to be modified after definition but before parsing. Useful for
-// modular applications.
-func (c *cmdGroup) GetCommand(name string) *CmdClause {
- return c.commands[name]
-}
-
-func newCmdGroup(app *Application) *cmdGroup {
- return &cmdGroup{
- app: app,
- commands: make(map[string]*CmdClause),
- }
-}
-
-func (c *cmdGroup) flattenedCommands() (out []*CmdClause) {
- for _, cmd := range c.commandOrder {
- if len(cmd.commands) == 0 {
- out = append(out, cmd)
- }
- out = append(out, cmd.flattenedCommands()...)
- }
- return
-}
-
-func (c *cmdGroup) addCommand(name, help string) *CmdClause {
- cmd := newCommand(c.app, name, help)
- c.commands[name] = cmd
- c.commandOrder = append(c.commandOrder, cmd)
- return cmd
-}
-
-func (c *cmdGroup) init() error {
- seen := map[string]bool{}
- if c.defaultSubcommand() != nil && !c.have() {
- return fmt.Errorf("default subcommand %q provided but no subcommands defined", c.defaultSubcommand().name)
- }
- defaults := []string{}
- for _, cmd := range c.commandOrder {
- if cmd.isDefault {
- defaults = append(defaults, cmd.name)
- }
- if seen[cmd.name] {
- return fmt.Errorf("duplicate command %q", cmd.name)
- }
- seen[cmd.name] = true
- for _, alias := range cmd.aliases {
- if seen[alias] {
- return fmt.Errorf("alias duplicates existing command %q", alias)
- }
- c.commands[alias] = cmd
- }
- if err := cmd.init(); err != nil {
- return err
- }
- }
- if len(defaults) > 1 {
- return fmt.Errorf("more than one default subcommand exists: %s", strings.Join(defaults, ", "))
- }
- return nil
-}
-
-func (c *cmdGroup) have() bool {
- return len(c.commands) > 0
-}
-
-type CmdClauseValidator func(*CmdClause) error
-
-// A CmdClause is a single top-level command. It encapsulates a set of flags
-// and either subcommands or positional arguments.
-type CmdClause struct {
- actionMixin
- *flagGroup
- *argGroup
- *cmdGroup
- app *Application
- name string
- aliases []string
- help string
- isDefault bool
- validator CmdClauseValidator
- hidden bool
-}
-
-func newCommand(app *Application, name, help string) *CmdClause {
- c := &CmdClause{
- flagGroup: newFlagGroup(),
- argGroup: newArgGroup(),
- cmdGroup: newCmdGroup(app),
- app: app,
- name: name,
- help: help,
- }
- return c
-}
-
-// Add an Alias for this command.
-func (c *CmdClause) Alias(name string) *CmdClause {
- c.aliases = append(c.aliases, name)
- return c
-}
-
-// Validate sets a validation function to run when parsing.
-func (c *CmdClause) Validate(validator CmdClauseValidator) *CmdClause {
- c.validator = validator
- return c
-}
-
-func (c *CmdClause) FullCommand() string {
- out := []string{c.name}
- for p := c.parent; p != nil; p = p.parent {
- out = append([]string{p.name}, out...)
- }
- return strings.Join(out, " ")
-}
-
-// Command adds a new sub-command.
-func (c *CmdClause) Command(name, help string) *CmdClause {
- cmd := c.addCommand(name, help)
- cmd.parent = c
- return cmd
-}
-
-// Default makes this command the default if commands don't match.
-func (c *CmdClause) Default() *CmdClause {
- c.isDefault = true
- return c
-}
-
-func (c *CmdClause) Action(action Action) *CmdClause {
- c.addAction(action)
- return c
-}
-
-func (c *CmdClause) PreAction(action Action) *CmdClause {
- c.addPreAction(action)
- return c
-}
-
-func (c *CmdClause) init() error {
- if err := c.flagGroup.init(c.app.defaultEnvarPrefix()); err != nil {
- return err
- }
- if c.argGroup.have() && c.cmdGroup.have() {
- return fmt.Errorf("can't mix Arg()s with Command()s")
- }
- if err := c.argGroup.init(); err != nil {
- return err
- }
- if err := c.cmdGroup.init(); err != nil {
- return err
- }
- return nil
-}
-
-func (c *CmdClause) Hidden() *CmdClause {
- c.hidden = true
- return c
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/cmd/genvalues/main.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/cmd/genvalues/main.go
deleted file mode 100644
index 1411539..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/cmd/genvalues/main.go
+++ /dev/null
@@ -1,134 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "os"
- "os/exec"
- "strings"
-
- "github.com/alecthomas/template"
-)
-
-const (
- tmpl = `package kingpin
-
-// This file is autogenerated by "go generate .". Do not modify.
-
-{{range .}}
-{{if not .NoValueParser}}
-// -- {{.Type}} Value
-type {{.|ValueName}} struct { v *{{.Type}} }
-
-func new{{.|Name}}Value(p *{{.Type}}) *{{.|ValueName}} {
- return &{{.|ValueName}}{p}
-}
-
-func (f *{{.|ValueName}}) Set(s string) error {
- v, err := {{.Parser}}
- if err == nil {
- *f.v = ({{.Type}})(v)
- }
- return err
-}
-
-func (f *{{.|ValueName}}) Get() interface{} { return ({{.Type}})(*f.v) }
-
-func (f *{{.|ValueName}}) String() string { return {{.|Format}} }
-
-{{if .Help}}
-// {{.Help}}
-{{else}}\
-// {{.|Name}} parses the next command-line value as {{.Type}}.
-{{end}}\
-func (p *parserMixin) {{.|Name}}() (target *{{.Type}}) {
- target = new({{.Type}})
- p.{{.|Name}}Var(target)
- return
-}
-
-func (p *parserMixin) {{.|Name}}Var(target *{{.Type}}) {
- p.SetValue(new{{.|Name}}Value(target))
-}
-
-{{end}}
-// {{.|Plural}} accumulates {{.Type}} values into a slice.
-func (p *parserMixin) {{.|Plural}}() (target *[]{{.Type}}) {
- target = new([]{{.Type}})
- p.{{.|Plural}}Var(target)
- return
-}
-
-func (p *parserMixin) {{.|Plural}}Var(target *[]{{.Type}}) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return new{{.|Name}}Value(v.(*{{.Type}}))
- }))
-}
-
-{{end}}
-`
-)
-
-type Value struct {
- Name string `json:"name"`
- NoValueParser bool `json:"no_value_parser"`
- Type string `json:"type"`
- Parser string `json:"parser"`
- Format string `json:"format"`
- Plural string `json:"plural"`
- Help string `json:"help"`
-}
-
-func fatalIfError(err error) {
- if err != nil {
- panic(err)
- }
-}
-
-func main() {
- r, err := os.Open("values.json")
- fatalIfError(err)
- defer r.Close()
-
- v := []Value{}
- err = json.NewDecoder(r).Decode(&v)
- fatalIfError(err)
-
- valueName := func(v *Value) string {
- if v.Name != "" {
- return v.Name
- }
- return strings.Title(v.Type)
- }
-
- t, err := template.New("genvalues").Funcs(template.FuncMap{
- "Lower": strings.ToLower,
- "Format": func(v *Value) string {
- if v.Format != "" {
- return v.Format
- }
- return "fmt.Sprintf(\"%v\", *f)"
- },
- "ValueName": func(v *Value) string {
- name := valueName(v)
- return strings.ToLower(name[0:1]) + name[1:] + "Value"
- },
- "Name": valueName,
- "Plural": func(v *Value) string {
- if v.Plural != "" {
- return v.Plural
- }
- return valueName(v) + "List"
- },
- }).Parse(tmpl)
- fatalIfError(err)
-
- w, err := os.Create("values_generated.go")
- fatalIfError(err)
- defer w.Close()
-
- err = t.Execute(w, v)
- fatalIfError(err)
-
- err = exec.Command("goimports", "-w", "values_generated.go").Run()
- fatalIfError(err)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/doc.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/doc.go
deleted file mode 100644
index c14762c..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/doc.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// Package kingpin provides command line interfaces like this:
-//
-// $ chat
-// usage: chat [] [] [ ...]
-//
-// Flags:
-// --debug enable debug mode
-// --help Show help.
-// --server=127.0.0.1 server address
-//
-// Commands:
-// help
-// Show help for a command.
-//
-// post []
-// Post a message to a channel.
-//
-// register
-// Register a new user.
-//
-// $ chat help post
-// usage: chat [] post [] []
-//
-// Post a message to a channel.
-//
-// Flags:
-// --image=IMAGE image to post
-//
-// Args:
-// channel to post to
-// [] text to post
-// $ chat post --image=~/Downloads/owls.jpg pics
-//
-// From code like this:
-//
-// package main
-//
-// import "gopkg.in/alecthomas/kingpin.v1"
-//
-// var (
-// debug = kingpin.Flag("debug", "enable debug mode").Default("false").Bool()
-// serverIP = kingpin.Flag("server", "server address").Default("127.0.0.1").IP()
-//
-// register = kingpin.Command("register", "Register a new user.")
-// registerNick = register.Arg("nick", "nickname for user").Required().String()
-// registerName = register.Arg("name", "name of user").Required().String()
-//
-// post = kingpin.Command("post", "Post a message to a channel.")
-// postImage = post.Flag("image", "image to post").ExistingFile()
-// postChannel = post.Arg("channel", "channel to post to").Required().String()
-// postText = post.Arg("text", "text to post").String()
-// )
-//
-// func main() {
-// switch kingpin.Parse() {
-// // Register user
-// case "register":
-// println(*registerNick)
-//
-// // Post message
-// case "post":
-// if *postImage != nil {
-// }
-// if *postText != "" {
-// }
-// }
-// }
-package kingpin
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/chat1/main.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/chat1/main.go
deleted file mode 100644
index 2a233fc..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/chat1/main.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package main
-
-import (
- "fmt"
-
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-var (
- debug = kingpin.Flag("debug", "Enable debug mode.").Bool()
- timeout = kingpin.Flag("timeout", "Timeout waiting for ping.").Default("5s").OverrideDefaultFromEnvar("PING_TIMEOUT").Short('t').Duration()
- ip = kingpin.Arg("ip", "IP address to ping.").Required().IP()
- count = kingpin.Arg("count", "Number of packets to send").Int()
-)
-
-func main() {
- kingpin.Version("0.0.1")
- kingpin.Parse()
- fmt.Printf("Would ping: %s with timeout %s and count %d", *ip, *timeout, *count)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/chat2/main.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/chat2/main.go
deleted file mode 100644
index 83891a7..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/chat2/main.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package main
-
-import (
- "os"
- "strings"
-
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-var (
- app = kingpin.New("chat", "A command-line chat application.")
- debug = app.Flag("debug", "Enable debug mode.").Bool()
- serverIP = app.Flag("server", "Server address.").Default("127.0.0.1").IP()
-
- register = app.Command("register", "Register a new user.")
- registerNick = register.Arg("nick", "Nickname for user.").Required().String()
- registerName = register.Arg("name", "Name of user.").Required().String()
-
- post = app.Command("post", "Post a message to a channel.")
- postImage = post.Flag("image", "Image to post.").File()
- postChannel = post.Arg("channel", "Channel to post to.").Required().String()
- postText = post.Arg("text", "Text to post.").Strings()
-)
-
-func main() {
- switch kingpin.MustParse(app.Parse(os.Args[1:])) {
- // Register user
- case register.FullCommand():
- println(*registerNick)
-
- // Post message
- case post.FullCommand():
- if *postImage != nil {
- }
- text := strings.Join(*postText, " ")
- println("Post:", text)
- }
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/curl/main.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/curl/main.go
deleted file mode 100644
index a877e7b..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/curl/main.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// A curl-like HTTP command-line client.
-package main
-
-import (
- "errors"
- "fmt"
- "io"
- "net/http"
- "os"
- "strings"
-
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-var (
- timeout = kingpin.Flag("timeout", "Set connection timeout.").Short('t').Default("5s").Duration()
- headers = HTTPHeader(kingpin.Flag("headers", "Add HTTP headers to the request.").Short('H').PlaceHolder("HEADER=VALUE"))
-
- get = kingpin.Command("get", "GET a resource.").Default()
- getFlag = get.Flag("test", "Test flag").Bool()
- getURL = get.Command("url", "Retrieve a URL.").Default()
- getURLURL = getURL.Arg("url", "URL to GET.").Required().URL()
- getFile = get.Command("file", "Retrieve a file.")
- getFileFile = getFile.Arg("file", "File to retrieve.").Required().ExistingFile()
-
- post = kingpin.Command("post", "POST a resource.")
- postData = post.Flag("data", "Key-value data to POST").Short('d').PlaceHolder("KEY:VALUE").StringMap()
- postBinaryFile = post.Flag("data-binary", "File with binary data to POST.").File()
- postURL = post.Arg("url", "URL to POST to.").Required().URL()
-)
-
-type HTTPHeaderValue http.Header
-
-func (h HTTPHeaderValue) Set(value string) error {
- parts := strings.SplitN(value, "=", 2)
- if len(parts) != 2 {
- return fmt.Errorf("expected HEADER=VALUE got '%s'", value)
- }
- (http.Header)(h).Add(parts[0], parts[1])
- return nil
-}
-
-func (h HTTPHeaderValue) String() string {
- return ""
-}
-
-func HTTPHeader(s kingpin.Settings) (target *http.Header) {
- target = &http.Header{}
- s.SetValue((*HTTPHeaderValue)(target))
- return
-}
-
-func applyRequest(req *http.Request) error {
- req.Header = *headers
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- if resp.StatusCode < 200 || resp.StatusCode > 299 {
- return fmt.Errorf("HTTP request failed: %s", resp.Status)
- }
- _, err = io.Copy(os.Stdout, resp.Body)
- return err
-}
-
-func apply(method string, url string) error {
- req, err := http.NewRequest(method, url, nil)
- if err != nil {
- return err
- }
- return applyRequest(req)
-}
-
-func applyPOST() error {
- req, err := http.NewRequest("POST", (*postURL).String(), nil)
- if err != nil {
- return err
- }
- if len(*postData) > 0 {
- for key, value := range *postData {
- req.Form.Set(key, value)
- }
- } else if postBinaryFile != nil {
- if headers.Get("Content-Type") != "" {
- headers.Set("Content-Type", "application/octet-stream")
- }
- req.Body = *postBinaryFile
- } else {
- return errors.New("--data or --data-binary must be provided to POST")
- }
- return applyRequest(req)
-}
-
-func main() {
- kingpin.UsageTemplate(kingpin.CompactUsageTemplate).Version("1.0").Author("Alec Thomas")
- kingpin.CommandLine.Help = "An example implementation of curl."
- switch kingpin.Parse() {
- case "get url":
- kingpin.FatalIfError(apply("GET", (*getURLURL).String()), "GET failed")
-
- case "post":
- kingpin.FatalIfError(applyPOST(), "POST failed")
- }
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/modular/main.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/modular/main.go
deleted file mode 100644
index 34cfa0b..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/modular/main.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package main
-
-import (
- "fmt"
- "os"
-
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-// Context for "ls" command
-type LsCommand struct {
- All bool
-}
-
-func (l *LsCommand) run(c *kingpin.ParseContext) error {
- fmt.Printf("all=%v\n", l.All)
- return nil
-}
-
-func configureLsCommand(app *kingpin.Application) {
- c := &LsCommand{}
- ls := app.Command("ls", "List files.").Action(c.run)
- ls.Flag("all", "List all files.").Short('a').BoolVar(&c.All)
-}
-
-func main() {
- app := kingpin.New("modular", "My modular application.")
- configureLsCommand(app)
- kingpin.MustParse(app.Parse(os.Args[1:]))
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/ping/main.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/ping/main.go
deleted file mode 100644
index 41ea263..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/examples/ping/main.go
+++ /dev/null
@@ -1,20 +0,0 @@
-package main
-
-import (
- "fmt"
-
- "gopkg.in/alecthomas/kingpin.v2"
-)
-
-var (
- debug = kingpin.Flag("debug", "Enable debug mode.").Bool()
- timeout = kingpin.Flag("timeout", "Timeout waiting for ping.").OverrideDefaultFromEnvar("PING_TIMEOUT").Required().Short('t').Duration()
- ip = kingpin.Arg("ip", "IP address to ping.").Required().IP()
- count = kingpin.Arg("count", "Number of packets to send").Int()
-)
-
-func main() {
- kingpin.Version("0.0.1")
- kingpin.Parse()
- fmt.Printf("Would ping: %s with timeout %s and count %d", *ip, *timeout, *count)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/flags.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/flags.go
deleted file mode 100644
index a2ab962..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/flags.go
+++ /dev/null
@@ -1,317 +0,0 @@
-package kingpin
-
-import (
- "fmt"
- "os"
- "regexp"
- "strings"
-)
-
-var (
- envVarValuesSeparator = "\r?\n"
- envVarValuesTrimmer = regexp.MustCompile(envVarValuesSeparator + "$")
- envVarValuesSplitter = regexp.MustCompile(envVarValuesSeparator)
-)
-
-type flagGroup struct {
- short map[string]*FlagClause
- long map[string]*FlagClause
- flagOrder []*FlagClause
-}
-
-func newFlagGroup() *flagGroup {
- return &flagGroup{
- short: map[string]*FlagClause{},
- long: map[string]*FlagClause{},
- }
-}
-
-// GetFlag gets a flag definition.
-//
-// This allows existing flags to be modified after definition but before parsing. Useful for
-// modular applications.
-func (f *flagGroup) GetFlag(name string) *FlagClause {
- return f.long[name]
-}
-
-// Flag defines a new flag with the given long name and help.
-func (f *flagGroup) Flag(name, help string) *FlagClause {
- flag := newFlag(name, help)
- f.long[name] = flag
- f.flagOrder = append(f.flagOrder, flag)
- return flag
-}
-
-func (f *flagGroup) init(defaultEnvarPrefix string) error {
- if err := f.checkDuplicates(); err != nil {
- return err
- }
- for _, flag := range f.long {
- if defaultEnvarPrefix != "" && !flag.noEnvar && flag.envar == "" {
- flag.envar = envarTransform(defaultEnvarPrefix + "_" + flag.name)
- }
- if err := flag.init(); err != nil {
- return err
- }
- if flag.shorthand != 0 {
- f.short[string(flag.shorthand)] = flag
- }
- }
- return nil
-}
-
-func (f *flagGroup) checkDuplicates() error {
- seenShort := map[byte]bool{}
- seenLong := map[string]bool{}
- for _, flag := range f.flagOrder {
- if flag.shorthand != 0 {
- if _, ok := seenShort[flag.shorthand]; ok {
- return fmt.Errorf("duplicate short flag -%c", flag.shorthand)
- }
- seenShort[flag.shorthand] = true
- }
- if _, ok := seenLong[flag.name]; ok {
- return fmt.Errorf("duplicate long flag --%s", flag.name)
- }
- seenLong[flag.name] = true
- }
- return nil
-}
-
-func (f *flagGroup) parse(context *ParseContext) (*FlagClause, error) {
- var token *Token
-
-loop:
- for {
- token = context.Peek()
- switch token.Type {
- case TokenEOL:
- break loop
-
- case TokenLong, TokenShort:
- flagToken := token
- defaultValue := ""
- var flag *FlagClause
- var ok bool
- invert := false
-
- name := token.Value
- if token.Type == TokenLong {
- if strings.HasPrefix(name, "no-") {
- name = name[3:]
- invert = true
- }
- flag, ok = f.long[name]
- if !ok {
- return nil, fmt.Errorf("unknown long flag '%s'", flagToken)
- }
- } else {
- flag, ok = f.short[name]
- if !ok {
- return nil, fmt.Errorf("unknown short flag '%s'", flagToken)
- }
- }
-
- context.Next()
-
- fb, ok := flag.value.(boolFlag)
- if ok && fb.IsBoolFlag() {
- if invert {
- defaultValue = "false"
- } else {
- defaultValue = "true"
- }
- } else {
- if invert {
- context.Push(token)
- return nil, fmt.Errorf("unknown long flag '%s'", flagToken)
- }
- token = context.Peek()
- if token.Type != TokenArg {
- context.Push(token)
- return nil, fmt.Errorf("expected argument for flag '%s'", flagToken)
- }
- context.Next()
- defaultValue = token.Value
- }
-
- context.matchedFlag(flag, defaultValue)
- return flag, nil
-
- default:
- break loop
- }
- }
- return nil, nil
-}
-
-func (f *flagGroup) visibleFlags() int {
- count := 0
- for _, flag := range f.long {
- if !flag.hidden {
- count++
- }
- }
- return count
-}
-
-// FlagClause is a fluid interface used to build flags.
-type FlagClause struct {
- parserMixin
- actionMixin
- name string
- shorthand byte
- help string
- envar string
- noEnvar bool
- defaultValues []string
- placeholder string
- hidden bool
-}
-
-func newFlag(name, help string) *FlagClause {
- f := &FlagClause{
- name: name,
- help: help,
- }
- return f
-}
-
-func (f *FlagClause) setDefault() error {
- if !f.noEnvar && f.envar != "" {
- if envarValue := os.Getenv(f.envar); envarValue != "" {
- if v, ok := f.value.(repeatableFlag); !ok || !v.IsCumulative() {
- // Use the value as-is
- return f.value.Set(envarValue)
- } else {
- // Split by new line to extract multiple values, if any.
- trimmed := envVarValuesTrimmer.ReplaceAllString(envarValue, "")
- for _, value := range envVarValuesSplitter.Split(trimmed, -1) {
- if err := f.value.Set(value); err != nil {
- return err
- }
- }
- return nil
- }
- }
- }
-
- if len(f.defaultValues) > 0 {
- for _, defaultValue := range f.defaultValues {
- if err := f.value.Set(defaultValue); err != nil {
- return err
- }
- }
- return nil
- }
-
- return nil
-}
-
-func (f *FlagClause) needsValue() bool {
- haveDefault := len(f.defaultValues) > 0
- haveEnvar := !f.noEnvar && f.envar != "" && os.Getenv(f.envar) != ""
- return f.required && !(haveDefault || haveEnvar)
-}
-
-func (f *FlagClause) formatPlaceHolder() string {
- if f.placeholder != "" {
- return f.placeholder
- }
- if len(f.defaultValues) > 0 {
- ellipsis := ""
- if len(f.defaultValues) > 1 {
- ellipsis = "..."
- }
- if _, ok := f.value.(*stringValue); ok {
- return fmt.Sprintf("%q"+ellipsis, f.defaultValues[0])
- }
- return f.defaultValues[0] + ellipsis
- }
- return strings.ToUpper(f.name)
-}
-
-func (f *FlagClause) init() error {
- if f.required && len(f.defaultValues) > 0 {
- return fmt.Errorf("required flag '--%s' with default value that will never be used", f.name)
- }
- if f.value == nil {
- return fmt.Errorf("no type defined for --%s (eg. .String())", f.name)
- }
- if v, ok := f.value.(repeatableFlag); (!ok || !v.IsCumulative()) && len(f.defaultValues) > 1 {
- return fmt.Errorf("invalid default for '--%s', expecting single value", f.name)
- }
- return nil
-}
-
-// Dispatch to the given function after the flag is parsed and validated.
-func (f *FlagClause) Action(action Action) *FlagClause {
- f.addAction(action)
- return f
-}
-
-func (f *FlagClause) PreAction(action Action) *FlagClause {
- f.addPreAction(action)
- return f
-}
-
-// Default values for this flag. They *must* be parseable by the value of the flag.
-func (f *FlagClause) Default(values ...string) *FlagClause {
- f.defaultValues = values
- return f
-}
-
-// DEPRECATED: Use Envar(name) instead.
-func (f *FlagClause) OverrideDefaultFromEnvar(envar string) *FlagClause {
- return f.Envar(envar)
-}
-
-// Envar overrides the default value(s) for a flag from an environment variable,
-// if it is set. Several default values can be provided by using new lines to
-// separate them.
-func (f *FlagClause) Envar(name string) *FlagClause {
- f.envar = name
- f.noEnvar = false
- return f
-}
-
-// NoEnvar forces environment variable defaults to be disabled for this flag.
-// Most useful in conjunction with app.DefaultEnvars().
-func (f *FlagClause) NoEnvar() *FlagClause {
- f.envar = ""
- f.noEnvar = true
- return f
-}
-
-// PlaceHolder sets the place-holder string used for flag values in the help. The
-// default behaviour is to use the value provided by Default() if provided,
-// then fall back on the capitalized flag name.
-func (f *FlagClause) PlaceHolder(placeholder string) *FlagClause {
- f.placeholder = placeholder
- return f
-}
-
-// Hidden hides a flag from usage but still allows it to be used.
-func (f *FlagClause) Hidden() *FlagClause {
- f.hidden = true
- return f
-}
-
-// Required makes the flag required. You can not provide a Default() value to a Required() flag.
-func (f *FlagClause) Required() *FlagClause {
- f.required = true
- return f
-}
-
-// Short sets the short flag name.
-func (f *FlagClause) Short(name byte) *FlagClause {
- f.shorthand = name
- return f
-}
-
-// Bool makes this flag a boolean flag.
-func (f *FlagClause) Bool() (target *bool) {
- target = new(bool)
- f.SetValue(newBoolValue(target))
- return
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/global.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/global.go
deleted file mode 100644
index 10a2913..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/global.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package kingpin
-
-import (
- "os"
- "path/filepath"
-)
-
-var (
- // CommandLine is the default Kingpin parser.
- CommandLine = New(filepath.Base(os.Args[0]), "")
- // Global help flag. Exposed for user customisation.
- HelpFlag = CommandLine.HelpFlag
- // Top-level help command. Exposed for user customisation. May be nil.
- HelpCommand = CommandLine.HelpCommand
- // Global version flag. Exposed for user customisation. May be nil.
- VersionFlag = CommandLine.VersionFlag
-)
-
-// Command adds a new command to the default parser.
-func Command(name, help string) *CmdClause {
- return CommandLine.Command(name, help)
-}
-
-// Flag adds a new flag to the default parser.
-func Flag(name, help string) *FlagClause {
- return CommandLine.Flag(name, help)
-}
-
-// Arg adds a new argument to the top-level of the default parser.
-func Arg(name, help string) *ArgClause {
- return CommandLine.Arg(name, help)
-}
-
-// Parse and return the selected command. Will call the termination handler if
-// an error is encountered.
-func Parse() string {
- selected := MustParse(CommandLine.Parse(os.Args[1:]))
- if selected == "" && CommandLine.cmdGroup.have() {
- Usage()
- CommandLine.terminate(0)
- }
- return selected
-}
-
-// Errorf prints an error message to stderr.
-func Errorf(format string, args ...interface{}) {
- CommandLine.Errorf(format, args...)
-}
-
-// Fatalf prints an error message to stderr and exits.
-func Fatalf(format string, args ...interface{}) {
- CommandLine.Fatalf(format, args...)
-}
-
-// FatalIfError prints an error and exits if err is not nil. The error is printed
-// with the given prefix.
-func FatalIfError(err error, format string, args ...interface{}) {
- CommandLine.FatalIfError(err, format, args...)
-}
-
-// FatalUsage prints an error message followed by usage information, then
-// exits with a non-zero status.
-func FatalUsage(format string, args ...interface{}) {
- CommandLine.FatalUsage(format, args...)
-}
-
-// FatalUsageContext writes a printf formatted error message to stderr, then
-// usage information for the given ParseContext, before exiting.
-func FatalUsageContext(context *ParseContext, format string, args ...interface{}) {
- CommandLine.FatalUsageContext(context, format, args...)
-}
-
-// Usage prints usage to stderr.
-func Usage() {
- CommandLine.Usage(os.Args[1:])
-}
-
-// Set global usage template to use (defaults to DefaultUsageTemplate).
-func UsageTemplate(template string) *Application {
- return CommandLine.UsageTemplate(template)
-}
-
-// MustParse can be used with app.Parse(args) to exit with an error if parsing fails.
-func MustParse(command string, err error) string {
- if err != nil {
- Fatalf("%s, try --help", err)
- }
- return command
-}
-
-// Version adds a flag for displaying the application version number.
-func Version(version string) *Application {
- return CommandLine.Version(version)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/guesswidth.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/guesswidth.go
deleted file mode 100644
index a269531..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/guesswidth.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build appengine !linux,!freebsd,!darwin,!dragonfly,!netbsd,!openbsd
-
-package kingpin
-
-import "io"
-
-func guessWidth(w io.Writer) int {
- return 80
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go
deleted file mode 100644
index ad8163f..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/guesswidth_unix.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// +build !appengine,linux freebsd darwin dragonfly netbsd openbsd
-
-package kingpin
-
-import (
- "io"
- "os"
- "strconv"
- "syscall"
- "unsafe"
-)
-
-func guessWidth(w io.Writer) int {
- // check if COLUMNS env is set to comply with
- // http://pubs.opengroup.org/onlinepubs/009604499/basedefs/xbd_chap08.html
- colsStr := os.Getenv("COLUMNS")
- if colsStr != "" {
- if cols, err := strconv.Atoi(colsStr); err == nil {
- return cols
- }
- }
-
- if t, ok := w.(*os.File); ok {
- fd := t.Fd()
- var dimensions [4]uint16
-
- if _, _, err := syscall.Syscall6(
- syscall.SYS_IOCTL,
- uintptr(fd),
- uintptr(syscall.TIOCGWINSZ),
- uintptr(unsafe.Pointer(&dimensions)),
- 0, 0, 0,
- ); err == 0 {
- return int(dimensions[1])
- }
- }
- return 80
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/model.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/model.go
deleted file mode 100644
index ee770b0..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/model.go
+++ /dev/null
@@ -1,225 +0,0 @@
-package kingpin
-
-import (
- "fmt"
- "strconv"
- "strings"
-)
-
-// Data model for Kingpin command-line structure.
-
-type FlagGroupModel struct {
- Flags []*FlagModel
-}
-
-func (f *FlagGroupModel) FlagSummary() string {
- out := []string{}
- count := 0
- for _, flag := range f.Flags {
- if flag.Name != "help" {
- count++
- }
- if flag.Required {
- if flag.IsBoolFlag() {
- out = append(out, fmt.Sprintf("--[no-]%s", flag.Name))
- } else {
- out = append(out, fmt.Sprintf("--%s=%s", flag.Name, flag.FormatPlaceHolder()))
- }
- }
- }
- if count != len(out) {
- out = append(out, "[]")
- }
- return strings.Join(out, " ")
-}
-
-type FlagModel struct {
- Name string
- Help string
- Short rune
- Default []string
- Envar string
- PlaceHolder string
- Required bool
- Hidden bool
- Value Value
-}
-
-func (f *FlagModel) String() string {
- return f.Value.String()
-}
-
-func (f *FlagModel) IsBoolFlag() bool {
- if fl, ok := f.Value.(boolFlag); ok {
- return fl.IsBoolFlag()
- }
- return false
-}
-
-func (f *FlagModel) FormatPlaceHolder() string {
- if f.PlaceHolder != "" {
- return f.PlaceHolder
- }
- if len(f.Default) > 0 {
- ellipsis := ""
- if len(f.Default) > 1 {
- ellipsis = "..."
- }
- if _, ok := f.Value.(*stringValue); ok {
- return strconv.Quote(f.Default[0]) + ellipsis
- }
- return f.Default[0] + ellipsis
- }
- return strings.ToUpper(f.Name)
-}
-
-type ArgGroupModel struct {
- Args []*ArgModel
-}
-
-func (a *ArgGroupModel) ArgSummary() string {
- depth := 0
- out := []string{}
- for _, arg := range a.Args {
- h := "<" + arg.Name + ">"
- if !arg.Required {
- h = "[" + h
- depth++
- }
- out = append(out, h)
- }
- out[len(out)-1] = out[len(out)-1] + strings.Repeat("]", depth)
- return strings.Join(out, " ")
-}
-
-type ArgModel struct {
- Name string
- Help string
- Default []string
- Required bool
- Value Value
-}
-
-func (a *ArgModel) String() string {
- return a.Value.String()
-}
-
-type CmdGroupModel struct {
- Commands []*CmdModel
-}
-
-func (c *CmdGroupModel) FlattenedCommands() (out []*CmdModel) {
- for _, cmd := range c.Commands {
- if len(cmd.Commands) == 0 {
- out = append(out, cmd)
- }
- out = append(out, cmd.FlattenedCommands()...)
- }
- return
-}
-
-type CmdModel struct {
- Name string
- Aliases []string
- Help string
- FullCommand string
- Depth int
- Hidden bool
- Default bool
- *FlagGroupModel
- *ArgGroupModel
- *CmdGroupModel
-}
-
-func (c *CmdModel) String() string {
- return c.FullCommand
-}
-
-type ApplicationModel struct {
- Name string
- Help string
- Version string
- Author string
- *ArgGroupModel
- *CmdGroupModel
- *FlagGroupModel
-}
-
-func (a *Application) Model() *ApplicationModel {
- return &ApplicationModel{
- Name: a.Name,
- Help: a.Help,
- Version: a.version,
- Author: a.author,
- FlagGroupModel: a.flagGroup.Model(),
- ArgGroupModel: a.argGroup.Model(),
- CmdGroupModel: a.cmdGroup.Model(),
- }
-}
-
-func (a *argGroup) Model() *ArgGroupModel {
- m := &ArgGroupModel{}
- for _, arg := range a.args {
- m.Args = append(m.Args, arg.Model())
- }
- return m
-}
-
-func (a *ArgClause) Model() *ArgModel {
- return &ArgModel{
- Name: a.name,
- Help: a.help,
- Default: a.defaultValues,
- Required: a.required,
- Value: a.value,
- }
-}
-
-func (f *flagGroup) Model() *FlagGroupModel {
- m := &FlagGroupModel{}
- for _, fl := range f.flagOrder {
- m.Flags = append(m.Flags, fl.Model())
- }
- return m
-}
-
-func (f *FlagClause) Model() *FlagModel {
- return &FlagModel{
- Name: f.name,
- Help: f.help,
- Short: rune(f.shorthand),
- Default: f.defaultValues,
- Envar: f.envar,
- PlaceHolder: f.placeholder,
- Required: f.required,
- Hidden: f.hidden,
- Value: f.value,
- }
-}
-
-func (c *cmdGroup) Model() *CmdGroupModel {
- m := &CmdGroupModel{}
- for _, cm := range c.commandOrder {
- m.Commands = append(m.Commands, cm.Model())
- }
- return m
-}
-
-func (c *CmdClause) Model() *CmdModel {
- depth := 0
- for i := c; i != nil; i = i.parent {
- depth++
- }
- return &CmdModel{
- Name: c.name,
- Aliases: c.aliases,
- Help: c.help,
- Depth: depth,
- Hidden: c.hidden,
- Default: c.isDefault,
- FullCommand: c.FullCommand(),
- FlagGroupModel: c.flagGroup.Model(),
- ArgGroupModel: c.argGroup.Model(),
- CmdGroupModel: c.cmdGroup.Model(),
- }
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parser.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parser.go
deleted file mode 100644
index f6ba7ec..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parser.go
+++ /dev/null
@@ -1,374 +0,0 @@
-package kingpin
-
-import (
- "bufio"
- "fmt"
- "os"
- "strings"
-)
-
-type TokenType int
-
-// Token types.
-const (
- TokenShort TokenType = iota
- TokenLong
- TokenArg
- TokenError
- TokenEOL
-)
-
-func (t TokenType) String() string {
- switch t {
- case TokenShort:
- return "short flag"
- case TokenLong:
- return "long flag"
- case TokenArg:
- return "argument"
- case TokenError:
- return "error"
- case TokenEOL:
- return ""
- }
- return "?"
-}
-
-var (
- TokenEOLMarker = Token{-1, TokenEOL, ""}
-)
-
-type Token struct {
- Index int
- Type TokenType
- Value string
-}
-
-func (t *Token) Equal(o *Token) bool {
- return t.Index == o.Index
-}
-
-func (t *Token) IsFlag() bool {
- return t.Type == TokenShort || t.Type == TokenLong
-}
-
-func (t *Token) IsEOF() bool {
- return t.Type == TokenEOL
-}
-
-func (t *Token) String() string {
- switch t.Type {
- case TokenShort:
- return "-" + t.Value
- case TokenLong:
- return "--" + t.Value
- case TokenArg:
- return t.Value
- case TokenError:
- return "error: " + t.Value
- case TokenEOL:
- return ""
- default:
- panic("unhandled type")
- }
-}
-
-// A union of possible elements in a parse stack.
-type ParseElement struct {
- // Clause is either *CmdClause, *ArgClause or *FlagClause.
- Clause interface{}
- // Value is corresponding value for an ArgClause or FlagClause (if any).
- Value *string
-}
-
-// ParseContext holds the current context of the parser. When passed to
-// Action() callbacks Elements will be fully populated with *FlagClause,
-// *ArgClause and *CmdClause values and their corresponding arguments (if
-// any).
-type ParseContext struct {
- SelectedCommand *CmdClause
- ignoreDefault bool
- argsOnly bool
- peek []*Token
- argi int // Index of current command-line arg we're processing.
- args []string
- flags *flagGroup
- arguments *argGroup
- argumenti int // Cursor into arguments
- // Flags, arguments and commands encountered and collected during parse.
- Elements []*ParseElement
-}
-
-func (p *ParseContext) nextArg() *ArgClause {
- if p.argumenti >= len(p.arguments.args) {
- return nil
- }
- arg := p.arguments.args[p.argumenti]
- if !arg.consumesRemainder() {
- p.argumenti++
- }
- return arg
-}
-
-func (p *ParseContext) next() {
- p.argi++
- p.args = p.args[1:]
-}
-
-// HasTrailingArgs returns true if there are unparsed command-line arguments.
-// This can occur if the parser can not match remaining arguments.
-func (p *ParseContext) HasTrailingArgs() bool {
- return len(p.args) > 0
-}
-
-func tokenize(args []string, ignoreDefault bool) *ParseContext {
- return &ParseContext{
- ignoreDefault: ignoreDefault,
- args: args,
- flags: newFlagGroup(),
- arguments: newArgGroup(),
- }
-}
-
-func (p *ParseContext) mergeFlags(flags *flagGroup) {
- for _, flag := range flags.flagOrder {
- if flag.shorthand != 0 {
- p.flags.short[string(flag.shorthand)] = flag
- }
- p.flags.long[flag.name] = flag
- p.flags.flagOrder = append(p.flags.flagOrder, flag)
- }
-}
-
-func (p *ParseContext) mergeArgs(args *argGroup) {
- for _, arg := range args.args {
- p.arguments.args = append(p.arguments.args, arg)
- }
-}
-
-func (p *ParseContext) EOL() bool {
- return p.Peek().Type == TokenEOL
-}
-
-// Next token in the parse context.
-func (p *ParseContext) Next() *Token {
- if len(p.peek) > 0 {
- return p.pop()
- }
-
- // End of tokens.
- if len(p.args) == 0 {
- return &Token{Index: p.argi, Type: TokenEOL}
- }
-
- arg := p.args[0]
- p.next()
-
- if p.argsOnly {
- return &Token{p.argi, TokenArg, arg}
- }
-
- // All remaining args are passed directly.
- if arg == "--" {
- p.argsOnly = true
- return p.Next()
- }
-
- if strings.HasPrefix(arg, "--") {
- parts := strings.SplitN(arg[2:], "=", 2)
- token := &Token{p.argi, TokenLong, parts[0]}
- if len(parts) == 2 {
- p.Push(&Token{p.argi, TokenArg, parts[1]})
- }
- return token
- }
-
- if strings.HasPrefix(arg, "-") {
- if len(arg) == 1 {
- return &Token{Index: p.argi, Type: TokenShort}
- }
- short := arg[1:2]
- flag, ok := p.flags.short[short]
- // Not a known short flag, we'll just return it anyway.
- if !ok {
- } else if fb, ok := flag.value.(boolFlag); ok && fb.IsBoolFlag() {
- // Bool short flag.
- } else {
- // Short flag with combined argument: -fARG
- token := &Token{p.argi, TokenShort, short}
- if len(arg) > 2 {
- p.Push(&Token{p.argi, TokenArg, arg[2:]})
- }
- return token
- }
-
- if len(arg) > 2 {
- p.args = append([]string{"-" + arg[2:]}, p.args...)
- }
- return &Token{p.argi, TokenShort, short}
- } else if strings.HasPrefix(arg, "@") {
- expanded, err := ExpandArgsFromFile(arg[1:])
- if err != nil {
- return &Token{p.argi, TokenError, err.Error()}
- }
- if p.argi >= len(p.args) {
- p.args = append(p.args[:p.argi-1], expanded...)
- } else {
- p.args = append(p.args[:p.argi-1], append(expanded, p.args[p.argi+1:]...)...)
- }
- return p.Next()
- }
-
- return &Token{p.argi, TokenArg, arg}
-}
-
-func (p *ParseContext) Peek() *Token {
- if len(p.peek) == 0 {
- return p.Push(p.Next())
- }
- return p.peek[len(p.peek)-1]
-}
-
-func (p *ParseContext) Push(token *Token) *Token {
- p.peek = append(p.peek, token)
- return token
-}
-
-func (p *ParseContext) pop() *Token {
- end := len(p.peek) - 1
- token := p.peek[end]
- p.peek = p.peek[0:end]
- return token
-}
-
-func (p *ParseContext) String() string {
- return p.SelectedCommand.FullCommand()
-}
-
-func (p *ParseContext) matchedFlag(flag *FlagClause, value string) {
- p.Elements = append(p.Elements, &ParseElement{Clause: flag, Value: &value})
-}
-
-func (p *ParseContext) matchedArg(arg *ArgClause, value string) {
- p.Elements = append(p.Elements, &ParseElement{Clause: arg, Value: &value})
-}
-
-func (p *ParseContext) matchedCmd(cmd *CmdClause) {
- p.Elements = append(p.Elements, &ParseElement{Clause: cmd})
- p.mergeFlags(cmd.flagGroup)
- p.mergeArgs(cmd.argGroup)
- p.SelectedCommand = cmd
-}
-
-// Expand arguments from a file. Lines starting with # will be treated as comments.
-func ExpandArgsFromFile(filename string) (out []string, err error) {
- r, err := os.Open(filename)
- if err != nil {
- return nil, err
- }
- defer r.Close()
- scanner := bufio.NewScanner(r)
- for scanner.Scan() {
- line := scanner.Text()
- if strings.HasPrefix(line, "#") {
- continue
- }
- out = append(out, line)
- }
- err = scanner.Err()
- return
-}
-
-func parse(context *ParseContext, app *Application) (err error) {
- context.mergeFlags(app.flagGroup)
- context.mergeArgs(app.argGroup)
-
- cmds := app.cmdGroup
- ignoreDefault := context.ignoreDefault
-
-loop:
- for !context.EOL() {
- token := context.Peek()
-
- switch token.Type {
- case TokenLong, TokenShort:
- if flag, err := context.flags.parse(context); err != nil {
- if !ignoreDefault {
- if cmd := cmds.defaultSubcommand(); cmd != nil {
- context.matchedCmd(cmd)
- cmds = cmd.cmdGroup
- break
- }
- }
- return err
- } else if flag == HelpFlag {
- ignoreDefault = true
- }
-
- case TokenArg:
- if cmds.have() {
- selectedDefault := false
- cmd, ok := cmds.commands[token.String()]
- if !ok {
- if !ignoreDefault {
- if cmd = cmds.defaultSubcommand(); cmd != nil {
- selectedDefault = true
- }
- }
- if cmd == nil {
- return fmt.Errorf("expected command but got %q", token)
- }
- }
- if cmd == HelpCommand {
- ignoreDefault = true
- }
- context.matchedCmd(cmd)
- cmds = cmd.cmdGroup
- if !selectedDefault {
- context.Next()
- }
- } else if context.arguments.have() {
- if app.noInterspersed {
- // no more flags
- context.argsOnly = true
- }
- arg := context.nextArg()
- if arg == nil {
- break loop
- }
- context.matchedArg(arg, token.String())
- context.Next()
- } else {
- break loop
- }
-
- case TokenEOL:
- break loop
- }
- }
-
- // Move to innermost default command.
- for !ignoreDefault {
- if cmd := cmds.defaultSubcommand(); cmd != nil {
- context.matchedCmd(cmd)
- cmds = cmd.cmdGroup
- } else {
- break
- }
- }
-
- if !context.EOL() {
- return fmt.Errorf("unexpected %s", context.Peek())
- }
-
- // Set defaults for all remaining args.
- for arg := context.nextArg(); arg != nil && !arg.consumesRemainder(); arg = context.nextArg() {
- for _, defaultValue := range arg.defaultValues {
- if err := arg.value.Set(defaultValue); err != nil {
- return fmt.Errorf("invalid default value '%s' for argument '%s'", defaultValue, arg.name)
- }
- }
- }
-
- return
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parsers.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parsers.go
deleted file mode 100644
index d9ad57e..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/parsers.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package kingpin
-
-import (
- "net"
- "net/url"
- "os"
- "time"
-
- "github.com/alecthomas/units"
-)
-
-type Settings interface {
- SetValue(value Value)
-}
-
-type parserMixin struct {
- value Value
- required bool
-}
-
-func (p *parserMixin) SetValue(value Value) {
- p.value = value
-}
-
-// StringMap provides key=value parsing into a map.
-func (p *parserMixin) StringMap() (target *map[string]string) {
- target = &(map[string]string{})
- p.StringMapVar(target)
- return
-}
-
-// Duration sets the parser to a time.Duration parser.
-func (p *parserMixin) Duration() (target *time.Duration) {
- target = new(time.Duration)
- p.DurationVar(target)
- return
-}
-
-// Bytes parses numeric byte units. eg. 1.5KB
-func (p *parserMixin) Bytes() (target *units.Base2Bytes) {
- target = new(units.Base2Bytes)
- p.BytesVar(target)
- return
-}
-
-// IP sets the parser to a net.IP parser.
-func (p *parserMixin) IP() (target *net.IP) {
- target = new(net.IP)
- p.IPVar(target)
- return
-}
-
-// TCP (host:port) address.
-func (p *parserMixin) TCP() (target **net.TCPAddr) {
- target = new(*net.TCPAddr)
- p.TCPVar(target)
- return
-}
-
-// TCPVar (host:port) address.
-func (p *parserMixin) TCPVar(target **net.TCPAddr) {
- p.SetValue(newTCPAddrValue(target))
-}
-
-// ExistingFile sets the parser to one that requires and returns an existing file.
-func (p *parserMixin) ExistingFile() (target *string) {
- target = new(string)
- p.ExistingFileVar(target)
- return
-}
-
-// ExistingDir sets the parser to one that requires and returns an existing directory.
-func (p *parserMixin) ExistingDir() (target *string) {
- target = new(string)
- p.ExistingDirVar(target)
- return
-}
-
-// ExistingFileOrDir sets the parser to one that requires and returns an existing file OR directory.
-func (p *parserMixin) ExistingFileOrDir() (target *string) {
- target = new(string)
- p.ExistingFileOrDirVar(target)
- return
-}
-
-// File returns an os.File against an existing file.
-func (p *parserMixin) File() (target **os.File) {
- target = new(*os.File)
- p.FileVar(target)
- return
-}
-
-// File attempts to open a File with os.OpenFile(flag, perm).
-func (p *parserMixin) OpenFile(flag int, perm os.FileMode) (target **os.File) {
- target = new(*os.File)
- p.OpenFileVar(target, flag, perm)
- return
-}
-
-// URL provides a valid, parsed url.URL.
-func (p *parserMixin) URL() (target **url.URL) {
- target = new(*url.URL)
- p.URLVar(target)
- return
-}
-
-// StringMap provides key=value parsing into a map.
-func (p *parserMixin) StringMapVar(target *map[string]string) {
- p.SetValue(newStringMapValue(target))
-}
-
-// Float sets the parser to a float64 parser.
-func (p *parserMixin) Float() (target *float64) {
- return p.Float64()
-}
-
-// Float sets the parser to a float64 parser.
-func (p *parserMixin) FloatVar(target *float64) {
- p.Float64Var(target)
-}
-
-// Duration sets the parser to a time.Duration parser.
-func (p *parserMixin) DurationVar(target *time.Duration) {
- p.SetValue(newDurationValue(target))
-}
-
-// BytesVar parses numeric byte units. eg. 1.5KB
-func (p *parserMixin) BytesVar(target *units.Base2Bytes) {
- p.SetValue(newBytesValue(target))
-}
-
-// IP sets the parser to a net.IP parser.
-func (p *parserMixin) IPVar(target *net.IP) {
- p.SetValue(newIPValue(target))
-}
-
-// ExistingFile sets the parser to one that requires and returns an existing file.
-func (p *parserMixin) ExistingFileVar(target *string) {
- p.SetValue(newExistingFileValue(target))
-}
-
-// ExistingDir sets the parser to one that requires and returns an existing directory.
-func (p *parserMixin) ExistingDirVar(target *string) {
- p.SetValue(newExistingDirValue(target))
-}
-
-// ExistingDir sets the parser to one that requires and returns an existing directory.
-func (p *parserMixin) ExistingFileOrDirVar(target *string) {
- p.SetValue(newExistingFileOrDirValue(target))
-}
-
-// FileVar opens an existing file.
-func (p *parserMixin) FileVar(target **os.File) {
- p.SetValue(newFileValue(target, os.O_RDONLY, 0))
-}
-
-// OpenFileVar calls os.OpenFile(flag, perm)
-func (p *parserMixin) OpenFileVar(target **os.File, flag int, perm os.FileMode) {
- p.SetValue(newFileValue(target, flag, perm))
-}
-
-// URL provides a valid, parsed url.URL.
-func (p *parserMixin) URLVar(target **url.URL) {
- p.SetValue(newURLValue(target))
-}
-
-// URLList provides a parsed list of url.URL values.
-func (p *parserMixin) URLList() (target *[]*url.URL) {
- target = new([]*url.URL)
- p.URLListVar(target)
- return
-}
-
-// URLListVar provides a parsed list of url.URL values.
-func (p *parserMixin) URLListVar(target *[]*url.URL) {
- p.SetValue(newURLListValue(target))
-}
-
-// Enum allows a value from a set of options.
-func (p *parserMixin) Enum(options ...string) (target *string) {
- target = new(string)
- p.EnumVar(target, options...)
- return
-}
-
-// EnumVar allows a value from a set of options.
-func (p *parserMixin) EnumVar(target *string, options ...string) {
- p.SetValue(newEnumFlag(target, options...))
-}
-
-// Enums allows a set of values from a set of options.
-func (p *parserMixin) Enums(options ...string) (target *[]string) {
- target = new([]string)
- p.EnumsVar(target, options...)
- return
-}
-
-// EnumVar allows a value from a set of options.
-func (p *parserMixin) EnumsVar(target *[]string, options ...string) {
- p.SetValue(newEnumsFlag(target, options...))
-}
-
-// A Counter increments a number each time it is encountered.
-func (p *parserMixin) Counter() (target *int) {
- target = new(int)
- p.CounterVar(target)
- return
-}
-
-func (p *parserMixin) CounterVar(target *int) {
- p.SetValue(newCounterValue(target))
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/templates.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/templates.go
deleted file mode 100644
index 536f9e5..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/templates.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package kingpin
-
-// Default usage template.
-var DefaultUsageTemplate = `{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommands"}}\
-{{range .FlattenedCommands}}\
-{{if not .Hidden}}\
- {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
-{{.Help|Wrap 4}}
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
-{{if .Help}}
-{{.Help|Wrap 0}}\
-{{end}}\
-
-{{end}}\
-
-{{if .Context.SelectedCommand}}\
-usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
-{{else}}\
-usage: {{.App.Name}}{{template "FormatUsage" .App}}
-{{end}}\
-{{if .Context.Flags}}\
-Flags:
-{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Args}}\
-Args:
-{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.SelectedCommand}}\
-{{if .Context.SelectedCommand.Commands}}\
-Subcommands:
-{{template "FormatCommands" .Context.SelectedCommand}}
-{{end}}\
-{{else if .App.Commands}}\
-Commands:
-{{template "FormatCommands" .App}}
-{{end}}\
-`
-
-// Usage template where command's optional flags are listed separately
-var SeparateOptionalFlagsUsageTemplate = `{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommands"}}\
-{{range .FlattenedCommands}}\
-{{if not .Hidden}}\
- {{.FullCommand}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
-{{.Help|Wrap 4}}
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
-{{if .Help}}
-{{.Help|Wrap 0}}\
-{{end}}\
-
-{{end}}\
-{{if .Context.SelectedCommand}}\
-usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
-{{else}}\
-usage: {{.App.Name}}{{template "FormatUsage" .App}}
-{{end}}\
-
-{{if .Context.Flags|RequiredFlags}}\
-Required flags:
-{{.Context.Flags|RequiredFlags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Flags|OptionalFlags}}\
-Optional flags:
-{{.Context.Flags|OptionalFlags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Args}}\
-Args:
-{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.SelectedCommand}}\
-Subcommands:
-{{if .Context.SelectedCommand.Commands}}\
-{{template "FormatCommands" .Context.SelectedCommand}}
-{{end}}\
-{{else if .App.Commands}}\
-Commands:
-{{template "FormatCommands" .App}}
-{{end}}\
-`
-
-// Usage template with compactly formatted commands.
-var CompactUsageTemplate = `{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommandList"}}\
-{{range .}}\
-{{if not .Hidden}}\
-{{.Depth|Indent}}{{.Name}}{{if .Default}}*{{end}}{{template "FormatCommand" .}}
-{{end}}\
-{{template "FormatCommandList" .Commands}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
-{{if .Help}}
-{{.Help|Wrap 0}}\
-{{end}}\
-
-{{end}}\
-
-{{if .Context.SelectedCommand}}\
-usage: {{.App.Name}} {{.Context.SelectedCommand}}{{template "FormatUsage" .Context.SelectedCommand}}
-{{else}}\
-usage: {{.App.Name}}{{template "FormatUsage" .App}}
-{{end}}\
-{{if .Context.Flags}}\
-Flags:
-{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Args}}\
-Args:
-{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.SelectedCommand}}\
-{{if .Context.SelectedCommand.Commands}}\
-Commands:
- {{.Context.SelectedCommand}}
-{{template "FormatCommandList" .Context.SelectedCommand.Commands}}
-{{end}}\
-{{else if .App.Commands}}\
-Commands:
-{{template "FormatCommandList" .App.Commands}}
-{{end}}\
-`
-
-var ManPageTemplate = `{{define "FormatFlags"}}\
-{{range .Flags}}\
-{{if not .Hidden}}\
-.TP
-\fB{{if .Short}}-{{.Short|Char}}, {{end}}--{{.Name}}{{if not .IsBoolFlag}}={{.FormatPlaceHolder}}{{end}}\\fR
-{{.Help}}
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}{{if .Default}}*{{end}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommands"}}\
-{{range .FlattenedCommands}}\
-{{if not .Hidden}}\
-.SS
-\fB{{.FullCommand}}{{template "FormatCommand" .}}\\fR
-.PP
-{{.Help}}
-{{template "FormatFlags" .}}\
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}\\fR
-{{end}}\
-
-.TH {{.App.Name}} 1 {{.App.Version}} "{{.App.Author}}"
-.SH "NAME"
-{{.App.Name}}
-.SH "SYNOPSIS"
-.TP
-\fB{{.App.Name}}{{template "FormatUsage" .App}}
-.SH "DESCRIPTION"
-{{.App.Help}}
-.SH "OPTIONS"
-{{template "FormatFlags" .App}}\
-{{if .App.Commands}}\
-.SH "COMMANDS"
-{{template "FormatCommands" .App}}\
-{{end}}\
-`
-
-// Default usage template.
-var LongHelpTemplate = `{{define "FormatCommand"}}\
-{{if .FlagSummary}} {{.FlagSummary}}{{end}}\
-{{range .Args}} {{if not .Required}}[{{end}}<{{.Name}}>{{if .Value|IsCumulative}}...{{end}}{{if not .Required}}]{{end}}{{end}}\
-{{end}}\
-
-{{define "FormatCommands"}}\
-{{range .FlattenedCommands}}\
-{{if not .Hidden}}\
- {{.FullCommand}}{{template "FormatCommand" .}}
-{{.Help|Wrap 4}}
-{{with .Flags|FlagsToTwoColumns}}{{FormatTwoColumnsWithIndent . 4 2}}{{end}}
-{{end}}\
-{{end}}\
-{{end}}\
-
-{{define "FormatUsage"}}\
-{{template "FormatCommand" .}}{{if .Commands}} [ ...]{{end}}
-{{if .Help}}
-{{.Help|Wrap 0}}\
-{{end}}\
-
-{{end}}\
-
-usage: {{.App.Name}}{{template "FormatUsage" .App}}
-{{if .Context.Flags}}\
-Flags:
-{{.Context.Flags|FlagsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .Context.Args}}\
-Args:
-{{.Context.Args|ArgsToTwoColumns|FormatTwoColumns}}
-{{end}}\
-{{if .App.Commands}}\
-Commands:
-{{template "FormatCommands" .App}}
-{{end}}\
-`
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/usage.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/usage.go
deleted file mode 100644
index 6c9124e..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/usage.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package kingpin
-
-import (
- "bytes"
- "fmt"
- "go/doc"
- "io"
- "strings"
-
- "github.com/alecthomas/template"
-)
-
-var (
- preIndent = " "
-)
-
-func formatTwoColumns(w io.Writer, indent, padding, width int, rows [][2]string) {
- // Find size of first column.
- s := 0
- for _, row := range rows {
- if c := len(row[0]); c > s && c < 30 {
- s = c
- }
- }
-
- indentStr := strings.Repeat(" ", indent)
- offsetStr := strings.Repeat(" ", s+padding)
-
- for _, row := range rows {
- buf := bytes.NewBuffer(nil)
- doc.ToText(buf, row[1], "", preIndent, width-s-padding-indent)
- lines := strings.Split(strings.TrimRight(buf.String(), "\n"), "\n")
- fmt.Fprintf(w, "%s%-*s%*s", indentStr, s, row[0], padding, "")
- if len(row[0]) >= 30 {
- fmt.Fprintf(w, "\n%s%s", indentStr, offsetStr)
- }
- fmt.Fprintf(w, "%s\n", lines[0])
- for _, line := range lines[1:] {
- fmt.Fprintf(w, "%s%s%s\n", indentStr, offsetStr, line)
- }
- }
-}
-
-// Usage writes application usage to w. It parses args to determine
-// appropriate help context, such as which command to show help for.
-func (a *Application) Usage(args []string) {
- context, err := a.parseContext(true, args)
- a.FatalIfError(err, "")
- if err := a.UsageForContextWithTemplate(context, 2, a.usageTemplate); err != nil {
- panic(err)
- }
-}
-
-func formatAppUsage(app *ApplicationModel) string {
- s := []string{app.Name}
- if len(app.Flags) > 0 {
- s = append(s, app.FlagSummary())
- }
- if len(app.Args) > 0 {
- s = append(s, app.ArgSummary())
- }
- return strings.Join(s, " ")
-}
-
-func formatCmdUsage(app *ApplicationModel, cmd *CmdModel) string {
- s := []string{app.Name, cmd.String()}
- if len(app.Flags) > 0 {
- s = append(s, app.FlagSummary())
- }
- if len(app.Args) > 0 {
- s = append(s, app.ArgSummary())
- }
- return strings.Join(s, " ")
-}
-
-func formatFlag(haveShort bool, flag *FlagModel) string {
- flagString := ""
- if flag.Short != 0 {
- flagString += fmt.Sprintf("-%c, --%s", flag.Short, flag.Name)
- } else {
- if haveShort {
- flagString += fmt.Sprintf(" --%s", flag.Name)
- } else {
- flagString += fmt.Sprintf("--%s", flag.Name)
- }
- }
- if !flag.IsBoolFlag() {
- flagString += fmt.Sprintf("=%s", flag.FormatPlaceHolder())
- }
- if v, ok := flag.Value.(repeatableFlag); ok && v.IsCumulative() {
- flagString += " ..."
- }
- return flagString
-}
-
-type templateParseContext struct {
- SelectedCommand *CmdModel
- *FlagGroupModel
- *ArgGroupModel
-}
-
-type templateContext struct {
- App *ApplicationModel
- Width int
- Context *templateParseContext
-}
-
-// UsageForContext displays usage information from a ParseContext (obtained from
-// Application.ParseContext() or Action(f) callbacks).
-func (a *Application) UsageForContext(context *ParseContext) error {
- return a.UsageForContextWithTemplate(context, 2, a.usageTemplate)
-}
-
-// UsageForContextWithTemplate is the base usage function. You generally don't need to use this.
-func (a *Application) UsageForContextWithTemplate(context *ParseContext, indent int, tmpl string) error {
- width := guessWidth(a.writer)
- funcs := template.FuncMap{
- "Indent": func(level int) string {
- return strings.Repeat(" ", level*indent)
- },
- "Wrap": func(indent int, s string) string {
- buf := bytes.NewBuffer(nil)
- indentText := strings.Repeat(" ", indent)
- doc.ToText(buf, s, indentText, indentText, width-indent)
- return buf.String()
- },
- "FormatFlag": formatFlag,
- "FlagsToTwoColumns": func(f []*FlagModel) [][2]string {
- rows := [][2]string{}
- haveShort := false
- for _, flag := range f {
- if flag.Short != 0 {
- haveShort = true
- break
- }
- }
- for _, flag := range f {
- if !flag.Hidden {
- rows = append(rows, [2]string{formatFlag(haveShort, flag), flag.Help})
- }
- }
- return rows
- },
- "RequiredFlags": func(f []*FlagModel) []*FlagModel {
- requiredFlags := []*FlagModel{}
- for _, flag := range f {
- if flag.Required == true {
- requiredFlags = append(requiredFlags, flag)
- }
- }
- return requiredFlags
- },
- "OptionalFlags": func(f []*FlagModel) []*FlagModel {
- optionalFlags := []*FlagModel{}
- for _, flag := range f {
- if flag.Required == false {
- optionalFlags = append(optionalFlags, flag)
- }
- }
- return optionalFlags
- },
- "ArgsToTwoColumns": func(a []*ArgModel) [][2]string {
- rows := [][2]string{}
- for _, arg := range a {
- s := "<" + arg.Name + ">"
- if !arg.Required {
- s = "[" + s + "]"
- }
- rows = append(rows, [2]string{s, arg.Help})
- }
- return rows
- },
- "FormatTwoColumns": func(rows [][2]string) string {
- buf := bytes.NewBuffer(nil)
- formatTwoColumns(buf, indent, indent, width, rows)
- return buf.String()
- },
- "FormatTwoColumnsWithIndent": func(rows [][2]string, indent, padding int) string {
- buf := bytes.NewBuffer(nil)
- formatTwoColumns(buf, indent, padding, width, rows)
- return buf.String()
- },
- "FormatAppUsage": formatAppUsage,
- "FormatCommandUsage": formatCmdUsage,
- "IsCumulative": func(value Value) bool {
- r, ok := value.(remainderArg)
- return ok && r.IsCumulative()
- },
- "Char": func(c rune) string {
- return string(c)
- },
- }
- t, err := template.New("usage").Funcs(funcs).Parse(tmpl)
- if err != nil {
- return err
- }
- var selectedCommand *CmdModel
- if context.SelectedCommand != nil {
- selectedCommand = context.SelectedCommand.Model()
- }
- ctx := templateContext{
- App: a.Model(),
- Width: width,
- Context: &templateParseContext{
- SelectedCommand: selectedCommand,
- FlagGroupModel: context.flags.Model(),
- ArgGroupModel: context.arguments.Model(),
- },
- }
- return t.Execute(a.writer, ctx)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values.go
deleted file mode 100644
index b986f12..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values.go
+++ /dev/null
@@ -1,466 +0,0 @@
-package kingpin
-
-//go:generate go run ./cmd/genvalues/main.go
-
-import (
- "fmt"
- "net"
- "net/url"
- "os"
- "reflect"
- "regexp"
- "strings"
- "time"
-
- "github.com/alecthomas/units"
-)
-
-// NOTE: Most of the base type values were lifted from:
-// http://golang.org/src/pkg/flag/flag.go?s=20146:20222
-
-// Value is the interface to the dynamic value stored in a flag.
-// (The default value is represented as a string.)
-//
-// If a Value has an IsBoolFlag() bool method returning true, the command-line
-// parser makes --name equivalent to -name=true rather than using the next
-// command-line argument, and adds a --no-name counterpart for negating the
-// flag.
-type Value interface {
- String() string
- Set(string) error
-}
-
-// Getter is an interface that allows the contents of a Value to be retrieved.
-// It wraps the Value interface, rather than being part of it, because it
-// appeared after Go 1 and its compatibility rules. All Value types provided
-// by this package satisfy the Getter interface.
-type Getter interface {
- Value
- Get() interface{}
-}
-
-// Optional interface to indicate boolean flags that don't accept a value, and
-// implicitly have a --no- negation counterpart.
-type boolFlag interface {
- Value
- IsBoolFlag() bool
-}
-
-// Optional interface for arguments that cumulatively consume all remaining
-// input.
-type remainderArg interface {
- Value
- IsCumulative() bool
-}
-
-// Optional interface for flags that can be repeated.
-type repeatableFlag interface {
- Value
- IsCumulative() bool
-}
-
-type accumulator struct {
- element func(value interface{}) Value
- typ reflect.Type
- slice reflect.Value
-}
-
-// Use reflection to accumulate values into a slice.
-//
-// target := []string{}
-// newAccumulator(&target, func (value interface{}) Value {
-// return newStringValue(value.(*string))
-// })
-func newAccumulator(slice interface{}, element func(value interface{}) Value) *accumulator {
- typ := reflect.TypeOf(slice)
- if typ.Kind() != reflect.Ptr || typ.Elem().Kind() != reflect.Slice {
- panic("expected a pointer to a slice")
- }
- return &accumulator{
- element: element,
- typ: typ.Elem().Elem(),
- slice: reflect.ValueOf(slice),
- }
-}
-
-func (a *accumulator) String() string {
- out := []string{}
- s := a.slice.Elem()
- for i := 0; i < s.Len(); i++ {
- out = append(out, a.element(s.Index(i).Addr().Interface()).String())
- }
- return strings.Join(out, ",")
-}
-
-func (a *accumulator) Set(value string) error {
- e := reflect.New(a.typ)
- if err := a.element(e.Interface()).Set(value); err != nil {
- return err
- }
- slice := reflect.Append(a.slice.Elem(), e.Elem())
- a.slice.Elem().Set(slice)
- return nil
-}
-
-func (a *accumulator) Get() interface{} {
- return a.slice.Interface()
-}
-
-func (a *accumulator) IsCumulative() bool {
- return true
-}
-
-func (b *boolValue) IsBoolFlag() bool { return true }
-
-// -- time.Duration Value
-type durationValue time.Duration
-
-func newDurationValue(p *time.Duration) *durationValue {
- return (*durationValue)(p)
-}
-
-func (d *durationValue) Set(s string) error {
- v, err := time.ParseDuration(s)
- *d = durationValue(v)
- return err
-}
-
-func (d *durationValue) Get() interface{} { return time.Duration(*d) }
-
-func (d *durationValue) String() string { return (*time.Duration)(d).String() }
-
-// -- map[string]string Value
-type stringMapValue map[string]string
-
-func newStringMapValue(p *map[string]string) *stringMapValue {
- return (*stringMapValue)(p)
-}
-
-var stringMapRegex = regexp.MustCompile("[:=]")
-
-func (s *stringMapValue) Set(value string) error {
- parts := stringMapRegex.Split(value, 2)
- if len(parts) != 2 {
- return fmt.Errorf("expected KEY=VALUE got '%s'", value)
- }
- (*s)[parts[0]] = parts[1]
- return nil
-}
-
-func (s *stringMapValue) Get() interface{} {
- return (map[string]string)(*s)
-}
-
-func (s *stringMapValue) String() string {
- return fmt.Sprintf("%s", map[string]string(*s))
-}
-
-func (s *stringMapValue) IsCumulative() bool {
- return true
-}
-
-// -- net.IP Value
-type ipValue net.IP
-
-func newIPValue(p *net.IP) *ipValue {
- return (*ipValue)(p)
-}
-
-func (i *ipValue) Set(value string) error {
- if ip := net.ParseIP(value); ip == nil {
- return fmt.Errorf("'%s' is not an IP address", value)
- } else {
- *i = *(*ipValue)(&ip)
- return nil
- }
-}
-
-func (i *ipValue) Get() interface{} {
- return (net.IP)(*i)
-}
-
-func (i *ipValue) String() string {
- return (*net.IP)(i).String()
-}
-
-// -- *net.TCPAddr Value
-type tcpAddrValue struct {
- addr **net.TCPAddr
-}
-
-func newTCPAddrValue(p **net.TCPAddr) *tcpAddrValue {
- return &tcpAddrValue{p}
-}
-
-func (i *tcpAddrValue) Set(value string) error {
- if addr, err := net.ResolveTCPAddr("tcp", value); err != nil {
- return fmt.Errorf("'%s' is not a valid TCP address: %s", value, err)
- } else {
- *i.addr = addr
- return nil
- }
-}
-
-func (t *tcpAddrValue) Get() interface{} {
- return (*net.TCPAddr)(*t.addr)
-}
-
-func (i *tcpAddrValue) String() string {
- return (*i.addr).String()
-}
-
-// -- existingFile Value
-
-type fileStatValue struct {
- path *string
- predicate func(os.FileInfo) error
-}
-
-func newFileStatValue(p *string, predicate func(os.FileInfo) error) *fileStatValue {
- return &fileStatValue{
- path: p,
- predicate: predicate,
- }
-}
-
-func (e *fileStatValue) Set(value string) error {
- if s, err := os.Stat(value); os.IsNotExist(err) {
- return fmt.Errorf("path '%s' does not exist", value)
- } else if err != nil {
- return err
- } else if err := e.predicate(s); err != nil {
- return err
- }
- *e.path = value
- return nil
-}
-
-func (f *fileStatValue) Get() interface{} {
- return (string)(*f.path)
-}
-
-func (e *fileStatValue) String() string {
- return *e.path
-}
-
-// -- os.File value
-
-type fileValue struct {
- f **os.File
- flag int
- perm os.FileMode
-}
-
-func newFileValue(p **os.File, flag int, perm os.FileMode) *fileValue {
- return &fileValue{p, flag, perm}
-}
-
-func (f *fileValue) Set(value string) error {
- if fd, err := os.OpenFile(value, f.flag, f.perm); err != nil {
- return err
- } else {
- *f.f = fd
- return nil
- }
-}
-
-func (f *fileValue) Get() interface{} {
- return (*os.File)(*f.f)
-}
-
-func (f *fileValue) String() string {
- if *f.f == nil {
- return ""
- }
- return (*f.f).Name()
-}
-
-// -- url.URL Value
-type urlValue struct {
- u **url.URL
-}
-
-func newURLValue(p **url.URL) *urlValue {
- return &urlValue{p}
-}
-
-func (u *urlValue) Set(value string) error {
- if url, err := url.Parse(value); err != nil {
- return fmt.Errorf("invalid URL: %s", err)
- } else {
- *u.u = url
- return nil
- }
-}
-
-func (u *urlValue) Get() interface{} {
- return (*url.URL)(*u.u)
-}
-
-func (u *urlValue) String() string {
- if *u.u == nil {
- return ""
- }
- return (*u.u).String()
-}
-
-// -- []*url.URL Value
-type urlListValue []*url.URL
-
-func newURLListValue(p *[]*url.URL) *urlListValue {
- return (*urlListValue)(p)
-}
-
-func (u *urlListValue) Set(value string) error {
- if url, err := url.Parse(value); err != nil {
- return fmt.Errorf("invalid URL: %s", err)
- } else {
- *u = append(*u, url)
- return nil
- }
-}
-
-func (u *urlListValue) Get() interface{} {
- return ([]*url.URL)(*u)
-}
-
-func (u *urlListValue) String() string {
- out := []string{}
- for _, url := range *u {
- out = append(out, url.String())
- }
- return strings.Join(out, ",")
-}
-
-// A flag whose value must be in a set of options.
-type enumValue struct {
- value *string
- options []string
-}
-
-func newEnumFlag(target *string, options ...string) *enumValue {
- return &enumValue{
- value: target,
- options: options,
- }
-}
-
-func (a *enumValue) String() string {
- return *a.value
-}
-
-func (a *enumValue) Set(value string) error {
- for _, v := range a.options {
- if v == value {
- *a.value = value
- return nil
- }
- }
- return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(a.options, ","), value)
-}
-
-func (e *enumValue) Get() interface{} {
- return (string)(*e.value)
-}
-
-// -- []string Enum Value
-type enumsValue struct {
- value *[]string
- options []string
-}
-
-func newEnumsFlag(target *[]string, options ...string) *enumsValue {
- return &enumsValue{
- value: target,
- options: options,
- }
-}
-
-func (s *enumsValue) Set(value string) error {
- for _, v := range s.options {
- if v == value {
- *s.value = append(*s.value, value)
- return nil
- }
- }
- return fmt.Errorf("enum value must be one of %s, got '%s'", strings.Join(s.options, ","), value)
-}
-
-func (e *enumsValue) Get() interface{} {
- return ([]string)(*e.value)
-}
-
-func (s *enumsValue) String() string {
- return strings.Join(*s.value, ",")
-}
-
-func (s *enumsValue) IsCumulative() bool {
- return true
-}
-
-// -- units.Base2Bytes Value
-type bytesValue units.Base2Bytes
-
-func newBytesValue(p *units.Base2Bytes) *bytesValue {
- return (*bytesValue)(p)
-}
-
-func (d *bytesValue) Set(s string) error {
- v, err := units.ParseBase2Bytes(s)
- *d = bytesValue(v)
- return err
-}
-
-func (d *bytesValue) Get() interface{} { return units.Base2Bytes(*d) }
-
-func (d *bytesValue) String() string { return (*units.Base2Bytes)(d).String() }
-
-func newExistingFileValue(target *string) *fileStatValue {
- return newFileStatValue(target, func(s os.FileInfo) error {
- if s.IsDir() {
- return fmt.Errorf("'%s' is a directory", s.Name())
- }
- return nil
- })
-}
-
-func newExistingDirValue(target *string) *fileStatValue {
- return newFileStatValue(target, func(s os.FileInfo) error {
- if !s.IsDir() {
- return fmt.Errorf("'%s' is a file", s.Name())
- }
- return nil
- })
-}
-
-func newExistingFileOrDirValue(target *string) *fileStatValue {
- return newFileStatValue(target, func(s os.FileInfo) error { return nil })
-}
-
-type counterValue int
-
-func newCounterValue(n *int) *counterValue {
- return (*counterValue)(n)
-}
-
-func (c *counterValue) Set(s string) error {
- *c++
- return nil
-}
-
-func (c *counterValue) Get() interface{} { return (int)(*c) }
-func (c *counterValue) IsBoolFlag() bool { return true }
-func (c *counterValue) String() string { return fmt.Sprintf("%d", *c) }
-func (c *counterValue) IsCumulative() bool { return true }
-
-func resolveHost(value string) (net.IP, error) {
- if ip := net.ParseIP(value); ip != nil {
- return ip, nil
- } else {
- if addr, err := net.ResolveIPAddr("ip", value); err != nil {
- return nil, err
- } else {
- return addr.IP, nil
- }
- }
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values.json b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values.json
deleted file mode 100644
index 23c6744..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values.json
+++ /dev/null
@@ -1,25 +0,0 @@
-[
- {"type": "bool", "parser": "strconv.ParseBool(s)"},
- {"type": "string", "parser": "s, error(nil)", "format": "string(*f.v)", "plural": "Strings"},
- {"type": "uint", "parser": "strconv.ParseUint(s, 0, 64)", "plural": "Uints"},
- {"type": "uint8", "parser": "strconv.ParseUint(s, 0, 8)"},
- {"type": "uint16", "parser": "strconv.ParseUint(s, 0, 16)"},
- {"type": "uint32", "parser": "strconv.ParseUint(s, 0, 32)"},
- {"type": "uint64", "parser": "strconv.ParseUint(s, 0, 64)"},
- {"type": "int", "parser": "strconv.ParseFloat(s, 64)", "plural": "Ints"},
- {"type": "int8", "parser": "strconv.ParseInt(s, 0, 8)"},
- {"type": "int16", "parser": "strconv.ParseInt(s, 0, 16)"},
- {"type": "int32", "parser": "strconv.ParseInt(s, 0, 32)"},
- {"type": "int64", "parser": "strconv.ParseInt(s, 0, 64)"},
- {"type": "float64", "parser": "strconv.ParseFloat(s, 64)"},
- {"type": "float32", "parser": "strconv.ParseFloat(s, 32)"},
- {"name": "Duration", "type": "time.Duration", "no_value_parser": true},
- {"name": "IP", "type": "net.IP", "no_value_parser": true},
- {"name": "TCPAddr", "Type": "*net.TCPAddr", "plural": "TCPList", "no_value_parser": true},
- {"name": "ExistingFile", "Type": "string", "plural": "ExistingFiles", "no_value_parser": true},
- {"name": "ExistingDir", "Type": "string", "plural": "ExistingDirs", "no_value_parser": true},
- {"name": "ExistingFileOrDir", "Type": "string", "plural": "ExistingFilesOrDirs", "no_value_parser": true},
- {"name": "Regexp", "Type": "*regexp.Regexp", "parser": "regexp.Compile(s)"},
- {"name": "ResolvedIP", "Type": "net.IP", "parser": "resolveHost(s)", "help": "Resolve a hostname or IP to an IP."},
- {"name": "HexBytes", "Type": "[]byte", "parser": "hex.DecodeString(s)", "help": "Bytes as a hex string."}
-]
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values_generated.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values_generated.go
deleted file mode 100644
index 602cfc9..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/alecthomas/kingpin.v2/values_generated.go
+++ /dev/null
@@ -1,821 +0,0 @@
-package kingpin
-
-import (
- "encoding/hex"
- "fmt"
- "net"
- "regexp"
- "strconv"
- "time"
-)
-
-// This file is autogenerated by "go generate .". Do not modify.
-
-// -- bool Value
-type boolValue struct{ v *bool }
-
-func newBoolValue(p *bool) *boolValue {
- return &boolValue{p}
-}
-
-func (f *boolValue) Set(s string) error {
- v, err := strconv.ParseBool(s)
- if err == nil {
- *f.v = (bool)(v)
- }
- return err
-}
-
-func (f *boolValue) Get() interface{} { return (bool)(*f.v) }
-
-func (f *boolValue) String() string { return fmt.Sprintf("%v", *f) }
-
-// Bool parses the next command-line value as bool.
-func (p *parserMixin) Bool() (target *bool) {
- target = new(bool)
- p.BoolVar(target)
- return
-}
-
-func (p *parserMixin) BoolVar(target *bool) {
- p.SetValue(newBoolValue(target))
-}
-
-// BoolList accumulates bool values into a slice.
-func (p *parserMixin) BoolList() (target *[]bool) {
- target = new([]bool)
- p.BoolListVar(target)
- return
-}
-
-func (p *parserMixin) BoolListVar(target *[]bool) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newBoolValue(v.(*bool))
- }))
-}
-
-// -- string Value
-type stringValue struct{ v *string }
-
-func newStringValue(p *string) *stringValue {
- return &stringValue{p}
-}
-
-func (f *stringValue) Set(s string) error {
- v, err := s, error(nil)
- if err == nil {
- *f.v = (string)(v)
- }
- return err
-}
-
-func (f *stringValue) Get() interface{} { return (string)(*f.v) }
-
-func (f *stringValue) String() string { return string(*f.v) }
-
-// String parses the next command-line value as string.
-func (p *parserMixin) String() (target *string) {
- target = new(string)
- p.StringVar(target)
- return
-}
-
-func (p *parserMixin) StringVar(target *string) {
- p.SetValue(newStringValue(target))
-}
-
-// Strings accumulates string values into a slice.
-func (p *parserMixin) Strings() (target *[]string) {
- target = new([]string)
- p.StringsVar(target)
- return
-}
-
-func (p *parserMixin) StringsVar(target *[]string) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newStringValue(v.(*string))
- }))
-}
-
-// -- uint Value
-type uintValue struct{ v *uint }
-
-func newUintValue(p *uint) *uintValue {
- return &uintValue{p}
-}
-
-func (f *uintValue) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 64)
- if err == nil {
- *f.v = (uint)(v)
- }
- return err
-}
-
-func (f *uintValue) Get() interface{} { return (uint)(*f.v) }
-
-func (f *uintValue) String() string { return fmt.Sprintf("%v", *f) }
-
-// Uint parses the next command-line value as uint.
-func (p *parserMixin) Uint() (target *uint) {
- target = new(uint)
- p.UintVar(target)
- return
-}
-
-func (p *parserMixin) UintVar(target *uint) {
- p.SetValue(newUintValue(target))
-}
-
-// Uints accumulates uint values into a slice.
-func (p *parserMixin) Uints() (target *[]uint) {
- target = new([]uint)
- p.UintsVar(target)
- return
-}
-
-func (p *parserMixin) UintsVar(target *[]uint) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUintValue(v.(*uint))
- }))
-}
-
-// -- uint8 Value
-type uint8Value struct{ v *uint8 }
-
-func newUint8Value(p *uint8) *uint8Value {
- return &uint8Value{p}
-}
-
-func (f *uint8Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 8)
- if err == nil {
- *f.v = (uint8)(v)
- }
- return err
-}
-
-func (f *uint8Value) Get() interface{} { return (uint8)(*f.v) }
-
-func (f *uint8Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Uint8 parses the next command-line value as uint8.
-func (p *parserMixin) Uint8() (target *uint8) {
- target = new(uint8)
- p.Uint8Var(target)
- return
-}
-
-func (p *parserMixin) Uint8Var(target *uint8) {
- p.SetValue(newUint8Value(target))
-}
-
-// Uint8List accumulates uint8 values into a slice.
-func (p *parserMixin) Uint8List() (target *[]uint8) {
- target = new([]uint8)
- p.Uint8ListVar(target)
- return
-}
-
-func (p *parserMixin) Uint8ListVar(target *[]uint8) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUint8Value(v.(*uint8))
- }))
-}
-
-// -- uint16 Value
-type uint16Value struct{ v *uint16 }
-
-func newUint16Value(p *uint16) *uint16Value {
- return &uint16Value{p}
-}
-
-func (f *uint16Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 16)
- if err == nil {
- *f.v = (uint16)(v)
- }
- return err
-}
-
-func (f *uint16Value) Get() interface{} { return (uint16)(*f.v) }
-
-func (f *uint16Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Uint16 parses the next command-line value as uint16.
-func (p *parserMixin) Uint16() (target *uint16) {
- target = new(uint16)
- p.Uint16Var(target)
- return
-}
-
-func (p *parserMixin) Uint16Var(target *uint16) {
- p.SetValue(newUint16Value(target))
-}
-
-// Uint16List accumulates uint16 values into a slice.
-func (p *parserMixin) Uint16List() (target *[]uint16) {
- target = new([]uint16)
- p.Uint16ListVar(target)
- return
-}
-
-func (p *parserMixin) Uint16ListVar(target *[]uint16) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUint16Value(v.(*uint16))
- }))
-}
-
-// -- uint32 Value
-type uint32Value struct{ v *uint32 }
-
-func newUint32Value(p *uint32) *uint32Value {
- return &uint32Value{p}
-}
-
-func (f *uint32Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 32)
- if err == nil {
- *f.v = (uint32)(v)
- }
- return err
-}
-
-func (f *uint32Value) Get() interface{} { return (uint32)(*f.v) }
-
-func (f *uint32Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Uint32 parses the next command-line value as uint32.
-func (p *parserMixin) Uint32() (target *uint32) {
- target = new(uint32)
- p.Uint32Var(target)
- return
-}
-
-func (p *parserMixin) Uint32Var(target *uint32) {
- p.SetValue(newUint32Value(target))
-}
-
-// Uint32List accumulates uint32 values into a slice.
-func (p *parserMixin) Uint32List() (target *[]uint32) {
- target = new([]uint32)
- p.Uint32ListVar(target)
- return
-}
-
-func (p *parserMixin) Uint32ListVar(target *[]uint32) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUint32Value(v.(*uint32))
- }))
-}
-
-// -- uint64 Value
-type uint64Value struct{ v *uint64 }
-
-func newUint64Value(p *uint64) *uint64Value {
- return &uint64Value{p}
-}
-
-func (f *uint64Value) Set(s string) error {
- v, err := strconv.ParseUint(s, 0, 64)
- if err == nil {
- *f.v = (uint64)(v)
- }
- return err
-}
-
-func (f *uint64Value) Get() interface{} { return (uint64)(*f.v) }
-
-func (f *uint64Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Uint64 parses the next command-line value as uint64.
-func (p *parserMixin) Uint64() (target *uint64) {
- target = new(uint64)
- p.Uint64Var(target)
- return
-}
-
-func (p *parserMixin) Uint64Var(target *uint64) {
- p.SetValue(newUint64Value(target))
-}
-
-// Uint64List accumulates uint64 values into a slice.
-func (p *parserMixin) Uint64List() (target *[]uint64) {
- target = new([]uint64)
- p.Uint64ListVar(target)
- return
-}
-
-func (p *parserMixin) Uint64ListVar(target *[]uint64) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newUint64Value(v.(*uint64))
- }))
-}
-
-// -- int Value
-type intValue struct{ v *int }
-
-func newIntValue(p *int) *intValue {
- return &intValue{p}
-}
-
-func (f *intValue) Set(s string) error {
- v, err := strconv.ParseFloat(s, 64)
- if err == nil {
- *f.v = (int)(v)
- }
- return err
-}
-
-func (f *intValue) Get() interface{} { return (int)(*f.v) }
-
-func (f *intValue) String() string { return fmt.Sprintf("%v", *f) }
-
-// Int parses the next command-line value as int.
-func (p *parserMixin) Int() (target *int) {
- target = new(int)
- p.IntVar(target)
- return
-}
-
-func (p *parserMixin) IntVar(target *int) {
- p.SetValue(newIntValue(target))
-}
-
-// Ints accumulates int values into a slice.
-func (p *parserMixin) Ints() (target *[]int) {
- target = new([]int)
- p.IntsVar(target)
- return
-}
-
-func (p *parserMixin) IntsVar(target *[]int) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newIntValue(v.(*int))
- }))
-}
-
-// -- int8 Value
-type int8Value struct{ v *int8 }
-
-func newInt8Value(p *int8) *int8Value {
- return &int8Value{p}
-}
-
-func (f *int8Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 8)
- if err == nil {
- *f.v = (int8)(v)
- }
- return err
-}
-
-func (f *int8Value) Get() interface{} { return (int8)(*f.v) }
-
-func (f *int8Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Int8 parses the next command-line value as int8.
-func (p *parserMixin) Int8() (target *int8) {
- target = new(int8)
- p.Int8Var(target)
- return
-}
-
-func (p *parserMixin) Int8Var(target *int8) {
- p.SetValue(newInt8Value(target))
-}
-
-// Int8List accumulates int8 values into a slice.
-func (p *parserMixin) Int8List() (target *[]int8) {
- target = new([]int8)
- p.Int8ListVar(target)
- return
-}
-
-func (p *parserMixin) Int8ListVar(target *[]int8) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newInt8Value(v.(*int8))
- }))
-}
-
-// -- int16 Value
-type int16Value struct{ v *int16 }
-
-func newInt16Value(p *int16) *int16Value {
- return &int16Value{p}
-}
-
-func (f *int16Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 16)
- if err == nil {
- *f.v = (int16)(v)
- }
- return err
-}
-
-func (f *int16Value) Get() interface{} { return (int16)(*f.v) }
-
-func (f *int16Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Int16 parses the next command-line value as int16.
-func (p *parserMixin) Int16() (target *int16) {
- target = new(int16)
- p.Int16Var(target)
- return
-}
-
-func (p *parserMixin) Int16Var(target *int16) {
- p.SetValue(newInt16Value(target))
-}
-
-// Int16List accumulates int16 values into a slice.
-func (p *parserMixin) Int16List() (target *[]int16) {
- target = new([]int16)
- p.Int16ListVar(target)
- return
-}
-
-func (p *parserMixin) Int16ListVar(target *[]int16) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newInt16Value(v.(*int16))
- }))
-}
-
-// -- int32 Value
-type int32Value struct{ v *int32 }
-
-func newInt32Value(p *int32) *int32Value {
- return &int32Value{p}
-}
-
-func (f *int32Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 32)
- if err == nil {
- *f.v = (int32)(v)
- }
- return err
-}
-
-func (f *int32Value) Get() interface{} { return (int32)(*f.v) }
-
-func (f *int32Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Int32 parses the next command-line value as int32.
-func (p *parserMixin) Int32() (target *int32) {
- target = new(int32)
- p.Int32Var(target)
- return
-}
-
-func (p *parserMixin) Int32Var(target *int32) {
- p.SetValue(newInt32Value(target))
-}
-
-// Int32List accumulates int32 values into a slice.
-func (p *parserMixin) Int32List() (target *[]int32) {
- target = new([]int32)
- p.Int32ListVar(target)
- return
-}
-
-func (p *parserMixin) Int32ListVar(target *[]int32) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newInt32Value(v.(*int32))
- }))
-}
-
-// -- int64 Value
-type int64Value struct{ v *int64 }
-
-func newInt64Value(p *int64) *int64Value {
- return &int64Value{p}
-}
-
-func (f *int64Value) Set(s string) error {
- v, err := strconv.ParseInt(s, 0, 64)
- if err == nil {
- *f.v = (int64)(v)
- }
- return err
-}
-
-func (f *int64Value) Get() interface{} { return (int64)(*f.v) }
-
-func (f *int64Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Int64 parses the next command-line value as int64.
-func (p *parserMixin) Int64() (target *int64) {
- target = new(int64)
- p.Int64Var(target)
- return
-}
-
-func (p *parserMixin) Int64Var(target *int64) {
- p.SetValue(newInt64Value(target))
-}
-
-// Int64List accumulates int64 values into a slice.
-func (p *parserMixin) Int64List() (target *[]int64) {
- target = new([]int64)
- p.Int64ListVar(target)
- return
-}
-
-func (p *parserMixin) Int64ListVar(target *[]int64) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newInt64Value(v.(*int64))
- }))
-}
-
-// -- float64 Value
-type float64Value struct{ v *float64 }
-
-func newFloat64Value(p *float64) *float64Value {
- return &float64Value{p}
-}
-
-func (f *float64Value) Set(s string) error {
- v, err := strconv.ParseFloat(s, 64)
- if err == nil {
- *f.v = (float64)(v)
- }
- return err
-}
-
-func (f *float64Value) Get() interface{} { return (float64)(*f.v) }
-
-func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Float64 parses the next command-line value as float64.
-func (p *parserMixin) Float64() (target *float64) {
- target = new(float64)
- p.Float64Var(target)
- return
-}
-
-func (p *parserMixin) Float64Var(target *float64) {
- p.SetValue(newFloat64Value(target))
-}
-
-// Float64List accumulates float64 values into a slice.
-func (p *parserMixin) Float64List() (target *[]float64) {
- target = new([]float64)
- p.Float64ListVar(target)
- return
-}
-
-func (p *parserMixin) Float64ListVar(target *[]float64) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newFloat64Value(v.(*float64))
- }))
-}
-
-// -- float32 Value
-type float32Value struct{ v *float32 }
-
-func newFloat32Value(p *float32) *float32Value {
- return &float32Value{p}
-}
-
-func (f *float32Value) Set(s string) error {
- v, err := strconv.ParseFloat(s, 32)
- if err == nil {
- *f.v = (float32)(v)
- }
- return err
-}
-
-func (f *float32Value) Get() interface{} { return (float32)(*f.v) }
-
-func (f *float32Value) String() string { return fmt.Sprintf("%v", *f) }
-
-// Float32 parses the next command-line value as float32.
-func (p *parserMixin) Float32() (target *float32) {
- target = new(float32)
- p.Float32Var(target)
- return
-}
-
-func (p *parserMixin) Float32Var(target *float32) {
- p.SetValue(newFloat32Value(target))
-}
-
-// Float32List accumulates float32 values into a slice.
-func (p *parserMixin) Float32List() (target *[]float32) {
- target = new([]float32)
- p.Float32ListVar(target)
- return
-}
-
-func (p *parserMixin) Float32ListVar(target *[]float32) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newFloat32Value(v.(*float32))
- }))
-}
-
-// DurationList accumulates time.Duration values into a slice.
-func (p *parserMixin) DurationList() (target *[]time.Duration) {
- target = new([]time.Duration)
- p.DurationListVar(target)
- return
-}
-
-func (p *parserMixin) DurationListVar(target *[]time.Duration) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newDurationValue(v.(*time.Duration))
- }))
-}
-
-// IPList accumulates net.IP values into a slice.
-func (p *parserMixin) IPList() (target *[]net.IP) {
- target = new([]net.IP)
- p.IPListVar(target)
- return
-}
-
-func (p *parserMixin) IPListVar(target *[]net.IP) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newIPValue(v.(*net.IP))
- }))
-}
-
-// TCPList accumulates *net.TCPAddr values into a slice.
-func (p *parserMixin) TCPList() (target *[]*net.TCPAddr) {
- target = new([]*net.TCPAddr)
- p.TCPListVar(target)
- return
-}
-
-func (p *parserMixin) TCPListVar(target *[]*net.TCPAddr) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newTCPAddrValue(v.(**net.TCPAddr))
- }))
-}
-
-// ExistingFiles accumulates string values into a slice.
-func (p *parserMixin) ExistingFiles() (target *[]string) {
- target = new([]string)
- p.ExistingFilesVar(target)
- return
-}
-
-func (p *parserMixin) ExistingFilesVar(target *[]string) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newExistingFileValue(v.(*string))
- }))
-}
-
-// ExistingDirs accumulates string values into a slice.
-func (p *parserMixin) ExistingDirs() (target *[]string) {
- target = new([]string)
- p.ExistingDirsVar(target)
- return
-}
-
-func (p *parserMixin) ExistingDirsVar(target *[]string) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newExistingDirValue(v.(*string))
- }))
-}
-
-// ExistingFilesOrDirs accumulates string values into a slice.
-func (p *parserMixin) ExistingFilesOrDirs() (target *[]string) {
- target = new([]string)
- p.ExistingFilesOrDirsVar(target)
- return
-}
-
-func (p *parserMixin) ExistingFilesOrDirsVar(target *[]string) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newExistingFileOrDirValue(v.(*string))
- }))
-}
-
-// -- *regexp.Regexp Value
-type regexpValue struct{ v **regexp.Regexp }
-
-func newRegexpValue(p **regexp.Regexp) *regexpValue {
- return ®expValue{p}
-}
-
-func (f *regexpValue) Set(s string) error {
- v, err := regexp.Compile(s)
- if err == nil {
- *f.v = (*regexp.Regexp)(v)
- }
- return err
-}
-
-func (f *regexpValue) Get() interface{} { return (*regexp.Regexp)(*f.v) }
-
-func (f *regexpValue) String() string { return fmt.Sprintf("%v", *f) }
-
-// Regexp parses the next command-line value as *regexp.Regexp.
-func (p *parserMixin) Regexp() (target **regexp.Regexp) {
- target = new(*regexp.Regexp)
- p.RegexpVar(target)
- return
-}
-
-func (p *parserMixin) RegexpVar(target **regexp.Regexp) {
- p.SetValue(newRegexpValue(target))
-}
-
-// RegexpList accumulates *regexp.Regexp values into a slice.
-func (p *parserMixin) RegexpList() (target *[]*regexp.Regexp) {
- target = new([]*regexp.Regexp)
- p.RegexpListVar(target)
- return
-}
-
-func (p *parserMixin) RegexpListVar(target *[]*regexp.Regexp) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newRegexpValue(v.(**regexp.Regexp))
- }))
-}
-
-// -- net.IP Value
-type resolvedIPValue struct{ v *net.IP }
-
-func newResolvedIPValue(p *net.IP) *resolvedIPValue {
- return &resolvedIPValue{p}
-}
-
-func (f *resolvedIPValue) Set(s string) error {
- v, err := resolveHost(s)
- if err == nil {
- *f.v = (net.IP)(v)
- }
- return err
-}
-
-func (f *resolvedIPValue) Get() interface{} { return (net.IP)(*f.v) }
-
-func (f *resolvedIPValue) String() string { return fmt.Sprintf("%v", *f) }
-
-// Resolve a hostname or IP to an IP.
-func (p *parserMixin) ResolvedIP() (target *net.IP) {
- target = new(net.IP)
- p.ResolvedIPVar(target)
- return
-}
-
-func (p *parserMixin) ResolvedIPVar(target *net.IP) {
- p.SetValue(newResolvedIPValue(target))
-}
-
-// ResolvedIPList accumulates net.IP values into a slice.
-func (p *parserMixin) ResolvedIPList() (target *[]net.IP) {
- target = new([]net.IP)
- p.ResolvedIPListVar(target)
- return
-}
-
-func (p *parserMixin) ResolvedIPListVar(target *[]net.IP) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newResolvedIPValue(v.(*net.IP))
- }))
-}
-
-// -- []byte Value
-type hexBytesValue struct{ v *[]byte }
-
-func newHexBytesValue(p *[]byte) *hexBytesValue {
- return &hexBytesValue{p}
-}
-
-func (f *hexBytesValue) Set(s string) error {
- v, err := hex.DecodeString(s)
- if err == nil {
- *f.v = ([]byte)(v)
- }
- return err
-}
-
-func (f *hexBytesValue) Get() interface{} { return ([]byte)(*f.v) }
-
-func (f *hexBytesValue) String() string { return fmt.Sprintf("%v", *f) }
-
-// Bytes as a hex string.
-func (p *parserMixin) HexBytes() (target *[]byte) {
- target = new([]byte)
- p.HexBytesVar(target)
- return
-}
-
-func (p *parserMixin) HexBytesVar(target *[]byte) {
- p.SetValue(newHexBytesValue(target))
-}
-
-// HexBytesList accumulates []byte values into a slice.
-func (p *parserMixin) HexBytesList() (target *[][]byte) {
- target = new([][]byte)
- p.HexBytesListVar(target)
- return
-}
-
-func (p *parserMixin) HexBytesListVar(target *[][]byte) {
- p.SetValue(newAccumulator(target, func(v interface{}) Value {
- return newHexBytesValue(v.(*[]byte))
- }))
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE
deleted file mode 100644
index a68e67f..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE
+++ /dev/null
@@ -1,188 +0,0 @@
-
-Copyright (c) 2011-2014 - Canonical Inc.
-
-This software is licensed under the LGPLv3, included below.
-
-As a special exception to the GNU Lesser General Public License version 3
-("LGPL3"), the copyright holders of this Library give you permission to
-convey to a third party a Combined Work that links statically or dynamically
-to this Library without providing any Minimal Corresponding Source or
-Minimal Application Code as set out in 4d or providing the installation
-information set out in section 4e, provided that you comply with the other
-provisions of LGPL3 and provided that you meet, for the Application the
-terms and conditions of the license(s) which apply to the Application.
-
-Except as stated in this special exception, the provisions of LGPL3 will
-continue to comply in full to this Library. If you modify this Library, you
-may apply this exception to your version of this Library, but you are not
-obliged to do so. If you do not wish to do so, delete this exception
-statement from your version. This exception does not (and cannot) modify any
-license terms which apply to the Application, with which you must still
-comply.
-
-
- GNU LESSER GENERAL PUBLIC LICENSE
- Version 3, 29 June 2007
-
- Copyright (C) 2007 Free Software Foundation, Inc.
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-
- This version of the GNU Lesser General Public License incorporates
-the terms and conditions of version 3 of the GNU General Public
-License, supplemented by the additional permissions listed below.
-
- 0. Additional Definitions.
-
- As used herein, "this License" refers to version 3 of the GNU Lesser
-General Public License, and the "GNU GPL" refers to version 3 of the GNU
-General Public License.
-
- "The Library" refers to a covered work governed by this License,
-other than an Application or a Combined Work as defined below.
-
- An "Application" is any work that makes use of an interface provided
-by the Library, but which is not otherwise based on the Library.
-Defining a subclass of a class defined by the Library is deemed a mode
-of using an interface provided by the Library.
-
- A "Combined Work" is a work produced by combining or linking an
-Application with the Library. The particular version of the Library
-with which the Combined Work was made is also called the "Linked
-Version".
-
- The "Minimal Corresponding Source" for a Combined Work means the
-Corresponding Source for the Combined Work, excluding any source code
-for portions of the Combined Work that, considered in isolation, are
-based on the Application, and not on the Linked Version.
-
- The "Corresponding Application Code" for a Combined Work means the
-object code and/or source code for the Application, including any data
-and utility programs needed for reproducing the Combined Work from the
-Application, but excluding the System Libraries of the Combined Work.
-
- 1. Exception to Section 3 of the GNU GPL.
-
- You may convey a covered work under sections 3 and 4 of this License
-without being bound by section 3 of the GNU GPL.
-
- 2. Conveying Modified Versions.
-
- If you modify a copy of the Library, and, in your modifications, a
-facility refers to a function or data to be supplied by an Application
-that uses the facility (other than as an argument passed when the
-facility is invoked), then you may convey a copy of the modified
-version:
-
- a) under this License, provided that you make a good faith effort to
- ensure that, in the event an Application does not supply the
- function or data, the facility still operates, and performs
- whatever part of its purpose remains meaningful, or
-
- b) under the GNU GPL, with none of the additional permissions of
- this License applicable to that copy.
-
- 3. Object Code Incorporating Material from Library Header Files.
-
- The object code form of an Application may incorporate material from
-a header file that is part of the Library. You may convey such object
-code under terms of your choice, provided that, if the incorporated
-material is not limited to numerical parameters, data structure
-layouts and accessors, or small macros, inline functions and templates
-(ten or fewer lines in length), you do both of the following:
-
- a) Give prominent notice with each copy of the object code that the
- Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the object code with a copy of the GNU GPL and this license
- document.
-
- 4. Combined Works.
-
- You may convey a Combined Work under terms of your choice that,
-taken together, effectively do not restrict modification of the
-portions of the Library contained in the Combined Work and reverse
-engineering for debugging such modifications, if you also do each of
-the following:
-
- a) Give prominent notice with each copy of the Combined Work that
- the Library is used in it and that the Library and its use are
- covered by this License.
-
- b) Accompany the Combined Work with a copy of the GNU GPL and this license
- document.
-
- c) For a Combined Work that displays copyright notices during
- execution, include the copyright notice for the Library among
- these notices, as well as a reference directing the user to the
- copies of the GNU GPL and this license document.
-
- d) Do one of the following:
-
- 0) Convey the Minimal Corresponding Source under the terms of this
- License, and the Corresponding Application Code in a form
- suitable for, and under terms that permit, the user to
- recombine or relink the Application with a modified version of
- the Linked Version to produce a modified Combined Work, in the
- manner specified by section 6 of the GNU GPL for conveying
- Corresponding Source.
-
- 1) Use a suitable shared library mechanism for linking with the
- Library. A suitable mechanism is one that (a) uses at run time
- a copy of the Library already present on the user's computer
- system, and (b) will operate properly with a modified version
- of the Library that is interface-compatible with the Linked
- Version.
-
- e) Provide Installation Information, but only if you would otherwise
- be required to provide such information under section 6 of the
- GNU GPL, and only to the extent that such information is
- necessary to install and execute a modified version of the
- Combined Work produced by recombining or relinking the
- Application with a modified version of the Linked Version. (If
- you use option 4d0, the Installation Information must accompany
- the Minimal Corresponding Source and Corresponding Application
- Code. If you use option 4d1, you must provide the Installation
- Information in the manner specified by section 6 of the GNU GPL
- for conveying Corresponding Source.)
-
- 5. Combined Libraries.
-
- You may place library facilities that are a work based on the
-Library side by side in a single library together with other library
-facilities that are not Applications and are not covered by this
-License, and convey such a combined library under terms of your
-choice, if you do both of the following:
-
- a) Accompany the combined library with a copy of the same work based
- on the Library, uncombined with any other library facilities,
- conveyed under the terms of this License.
-
- b) Give prominent notice with the combined library that part of it
- is a work based on the Library, and explaining where to find the
- accompanying uncombined form of the same work.
-
- 6. Revised Versions of the GNU Lesser General Public License.
-
- The Free Software Foundation may publish revised and/or new versions
-of the GNU Lesser General Public License from time to time. Such new
-versions will be similar in spirit to the present version, but may
-differ in detail to address new problems or concerns.
-
- Each version is given a distinguishing version number. If the
-Library as you received it specifies that a certain numbered version
-of the GNU Lesser General Public License "or any later version"
-applies to it, you have the option of following the terms and
-conditions either of that published version or of any later version
-published by the Free Software Foundation. If the Library as you
-received it does not specify a version number of the GNU Lesser
-General Public License, you may choose any version of the GNU Lesser
-General Public License ever published by the Free Software Foundation.
-
- If the Library as you received it specifies that a proxy can decide
-whether future versions of the GNU Lesser General Public License shall
-apply, that proxy's public statement of acceptance of any version is
-permanent authorization for you to choose that version for the
-Library.
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml
deleted file mode 100644
index 8da58fb..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/LICENSE.libyaml
+++ /dev/null
@@ -1,31 +0,0 @@
-The following files were ported to Go from C files of libyaml, and thus
-are still covered by their original copyright and license:
-
- apic.go
- emitterc.go
- parserc.go
- readerc.go
- scannerc.go
- writerc.go
- yamlh.go
- yamlprivateh.go
-
-Copyright (c) 2006 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md
deleted file mode 100644
index 7b8bd86..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/README.md
+++ /dev/null
@@ -1,131 +0,0 @@
-# YAML support for the Go language
-
-Introduction
-------------
-
-The yaml package enables Go programs to comfortably encode and decode YAML
-values. It was developed within [Canonical](https://www.canonical.com) as
-part of the [juju](https://juju.ubuntu.com) project, and is based on a
-pure Go port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML)
-C library to parse and generate YAML data quickly and reliably.
-
-Compatibility
--------------
-
-The yaml package supports most of YAML 1.1 and 1.2, including support for
-anchors, tags, map merging, etc. Multi-document unmarshalling is not yet
-implemented, and base-60 floats from YAML 1.1 are purposefully not
-supported since they're a poor design and are gone in YAML 1.2.
-
-Installation and usage
-----------------------
-
-The import path for the package is *gopkg.in/yaml.v2*.
-
-To install it, run:
-
- go get gopkg.in/yaml.v2
-
-API documentation
------------------
-
-If opened in a browser, the import path itself leads to the API documentation:
-
- * [https://gopkg.in/yaml.v2](https://gopkg.in/yaml.v2)
-
-API stability
--------------
-
-The package API for yaml v2 will remain stable as described in [gopkg.in](https://gopkg.in).
-
-
-License
--------
-
-The yaml package is licensed under the LGPL with an exception that allows it to be linked statically. Please see the LICENSE file for details.
-
-
-Example
--------
-
-```Go
-package main
-
-import (
- "fmt"
- "log"
-
- "gopkg.in/yaml.v2"
-)
-
-var data = `
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-`
-
-type T struct {
- A string
- B struct {
- RenamedC int `yaml:"c"`
- D []int `yaml:",flow"`
- }
-}
-
-func main() {
- t := T{}
-
- err := yaml.Unmarshal([]byte(data), &t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t:\n%v\n\n", t)
-
- d, err := yaml.Marshal(&t)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- t dump:\n%s\n\n", string(d))
-
- m := make(map[interface{}]interface{})
-
- err = yaml.Unmarshal([]byte(data), &m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m:\n%v\n\n", m)
-
- d, err = yaml.Marshal(&m)
- if err != nil {
- log.Fatalf("error: %v", err)
- }
- fmt.Printf("--- m dump:\n%s\n\n", string(d))
-}
-```
-
-This example will generate the following output:
-
-```
---- t:
-{Easy! {2 [3 4]}}
-
---- t dump:
-a: Easy!
-b:
- c: 2
- d: [3, 4]
-
-
---- m:
-map[a:Easy! b:map[c:2 d:[3 4]]]
-
---- m dump:
-a: Easy!
-b:
- c: 2
- d:
- - 3
- - 4
-```
-
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go
deleted file mode 100644
index 95ec014..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/apic.go
+++ /dev/null
@@ -1,742 +0,0 @@
-package yaml
-
-import (
- "io"
- "os"
-)
-
-func yaml_insert_token(parser *yaml_parser_t, pos int, token *yaml_token_t) {
- //fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens))
-
- // Check if we can move the queue at the beginning of the buffer.
- if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) {
- if parser.tokens_head != len(parser.tokens) {
- copy(parser.tokens, parser.tokens[parser.tokens_head:])
- }
- parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head]
- parser.tokens_head = 0
- }
- parser.tokens = append(parser.tokens, *token)
- if pos < 0 {
- return
- }
- copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:])
- parser.tokens[parser.tokens_head+pos] = *token
-}
-
-// Create a new parser object.
-func yaml_parser_initialize(parser *yaml_parser_t) bool {
- *parser = yaml_parser_t{
- raw_buffer: make([]byte, 0, input_raw_buffer_size),
- buffer: make([]byte, 0, input_buffer_size),
- }
- return true
-}
-
-// Destroy a parser object.
-func yaml_parser_delete(parser *yaml_parser_t) {
- *parser = yaml_parser_t{}
-}
-
-// String read handler.
-func yaml_string_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- if parser.input_pos == len(parser.input) {
- return 0, io.EOF
- }
- n = copy(buffer, parser.input[parser.input_pos:])
- parser.input_pos += n
- return n, nil
-}
-
-// File read handler.
-func yaml_file_read_handler(parser *yaml_parser_t, buffer []byte) (n int, err error) {
- return parser.input_file.Read(buffer)
-}
-
-// Set a string input.
-func yaml_parser_set_input_string(parser *yaml_parser_t, input []byte) {
- if parser.read_handler != nil {
- panic("must set the input source only once")
- }
- parser.read_handler = yaml_string_read_handler
- parser.input = input
- parser.input_pos = 0
-}
-
-// Set a file input.
-func yaml_parser_set_input_file(parser *yaml_parser_t, file *os.File) {
- if parser.read_handler != nil {
- panic("must set the input source only once")
- }
- parser.read_handler = yaml_file_read_handler
- parser.input_file = file
-}
-
-// Set the source encoding.
-func yaml_parser_set_encoding(parser *yaml_parser_t, encoding yaml_encoding_t) {
- if parser.encoding != yaml_ANY_ENCODING {
- panic("must set the encoding only once")
- }
- parser.encoding = encoding
-}
-
-// Create a new emitter object.
-func yaml_emitter_initialize(emitter *yaml_emitter_t) bool {
- *emitter = yaml_emitter_t{
- buffer: make([]byte, output_buffer_size),
- raw_buffer: make([]byte, 0, output_raw_buffer_size),
- states: make([]yaml_emitter_state_t, 0, initial_stack_size),
- events: make([]yaml_event_t, 0, initial_queue_size),
- }
- return true
-}
-
-// Destroy an emitter object.
-func yaml_emitter_delete(emitter *yaml_emitter_t) {
- *emitter = yaml_emitter_t{}
-}
-
-// String write handler.
-func yaml_string_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- *emitter.output_buffer = append(*emitter.output_buffer, buffer...)
- return nil
-}
-
-// File write handler.
-func yaml_file_write_handler(emitter *yaml_emitter_t, buffer []byte) error {
- _, err := emitter.output_file.Write(buffer)
- return err
-}
-
-// Set a string output.
-func yaml_emitter_set_output_string(emitter *yaml_emitter_t, output_buffer *[]byte) {
- if emitter.write_handler != nil {
- panic("must set the output target only once")
- }
- emitter.write_handler = yaml_string_write_handler
- emitter.output_buffer = output_buffer
-}
-
-// Set a file output.
-func yaml_emitter_set_output_file(emitter *yaml_emitter_t, file io.Writer) {
- if emitter.write_handler != nil {
- panic("must set the output target only once")
- }
- emitter.write_handler = yaml_file_write_handler
- emitter.output_file = file
-}
-
-// Set the output encoding.
-func yaml_emitter_set_encoding(emitter *yaml_emitter_t, encoding yaml_encoding_t) {
- if emitter.encoding != yaml_ANY_ENCODING {
- panic("must set the output encoding only once")
- }
- emitter.encoding = encoding
-}
-
-// Set the canonical output style.
-func yaml_emitter_set_canonical(emitter *yaml_emitter_t, canonical bool) {
- emitter.canonical = canonical
-}
-
-//// Set the indentation increment.
-func yaml_emitter_set_indent(emitter *yaml_emitter_t, indent int) {
- if indent < 2 || indent > 9 {
- indent = 2
- }
- emitter.best_indent = indent
-}
-
-// Set the preferred line width.
-func yaml_emitter_set_width(emitter *yaml_emitter_t, width int) {
- if width < 0 {
- width = -1
- }
- emitter.best_width = width
-}
-
-// Set if unescaped non-ASCII characters are allowed.
-func yaml_emitter_set_unicode(emitter *yaml_emitter_t, unicode bool) {
- emitter.unicode = unicode
-}
-
-// Set the preferred line break character.
-func yaml_emitter_set_break(emitter *yaml_emitter_t, line_break yaml_break_t) {
- emitter.line_break = line_break
-}
-
-///*
-// * Destroy a token object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_token_delete(yaml_token_t *token)
-//{
-// assert(token); // Non-NULL token object expected.
-//
-// switch (token.type)
-// {
-// case YAML_TAG_DIRECTIVE_TOKEN:
-// yaml_free(token.data.tag_directive.handle);
-// yaml_free(token.data.tag_directive.prefix);
-// break;
-//
-// case YAML_ALIAS_TOKEN:
-// yaml_free(token.data.alias.value);
-// break;
-//
-// case YAML_ANCHOR_TOKEN:
-// yaml_free(token.data.anchor.value);
-// break;
-//
-// case YAML_TAG_TOKEN:
-// yaml_free(token.data.tag.handle);
-// yaml_free(token.data.tag.suffix);
-// break;
-//
-// case YAML_SCALAR_TOKEN:
-// yaml_free(token.data.scalar.value);
-// break;
-//
-// default:
-// break;
-// }
-//
-// memset(token, 0, sizeof(yaml_token_t));
-//}
-//
-///*
-// * Check if a string is a valid UTF-8 sequence.
-// *
-// * Check 'reader.c' for more details on UTF-8 encoding.
-// */
-//
-//static int
-//yaml_check_utf8(yaml_char_t *start, size_t length)
-//{
-// yaml_char_t *end = start+length;
-// yaml_char_t *pointer = start;
-//
-// while (pointer < end) {
-// unsigned char octet;
-// unsigned int width;
-// unsigned int value;
-// size_t k;
-//
-// octet = pointer[0];
-// width = (octet & 0x80) == 0x00 ? 1 :
-// (octet & 0xE0) == 0xC0 ? 2 :
-// (octet & 0xF0) == 0xE0 ? 3 :
-// (octet & 0xF8) == 0xF0 ? 4 : 0;
-// value = (octet & 0x80) == 0x00 ? octet & 0x7F :
-// (octet & 0xE0) == 0xC0 ? octet & 0x1F :
-// (octet & 0xF0) == 0xE0 ? octet & 0x0F :
-// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0;
-// if (!width) return 0;
-// if (pointer+width > end) return 0;
-// for (k = 1; k < width; k ++) {
-// octet = pointer[k];
-// if ((octet & 0xC0) != 0x80) return 0;
-// value = (value << 6) + (octet & 0x3F);
-// }
-// if (!((width == 1) ||
-// (width == 2 && value >= 0x80) ||
-// (width == 3 && value >= 0x800) ||
-// (width == 4 && value >= 0x10000))) return 0;
-//
-// pointer += width;
-// }
-//
-// return 1;
-//}
-//
-
-// Create STREAM-START.
-func yaml_stream_start_event_initialize(event *yaml_event_t, encoding yaml_encoding_t) bool {
- *event = yaml_event_t{
- typ: yaml_STREAM_START_EVENT,
- encoding: encoding,
- }
- return true
-}
-
-// Create STREAM-END.
-func yaml_stream_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_STREAM_END_EVENT,
- }
- return true
-}
-
-// Create DOCUMENT-START.
-func yaml_document_start_event_initialize(event *yaml_event_t, version_directive *yaml_version_directive_t,
- tag_directives []yaml_tag_directive_t, implicit bool) bool {
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- version_directive: version_directive,
- tag_directives: tag_directives,
- implicit: implicit,
- }
- return true
-}
-
-// Create DOCUMENT-END.
-func yaml_document_end_event_initialize(event *yaml_event_t, implicit bool) bool {
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_END_EVENT,
- implicit: implicit,
- }
- return true
-}
-
-///*
-// * Create ALIAS.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_alias_event_initialize(event *yaml_event_t, anchor *yaml_char_t)
-//{
-// mark yaml_mark_t = { 0, 0, 0 }
-// anchor_copy *yaml_char_t = NULL
-//
-// assert(event) // Non-NULL event object is expected.
-// assert(anchor) // Non-NULL anchor is expected.
-//
-// if (!yaml_check_utf8(anchor, strlen((char *)anchor))) return 0
-//
-// anchor_copy = yaml_strdup(anchor)
-// if (!anchor_copy)
-// return 0
-//
-// ALIAS_EVENT_INIT(*event, anchor_copy, mark, mark)
-//
-// return 1
-//}
-
-// Create SCALAR.
-func yaml_scalar_event_initialize(event *yaml_event_t, anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style yaml_scalar_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- anchor: anchor,
- tag: tag,
- value: value,
- implicit: plain_implicit,
- quoted_implicit: quoted_implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create SEQUENCE-START.
-func yaml_sequence_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_sequence_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create SEQUENCE-END.
-func yaml_sequence_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- }
- return true
-}
-
-// Create MAPPING-START.
-func yaml_mapping_start_event_initialize(event *yaml_event_t, anchor, tag []byte, implicit bool, style yaml_mapping_style_t) bool {
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(style),
- }
- return true
-}
-
-// Create MAPPING-END.
-func yaml_mapping_end_event_initialize(event *yaml_event_t) bool {
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- }
- return true
-}
-
-// Destroy an event object.
-func yaml_event_delete(event *yaml_event_t) {
- *event = yaml_event_t{}
-}
-
-///*
-// * Create a document object.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_initialize(document *yaml_document_t,
-// version_directive *yaml_version_directive_t,
-// tag_directives_start *yaml_tag_directive_t,
-// tag_directives_end *yaml_tag_directive_t,
-// start_implicit int, end_implicit int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// struct {
-// start *yaml_node_t
-// end *yaml_node_t
-// top *yaml_node_t
-// } nodes = { NULL, NULL, NULL }
-// version_directive_copy *yaml_version_directive_t = NULL
-// struct {
-// start *yaml_tag_directive_t
-// end *yaml_tag_directive_t
-// top *yaml_tag_directive_t
-// } tag_directives_copy = { NULL, NULL, NULL }
-// value yaml_tag_directive_t = { NULL, NULL }
-// mark yaml_mark_t = { 0, 0, 0 }
-//
-// assert(document) // Non-NULL document object is expected.
-// assert((tag_directives_start && tag_directives_end) ||
-// (tag_directives_start == tag_directives_end))
-// // Valid tag directives are expected.
-//
-// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error
-//
-// if (version_directive) {
-// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t))
-// if (!version_directive_copy) goto error
-// version_directive_copy.major = version_directive.major
-// version_directive_copy.minor = version_directive.minor
-// }
-//
-// if (tag_directives_start != tag_directives_end) {
-// tag_directive *yaml_tag_directive_t
-// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE))
-// goto error
-// for (tag_directive = tag_directives_start
-// tag_directive != tag_directives_end; tag_directive ++) {
-// assert(tag_directive.handle)
-// assert(tag_directive.prefix)
-// if (!yaml_check_utf8(tag_directive.handle,
-// strlen((char *)tag_directive.handle)))
-// goto error
-// if (!yaml_check_utf8(tag_directive.prefix,
-// strlen((char *)tag_directive.prefix)))
-// goto error
-// value.handle = yaml_strdup(tag_directive.handle)
-// value.prefix = yaml_strdup(tag_directive.prefix)
-// if (!value.handle || !value.prefix) goto error
-// if (!PUSH(&context, tag_directives_copy, value))
-// goto error
-// value.handle = NULL
-// value.prefix = NULL
-// }
-// }
-//
-// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy,
-// tag_directives_copy.start, tag_directives_copy.top,
-// start_implicit, end_implicit, mark, mark)
-//
-// return 1
-//
-//error:
-// STACK_DEL(&context, nodes)
-// yaml_free(version_directive_copy)
-// while (!STACK_EMPTY(&context, tag_directives_copy)) {
-// value yaml_tag_directive_t = POP(&context, tag_directives_copy)
-// yaml_free(value.handle)
-// yaml_free(value.prefix)
-// }
-// STACK_DEL(&context, tag_directives_copy)
-// yaml_free(value.handle)
-// yaml_free(value.prefix)
-//
-// return 0
-//}
-//
-///*
-// * Destroy a document object.
-// */
-//
-//YAML_DECLARE(void)
-//yaml_document_delete(document *yaml_document_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// tag_directive *yaml_tag_directive_t
-//
-// context.error = YAML_NO_ERROR // Eliminate a compliler warning.
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// while (!STACK_EMPTY(&context, document.nodes)) {
-// node yaml_node_t = POP(&context, document.nodes)
-// yaml_free(node.tag)
-// switch (node.type) {
-// case YAML_SCALAR_NODE:
-// yaml_free(node.data.scalar.value)
-// break
-// case YAML_SEQUENCE_NODE:
-// STACK_DEL(&context, node.data.sequence.items)
-// break
-// case YAML_MAPPING_NODE:
-// STACK_DEL(&context, node.data.mapping.pairs)
-// break
-// default:
-// assert(0) // Should not happen.
-// }
-// }
-// STACK_DEL(&context, document.nodes)
-//
-// yaml_free(document.version_directive)
-// for (tag_directive = document.tag_directives.start
-// tag_directive != document.tag_directives.end
-// tag_directive++) {
-// yaml_free(tag_directive.handle)
-// yaml_free(tag_directive.prefix)
-// }
-// yaml_free(document.tag_directives.start)
-//
-// memset(document, 0, sizeof(yaml_document_t))
-//}
-//
-///**
-// * Get a document node.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_node(document *yaml_document_t, index int)
-//{
-// assert(document) // Non-NULL document object is expected.
-//
-// if (index > 0 && document.nodes.start + index <= document.nodes.top) {
-// return document.nodes.start + index - 1
-// }
-// return NULL
-//}
-//
-///**
-// * Get the root object.
-// */
-//
-//YAML_DECLARE(yaml_node_t *)
-//yaml_document_get_root_node(document *yaml_document_t)
-//{
-// assert(document) // Non-NULL document object is expected.
-//
-// if (document.nodes.top != document.nodes.start) {
-// return document.nodes.start
-// }
-// return NULL
-//}
-//
-///*
-// * Add a scalar node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_scalar(document *yaml_document_t,
-// tag *yaml_char_t, value *yaml_char_t, length int,
-// style yaml_scalar_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// value_copy *yaml_char_t = NULL
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-// assert(value) // Non-NULL value is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (length < 0) {
-// length = strlen((char *)value)
-// }
-//
-// if (!yaml_check_utf8(value, length)) goto error
-// value_copy = yaml_malloc(length+1)
-// if (!value_copy) goto error
-// memcpy(value_copy, value, length)
-// value_copy[length] = '\0'
-//
-// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// yaml_free(tag_copy)
-// yaml_free(value_copy)
-//
-// return 0
-//}
-//
-///*
-// * Add a sequence node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_sequence(document *yaml_document_t,
-// tag *yaml_char_t, style yaml_sequence_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// struct {
-// start *yaml_node_item_t
-// end *yaml_node_item_t
-// top *yaml_node_item_t
-// } items = { NULL, NULL, NULL }
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error
-//
-// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end,
-// style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// STACK_DEL(&context, items)
-// yaml_free(tag_copy)
-//
-// return 0
-//}
-//
-///*
-// * Add a mapping node to a document.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_add_mapping(document *yaml_document_t,
-// tag *yaml_char_t, style yaml_mapping_style_t)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-// mark yaml_mark_t = { 0, 0, 0 }
-// tag_copy *yaml_char_t = NULL
-// struct {
-// start *yaml_node_pair_t
-// end *yaml_node_pair_t
-// top *yaml_node_pair_t
-// } pairs = { NULL, NULL, NULL }
-// node yaml_node_t
-//
-// assert(document) // Non-NULL document object is expected.
-//
-// if (!tag) {
-// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG
-// }
-//
-// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error
-// tag_copy = yaml_strdup(tag)
-// if (!tag_copy) goto error
-//
-// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error
-//
-// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end,
-// style, mark, mark)
-// if (!PUSH(&context, document.nodes, node)) goto error
-//
-// return document.nodes.top - document.nodes.start
-//
-//error:
-// STACK_DEL(&context, pairs)
-// yaml_free(tag_copy)
-//
-// return 0
-//}
-//
-///*
-// * Append an item to a sequence node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_sequence_item(document *yaml_document_t,
-// sequence int, item int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-//
-// assert(document) // Non-NULL document is required.
-// assert(sequence > 0
-// && document.nodes.start + sequence <= document.nodes.top)
-// // Valid sequence id is required.
-// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE)
-// // A sequence node is required.
-// assert(item > 0 && document.nodes.start + item <= document.nodes.top)
-// // Valid item id is required.
-//
-// if (!PUSH(&context,
-// document.nodes.start[sequence-1].data.sequence.items, item))
-// return 0
-//
-// return 1
-//}
-//
-///*
-// * Append a pair of a key and a value to a mapping node.
-// */
-//
-//YAML_DECLARE(int)
-//yaml_document_append_mapping_pair(document *yaml_document_t,
-// mapping int, key int, value int)
-//{
-// struct {
-// error yaml_error_type_t
-// } context
-//
-// pair yaml_node_pair_t
-//
-// assert(document) // Non-NULL document is required.
-// assert(mapping > 0
-// && document.nodes.start + mapping <= document.nodes.top)
-// // Valid mapping id is required.
-// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE)
-// // A mapping node is required.
-// assert(key > 0 && document.nodes.start + key <= document.nodes.top)
-// // Valid key id is required.
-// assert(value > 0 && document.nodes.start + value <= document.nodes.top)
-// // Valid value id is required.
-//
-// pair.key = key
-// pair.value = value
-//
-// if (!PUSH(&context,
-// document.nodes.start[mapping-1].data.mapping.pairs, pair))
-// return 0
-//
-// return 1
-//}
-//
-//
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go
deleted file mode 100644
index 085cddc..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/decode.go
+++ /dev/null
@@ -1,683 +0,0 @@
-package yaml
-
-import (
- "encoding"
- "encoding/base64"
- "fmt"
- "math"
- "reflect"
- "strconv"
- "time"
-)
-
-const (
- documentNode = 1 << iota
- mappingNode
- sequenceNode
- scalarNode
- aliasNode
-)
-
-type node struct {
- kind int
- line, column int
- tag string
- value string
- implicit bool
- children []*node
- anchors map[string]*node
-}
-
-// ----------------------------------------------------------------------------
-// Parser, produces a node tree out of a libyaml event stream.
-
-type parser struct {
- parser yaml_parser_t
- event yaml_event_t
- doc *node
-}
-
-func newParser(b []byte) *parser {
- p := parser{}
- if !yaml_parser_initialize(&p.parser) {
- panic("failed to initialize YAML emitter")
- }
-
- if len(b) == 0 {
- b = []byte{'\n'}
- }
-
- yaml_parser_set_input_string(&p.parser, b)
-
- p.skip()
- if p.event.typ != yaml_STREAM_START_EVENT {
- panic("expected stream start event, got " + strconv.Itoa(int(p.event.typ)))
- }
- p.skip()
- return &p
-}
-
-func (p *parser) destroy() {
- if p.event.typ != yaml_NO_EVENT {
- yaml_event_delete(&p.event)
- }
- yaml_parser_delete(&p.parser)
-}
-
-func (p *parser) skip() {
- if p.event.typ != yaml_NO_EVENT {
- if p.event.typ == yaml_STREAM_END_EVENT {
- failf("attempted to go past the end of stream; corrupted value?")
- }
- yaml_event_delete(&p.event)
- }
- if !yaml_parser_parse(&p.parser, &p.event) {
- p.fail()
- }
-}
-
-func (p *parser) fail() {
- var where string
- var line int
- if p.parser.problem_mark.line != 0 {
- line = p.parser.problem_mark.line
- } else if p.parser.context_mark.line != 0 {
- line = p.parser.context_mark.line
- }
- if line != 0 {
- where = "line " + strconv.Itoa(line) + ": "
- }
- var msg string
- if len(p.parser.problem) > 0 {
- msg = p.parser.problem
- } else {
- msg = "unknown problem parsing YAML content"
- }
- failf("%s%s", where, msg)
-}
-
-func (p *parser) anchor(n *node, anchor []byte) {
- if anchor != nil {
- p.doc.anchors[string(anchor)] = n
- }
-}
-
-func (p *parser) parse() *node {
- switch p.event.typ {
- case yaml_SCALAR_EVENT:
- return p.scalar()
- case yaml_ALIAS_EVENT:
- return p.alias()
- case yaml_MAPPING_START_EVENT:
- return p.mapping()
- case yaml_SEQUENCE_START_EVENT:
- return p.sequence()
- case yaml_DOCUMENT_START_EVENT:
- return p.document()
- case yaml_STREAM_END_EVENT:
- // Happens when attempting to decode an empty buffer.
- return nil
- default:
- panic("attempted to parse unknown event: " + strconv.Itoa(int(p.event.typ)))
- }
- panic("unreachable")
-}
-
-func (p *parser) node(kind int) *node {
- return &node{
- kind: kind,
- line: p.event.start_mark.line,
- column: p.event.start_mark.column,
- }
-}
-
-func (p *parser) document() *node {
- n := p.node(documentNode)
- n.anchors = make(map[string]*node)
- p.doc = n
- p.skip()
- n.children = append(n.children, p.parse())
- if p.event.typ != yaml_DOCUMENT_END_EVENT {
- panic("expected end of document event but got " + strconv.Itoa(int(p.event.typ)))
- }
- p.skip()
- return n
-}
-
-func (p *parser) alias() *node {
- n := p.node(aliasNode)
- n.value = string(p.event.anchor)
- p.skip()
- return n
-}
-
-func (p *parser) scalar() *node {
- n := p.node(scalarNode)
- n.value = string(p.event.value)
- n.tag = string(p.event.tag)
- n.implicit = p.event.implicit
- p.anchor(n, p.event.anchor)
- p.skip()
- return n
-}
-
-func (p *parser) sequence() *node {
- n := p.node(sequenceNode)
- p.anchor(n, p.event.anchor)
- p.skip()
- for p.event.typ != yaml_SEQUENCE_END_EVENT {
- n.children = append(n.children, p.parse())
- }
- p.skip()
- return n
-}
-
-func (p *parser) mapping() *node {
- n := p.node(mappingNode)
- p.anchor(n, p.event.anchor)
- p.skip()
- for p.event.typ != yaml_MAPPING_END_EVENT {
- n.children = append(n.children, p.parse(), p.parse())
- }
- p.skip()
- return n
-}
-
-// ----------------------------------------------------------------------------
-// Decoder, unmarshals a node into a provided value.
-
-type decoder struct {
- doc *node
- aliases map[string]bool
- mapType reflect.Type
- terrors []string
-}
-
-var (
- mapItemType = reflect.TypeOf(MapItem{})
- durationType = reflect.TypeOf(time.Duration(0))
- defaultMapType = reflect.TypeOf(map[interface{}]interface{}{})
- ifaceType = defaultMapType.Elem()
-)
-
-func newDecoder() *decoder {
- d := &decoder{mapType: defaultMapType}
- d.aliases = make(map[string]bool)
- return d
-}
-
-func (d *decoder) terror(n *node, tag string, out reflect.Value) {
- if n.tag != "" {
- tag = n.tag
- }
- value := n.value
- if tag != yaml_SEQ_TAG && tag != yaml_MAP_TAG {
- if len(value) > 10 {
- value = " `" + value[:7] + "...`"
- } else {
- value = " `" + value + "`"
- }
- }
- d.terrors = append(d.terrors, fmt.Sprintf("line %d: cannot unmarshal %s%s into %s", n.line+1, shortTag(tag), value, out.Type()))
-}
-
-func (d *decoder) callUnmarshaler(n *node, u Unmarshaler) (good bool) {
- terrlen := len(d.terrors)
- err := u.UnmarshalYAML(func(v interface{}) (err error) {
- defer handleErr(&err)
- d.unmarshal(n, reflect.ValueOf(v))
- if len(d.terrors) > terrlen {
- issues := d.terrors[terrlen:]
- d.terrors = d.terrors[:terrlen]
- return &TypeError{issues}
- }
- return nil
- })
- if e, ok := err.(*TypeError); ok {
- d.terrors = append(d.terrors, e.Errors...)
- return false
- }
- if err != nil {
- fail(err)
- }
- return true
-}
-
-// d.prepare initializes and dereferences pointers and calls UnmarshalYAML
-// if a value is found to implement it.
-// It returns the initialized and dereferenced out value, whether
-// unmarshalling was already done by UnmarshalYAML, and if so whether
-// its types unmarshalled appropriately.
-//
-// If n holds a null value, prepare returns before doing anything.
-func (d *decoder) prepare(n *node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) {
- if n.tag == yaml_NULL_TAG || n.kind == scalarNode && n.tag == "" && (n.value == "null" || n.value == "") {
- return out, false, false
- }
- again := true
- for again {
- again = false
- if out.Kind() == reflect.Ptr {
- if out.IsNil() {
- out.Set(reflect.New(out.Type().Elem()))
- }
- out = out.Elem()
- again = true
- }
- if out.CanAddr() {
- if u, ok := out.Addr().Interface().(Unmarshaler); ok {
- good = d.callUnmarshaler(n, u)
- return out, true, good
- }
- }
- }
- return out, false, false
-}
-
-func (d *decoder) unmarshal(n *node, out reflect.Value) (good bool) {
- switch n.kind {
- case documentNode:
- return d.document(n, out)
- case aliasNode:
- return d.alias(n, out)
- }
- out, unmarshaled, good := d.prepare(n, out)
- if unmarshaled {
- return good
- }
- switch n.kind {
- case scalarNode:
- good = d.scalar(n, out)
- case mappingNode:
- good = d.mapping(n, out)
- case sequenceNode:
- good = d.sequence(n, out)
- default:
- panic("internal error: unknown node kind: " + strconv.Itoa(n.kind))
- }
- return good
-}
-
-func (d *decoder) document(n *node, out reflect.Value) (good bool) {
- if len(n.children) == 1 {
- d.doc = n
- d.unmarshal(n.children[0], out)
- return true
- }
- return false
-}
-
-func (d *decoder) alias(n *node, out reflect.Value) (good bool) {
- an, ok := d.doc.anchors[n.value]
- if !ok {
- failf("unknown anchor '%s' referenced", n.value)
- }
- if d.aliases[n.value] {
- failf("anchor '%s' value contains itself", n.value)
- }
- d.aliases[n.value] = true
- good = d.unmarshal(an, out)
- delete(d.aliases, n.value)
- return good
-}
-
-var zeroValue reflect.Value
-
-func resetMap(out reflect.Value) {
- for _, k := range out.MapKeys() {
- out.SetMapIndex(k, zeroValue)
- }
-}
-
-func (d *decoder) scalar(n *node, out reflect.Value) (good bool) {
- var tag string
- var resolved interface{}
- if n.tag == "" && !n.implicit {
- tag = yaml_STR_TAG
- resolved = n.value
- } else {
- tag, resolved = resolve(n.tag, n.value)
- if tag == yaml_BINARY_TAG {
- data, err := base64.StdEncoding.DecodeString(resolved.(string))
- if err != nil {
- failf("!!binary value contains invalid base64 data")
- }
- resolved = string(data)
- }
- }
- if resolved == nil {
- if out.Kind() == reflect.Map && !out.CanAddr() {
- resetMap(out)
- } else {
- out.Set(reflect.Zero(out.Type()))
- }
- return true
- }
- if s, ok := resolved.(string); ok && out.CanAddr() {
- if u, ok := out.Addr().Interface().(encoding.TextUnmarshaler); ok {
- err := u.UnmarshalText([]byte(s))
- if err != nil {
- fail(err)
- }
- return true
- }
- }
- switch out.Kind() {
- case reflect.String:
- if tag == yaml_BINARY_TAG {
- out.SetString(resolved.(string))
- good = true
- } else if resolved != nil {
- out.SetString(n.value)
- good = true
- }
- case reflect.Interface:
- if resolved == nil {
- out.Set(reflect.Zero(out.Type()))
- } else {
- out.Set(reflect.ValueOf(resolved))
- }
- good = true
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- switch resolved := resolved.(type) {
- case int:
- if !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- good = true
- }
- case int64:
- if !out.OverflowInt(resolved) {
- out.SetInt(resolved)
- good = true
- }
- case uint64:
- if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- good = true
- }
- case float64:
- if resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) {
- out.SetInt(int64(resolved))
- good = true
- }
- case string:
- if out.Type() == durationType {
- d, err := time.ParseDuration(resolved)
- if err == nil {
- out.SetInt(int64(d))
- good = true
- }
- }
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- switch resolved := resolved.(type) {
- case int:
- if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- case int64:
- if resolved >= 0 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- case uint64:
- if !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- case float64:
- if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) {
- out.SetUint(uint64(resolved))
- good = true
- }
- }
- case reflect.Bool:
- switch resolved := resolved.(type) {
- case bool:
- out.SetBool(resolved)
- good = true
- }
- case reflect.Float32, reflect.Float64:
- switch resolved := resolved.(type) {
- case int:
- out.SetFloat(float64(resolved))
- good = true
- case int64:
- out.SetFloat(float64(resolved))
- good = true
- case uint64:
- out.SetFloat(float64(resolved))
- good = true
- case float64:
- out.SetFloat(resolved)
- good = true
- }
- case reflect.Ptr:
- if out.Type().Elem() == reflect.TypeOf(resolved) {
- // TODO DOes this make sense? When is out a Ptr except when decoding a nil value?
- elem := reflect.New(out.Type().Elem())
- elem.Elem().Set(reflect.ValueOf(resolved))
- out.Set(elem)
- good = true
- }
- }
- if !good {
- d.terror(n, tag, out)
- }
- return good
-}
-
-func settableValueOf(i interface{}) reflect.Value {
- v := reflect.ValueOf(i)
- sv := reflect.New(v.Type()).Elem()
- sv.Set(v)
- return sv
-}
-
-func (d *decoder) sequence(n *node, out reflect.Value) (good bool) {
- l := len(n.children)
-
- var iface reflect.Value
- switch out.Kind() {
- case reflect.Slice:
- out.Set(reflect.MakeSlice(out.Type(), l, l))
- case reflect.Interface:
- // No type hints. Will have to use a generic sequence.
- iface = out
- out = settableValueOf(make([]interface{}, l))
- default:
- d.terror(n, yaml_SEQ_TAG, out)
- return false
- }
- et := out.Type().Elem()
-
- j := 0
- for i := 0; i < l; i++ {
- e := reflect.New(et).Elem()
- if ok := d.unmarshal(n.children[i], e); ok {
- out.Index(j).Set(e)
- j++
- }
- }
- out.Set(out.Slice(0, j))
- if iface.IsValid() {
- iface.Set(out)
- }
- return true
-}
-
-func (d *decoder) mapping(n *node, out reflect.Value) (good bool) {
- switch out.Kind() {
- case reflect.Struct:
- return d.mappingStruct(n, out)
- case reflect.Slice:
- return d.mappingSlice(n, out)
- case reflect.Map:
- // okay
- case reflect.Interface:
- if d.mapType.Kind() == reflect.Map {
- iface := out
- out = reflect.MakeMap(d.mapType)
- iface.Set(out)
- } else {
- slicev := reflect.New(d.mapType).Elem()
- if !d.mappingSlice(n, slicev) {
- return false
- }
- out.Set(slicev)
- return true
- }
- default:
- d.terror(n, yaml_MAP_TAG, out)
- return false
- }
- outt := out.Type()
- kt := outt.Key()
- et := outt.Elem()
-
- mapType := d.mapType
- if outt.Key() == ifaceType && outt.Elem() == ifaceType {
- d.mapType = outt
- }
-
- if out.IsNil() {
- out.Set(reflect.MakeMap(outt))
- }
- l := len(n.children)
- for i := 0; i < l; i += 2 {
- if isMerge(n.children[i]) {
- d.merge(n.children[i+1], out)
- continue
- }
- k := reflect.New(kt).Elem()
- if d.unmarshal(n.children[i], k) {
- kkind := k.Kind()
- if kkind == reflect.Interface {
- kkind = k.Elem().Kind()
- }
- if kkind == reflect.Map || kkind == reflect.Slice {
- failf("invalid map key: %#v", k.Interface())
- }
- e := reflect.New(et).Elem()
- if d.unmarshal(n.children[i+1], e) {
- out.SetMapIndex(k, e)
- }
- }
- }
- d.mapType = mapType
- return true
-}
-
-func (d *decoder) mappingSlice(n *node, out reflect.Value) (good bool) {
- outt := out.Type()
- if outt.Elem() != mapItemType {
- d.terror(n, yaml_MAP_TAG, out)
- return false
- }
-
- mapType := d.mapType
- d.mapType = outt
-
- var slice []MapItem
- var l = len(n.children)
- for i := 0; i < l; i += 2 {
- if isMerge(n.children[i]) {
- d.merge(n.children[i+1], out)
- continue
- }
- item := MapItem{}
- k := reflect.ValueOf(&item.Key).Elem()
- if d.unmarshal(n.children[i], k) {
- v := reflect.ValueOf(&item.Value).Elem()
- if d.unmarshal(n.children[i+1], v) {
- slice = append(slice, item)
- }
- }
- }
- out.Set(reflect.ValueOf(slice))
- d.mapType = mapType
- return true
-}
-
-func (d *decoder) mappingStruct(n *node, out reflect.Value) (good bool) {
- sinfo, err := getStructInfo(out.Type())
- if err != nil {
- panic(err)
- }
- name := settableValueOf("")
- l := len(n.children)
-
- var inlineMap reflect.Value
- var elemType reflect.Type
- if sinfo.InlineMap != -1 {
- inlineMap = out.Field(sinfo.InlineMap)
- inlineMap.Set(reflect.New(inlineMap.Type()).Elem())
- elemType = inlineMap.Type().Elem()
- }
-
- for i := 0; i < l; i += 2 {
- ni := n.children[i]
- if isMerge(ni) {
- d.merge(n.children[i+1], out)
- continue
- }
- if !d.unmarshal(ni, name) {
- continue
- }
- if info, ok := sinfo.FieldsMap[name.String()]; ok {
- var field reflect.Value
- if info.Inline == nil {
- field = out.Field(info.Num)
- } else {
- field = out.FieldByIndex(info.Inline)
- }
- d.unmarshal(n.children[i+1], field)
- } else if sinfo.InlineMap != -1 {
- if inlineMap.IsNil() {
- inlineMap.Set(reflect.MakeMap(inlineMap.Type()))
- }
- value := reflect.New(elemType).Elem()
- d.unmarshal(n.children[i+1], value)
- inlineMap.SetMapIndex(name, value)
- }
- }
- return true
-}
-
-func failWantMap() {
- failf("map merge requires map or sequence of maps as the value")
-}
-
-func (d *decoder) merge(n *node, out reflect.Value) {
- switch n.kind {
- case mappingNode:
- d.unmarshal(n, out)
- case aliasNode:
- an, ok := d.doc.anchors[n.value]
- if ok && an.kind != mappingNode {
- failWantMap()
- }
- d.unmarshal(n, out)
- case sequenceNode:
- // Step backwards as earlier nodes take precedence.
- for i := len(n.children) - 1; i >= 0; i-- {
- ni := n.children[i]
- if ni.kind == aliasNode {
- an, ok := d.doc.anchors[ni.value]
- if ok && an.kind != mappingNode {
- failWantMap()
- }
- } else if ni.kind != mappingNode {
- failWantMap()
- }
- d.unmarshal(ni, out)
- }
- default:
- failWantMap()
- }
-}
-
-func isMerge(n *node) bool {
- return n.kind == scalarNode && n.value == "<<" && (n.implicit == true || n.tag == yaml_MERGE_TAG)
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
deleted file mode 100644
index 2befd55..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/emitterc.go
+++ /dev/null
@@ -1,1685 +0,0 @@
-package yaml
-
-import (
- "bytes"
-)
-
-// Flush the buffer if needed.
-func flush(emitter *yaml_emitter_t) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) {
- return yaml_emitter_flush(emitter)
- }
- return true
-}
-
-// Put a character to the output buffer.
-func put(emitter *yaml_emitter_t, value byte) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.buffer[emitter.buffer_pos] = value
- emitter.buffer_pos++
- emitter.column++
- return true
-}
-
-// Put a line break to the output buffer.
-func put_break(emitter *yaml_emitter_t) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- switch emitter.line_break {
- case yaml_CR_BREAK:
- emitter.buffer[emitter.buffer_pos] = '\r'
- emitter.buffer_pos += 1
- case yaml_LN_BREAK:
- emitter.buffer[emitter.buffer_pos] = '\n'
- emitter.buffer_pos += 1
- case yaml_CRLN_BREAK:
- emitter.buffer[emitter.buffer_pos+0] = '\r'
- emitter.buffer[emitter.buffer_pos+1] = '\n'
- emitter.buffer_pos += 2
- default:
- panic("unknown line break setting")
- }
- emitter.column = 0
- emitter.line++
- return true
-}
-
-// Copy a character from a string into buffer.
-func write(emitter *yaml_emitter_t, s []byte, i *int) bool {
- if emitter.buffer_pos+5 >= len(emitter.buffer) && !yaml_emitter_flush(emitter) {
- return false
- }
- p := emitter.buffer_pos
- w := width(s[*i])
- switch w {
- case 4:
- emitter.buffer[p+3] = s[*i+3]
- fallthrough
- case 3:
- emitter.buffer[p+2] = s[*i+2]
- fallthrough
- case 2:
- emitter.buffer[p+1] = s[*i+1]
- fallthrough
- case 1:
- emitter.buffer[p+0] = s[*i+0]
- default:
- panic("unknown character width")
- }
- emitter.column++
- emitter.buffer_pos += w
- *i += w
- return true
-}
-
-// Write a whole string into buffer.
-func write_all(emitter *yaml_emitter_t, s []byte) bool {
- for i := 0; i < len(s); {
- if !write(emitter, s, &i) {
- return false
- }
- }
- return true
-}
-
-// Copy a line break character from a string into buffer.
-func write_break(emitter *yaml_emitter_t, s []byte, i *int) bool {
- if s[*i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- *i++
- } else {
- if !write(emitter, s, i) {
- return false
- }
- emitter.column = 0
- emitter.line++
- }
- return true
-}
-
-// Set an emitter error and return false.
-func yaml_emitter_set_emitter_error(emitter *yaml_emitter_t, problem string) bool {
- emitter.error = yaml_EMITTER_ERROR
- emitter.problem = problem
- return false
-}
-
-// Emit an event.
-func yaml_emitter_emit(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- emitter.events = append(emitter.events, *event)
- for !yaml_emitter_need_more_events(emitter) {
- event := &emitter.events[emitter.events_head]
- if !yaml_emitter_analyze_event(emitter, event) {
- return false
- }
- if !yaml_emitter_state_machine(emitter, event) {
- return false
- }
- yaml_event_delete(event)
- emitter.events_head++
- }
- return true
-}
-
-// Check if we need to accumulate more events before emitting.
-//
-// We accumulate extra
-// - 1 event for DOCUMENT-START
-// - 2 events for SEQUENCE-START
-// - 3 events for MAPPING-START
-//
-func yaml_emitter_need_more_events(emitter *yaml_emitter_t) bool {
- if emitter.events_head == len(emitter.events) {
- return true
- }
- var accumulate int
- switch emitter.events[emitter.events_head].typ {
- case yaml_DOCUMENT_START_EVENT:
- accumulate = 1
- break
- case yaml_SEQUENCE_START_EVENT:
- accumulate = 2
- break
- case yaml_MAPPING_START_EVENT:
- accumulate = 3
- break
- default:
- return false
- }
- if len(emitter.events)-emitter.events_head > accumulate {
- return false
- }
- var level int
- for i := emitter.events_head; i < len(emitter.events); i++ {
- switch emitter.events[i].typ {
- case yaml_STREAM_START_EVENT, yaml_DOCUMENT_START_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT:
- level++
- case yaml_STREAM_END_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_END_EVENT, yaml_MAPPING_END_EVENT:
- level--
- }
- if level == 0 {
- return false
- }
- }
- return true
-}
-
-// Append a directive to the directives stack.
-func yaml_emitter_append_tag_directive(emitter *yaml_emitter_t, value *yaml_tag_directive_t, allow_duplicates bool) bool {
- for i := 0; i < len(emitter.tag_directives); i++ {
- if bytes.Equal(value.handle, emitter.tag_directives[i].handle) {
- if allow_duplicates {
- return true
- }
- return yaml_emitter_set_emitter_error(emitter, "duplicate %TAG directive")
- }
- }
-
- // [Go] Do we actually need to copy this given garbage collection
- // and the lack of deallocating destructors?
- tag_copy := yaml_tag_directive_t{
- handle: make([]byte, len(value.handle)),
- prefix: make([]byte, len(value.prefix)),
- }
- copy(tag_copy.handle, value.handle)
- copy(tag_copy.prefix, value.prefix)
- emitter.tag_directives = append(emitter.tag_directives, tag_copy)
- return true
-}
-
-// Increase the indentation level.
-func yaml_emitter_increase_indent(emitter *yaml_emitter_t, flow, indentless bool) bool {
- emitter.indents = append(emitter.indents, emitter.indent)
- if emitter.indent < 0 {
- if flow {
- emitter.indent = emitter.best_indent
- } else {
- emitter.indent = 0
- }
- } else if !indentless {
- emitter.indent += emitter.best_indent
- }
- return true
-}
-
-// State dispatcher.
-func yaml_emitter_state_machine(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- switch emitter.state {
- default:
- case yaml_EMIT_STREAM_START_STATE:
- return yaml_emitter_emit_stream_start(emitter, event)
-
- case yaml_EMIT_FIRST_DOCUMENT_START_STATE:
- return yaml_emitter_emit_document_start(emitter, event, true)
-
- case yaml_EMIT_DOCUMENT_START_STATE:
- return yaml_emitter_emit_document_start(emitter, event, false)
-
- case yaml_EMIT_DOCUMENT_CONTENT_STATE:
- return yaml_emitter_emit_document_content(emitter, event)
-
- case yaml_EMIT_DOCUMENT_END_STATE:
- return yaml_emitter_emit_document_end(emitter, event)
-
- case yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, true)
-
- case yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE:
- return yaml_emitter_emit_flow_sequence_item(emitter, event, false)
-
- case yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, true)
-
- case yaml_EMIT_FLOW_MAPPING_KEY_STATE:
- return yaml_emitter_emit_flow_mapping_key(emitter, event, false)
-
- case yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE:
- return yaml_emitter_emit_flow_mapping_value(emitter, event, true)
-
- case yaml_EMIT_FLOW_MAPPING_VALUE_STATE:
- return yaml_emitter_emit_flow_mapping_value(emitter, event, false)
-
- case yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE:
- return yaml_emitter_emit_block_sequence_item(emitter, event, true)
-
- case yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE:
- return yaml_emitter_emit_block_sequence_item(emitter, event, false)
-
- case yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE:
- return yaml_emitter_emit_block_mapping_key(emitter, event, true)
-
- case yaml_EMIT_BLOCK_MAPPING_KEY_STATE:
- return yaml_emitter_emit_block_mapping_key(emitter, event, false)
-
- case yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE:
- return yaml_emitter_emit_block_mapping_value(emitter, event, true)
-
- case yaml_EMIT_BLOCK_MAPPING_VALUE_STATE:
- return yaml_emitter_emit_block_mapping_value(emitter, event, false)
-
- case yaml_EMIT_END_STATE:
- return yaml_emitter_set_emitter_error(emitter, "expected nothing after STREAM-END")
- }
- panic("invalid emitter state")
-}
-
-// Expect STREAM-START.
-func yaml_emitter_emit_stream_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if event.typ != yaml_STREAM_START_EVENT {
- return yaml_emitter_set_emitter_error(emitter, "expected STREAM-START")
- }
- if emitter.encoding == yaml_ANY_ENCODING {
- emitter.encoding = event.encoding
- if emitter.encoding == yaml_ANY_ENCODING {
- emitter.encoding = yaml_UTF8_ENCODING
- }
- }
- if emitter.best_indent < 2 || emitter.best_indent > 9 {
- emitter.best_indent = 2
- }
- if emitter.best_width >= 0 && emitter.best_width <= emitter.best_indent*2 {
- emitter.best_width = 80
- }
- if emitter.best_width < 0 {
- emitter.best_width = 1<<31 - 1
- }
- if emitter.line_break == yaml_ANY_BREAK {
- emitter.line_break = yaml_LN_BREAK
- }
-
- emitter.indent = -1
- emitter.line = 0
- emitter.column = 0
- emitter.whitespace = true
- emitter.indention = true
-
- if emitter.encoding != yaml_UTF8_ENCODING {
- if !yaml_emitter_write_bom(emitter) {
- return false
- }
- }
- emitter.state = yaml_EMIT_FIRST_DOCUMENT_START_STATE
- return true
-}
-
-// Expect DOCUMENT-START or STREAM-END.
-func yaml_emitter_emit_document_start(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
-
- if event.typ == yaml_DOCUMENT_START_EVENT {
-
- if event.version_directive != nil {
- if !yaml_emitter_analyze_version_directive(emitter, event.version_directive) {
- return false
- }
- }
-
- for i := 0; i < len(event.tag_directives); i++ {
- tag_directive := &event.tag_directives[i]
- if !yaml_emitter_analyze_tag_directive(emitter, tag_directive) {
- return false
- }
- if !yaml_emitter_append_tag_directive(emitter, tag_directive, false) {
- return false
- }
- }
-
- for i := 0; i < len(default_tag_directives); i++ {
- tag_directive := &default_tag_directives[i]
- if !yaml_emitter_append_tag_directive(emitter, tag_directive, true) {
- return false
- }
- }
-
- implicit := event.implicit
- if !first || emitter.canonical {
- implicit = false
- }
-
- if emitter.open_ended && (event.version_directive != nil || len(event.tag_directives) > 0) {
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if event.version_directive != nil {
- implicit = false
- if !yaml_emitter_write_indicator(emitter, []byte("%YAML"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte("1.1"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if len(event.tag_directives) > 0 {
- implicit = false
- for i := 0; i < len(event.tag_directives); i++ {
- tag_directive := &event.tag_directives[i]
- if !yaml_emitter_write_indicator(emitter, []byte("%TAG"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_tag_handle(emitter, tag_directive.handle) {
- return false
- }
- if !yaml_emitter_write_tag_content(emitter, tag_directive.prefix, true) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- }
-
- if yaml_emitter_check_empty_document(emitter) {
- implicit = false
- }
- if !implicit {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte("---"), true, false, false) {
- return false
- }
- if emitter.canonical {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- }
-
- emitter.state = yaml_EMIT_DOCUMENT_CONTENT_STATE
- return true
- }
-
- if event.typ == yaml_STREAM_END_EVENT {
- if emitter.open_ended {
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.state = yaml_EMIT_END_STATE
- return true
- }
-
- return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-START or STREAM-END")
-}
-
-// Expect the root node.
-func yaml_emitter_emit_document_content(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- emitter.states = append(emitter.states, yaml_EMIT_DOCUMENT_END_STATE)
- return yaml_emitter_emit_node(emitter, event, true, false, false, false)
-}
-
-// Expect DOCUMENT-END.
-func yaml_emitter_emit_document_end(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if event.typ != yaml_DOCUMENT_END_EVENT {
- return yaml_emitter_set_emitter_error(emitter, "expected DOCUMENT-END")
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !event.implicit {
- // [Go] Allocate the slice elsewhere.
- if !yaml_emitter_write_indicator(emitter, []byte("..."), true, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_flush(emitter) {
- return false
- }
- emitter.state = yaml_EMIT_DOCUMENT_START_STATE
- emitter.tag_directives = emitter.tag_directives[:0]
- return true
-}
-
-// Expect a flow item node.
-func yaml_emitter_emit_flow_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_write_indicator(emitter, []byte{'['}, true, true, false) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- emitter.flow_level++
- }
-
- if event.typ == yaml_SEQUENCE_END_EVENT {
- emitter.flow_level--
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- if emitter.canonical && !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{']'}, false, false, false) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
-
- return true
- }
-
- if !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
-
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE)
- return yaml_emitter_emit_node(emitter, event, false, true, false, false)
-}
-
-// Expect a flow key node.
-func yaml_emitter_emit_flow_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_write_indicator(emitter, []byte{'{'}, true, true, false) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- emitter.flow_level++
- }
-
- if event.typ == yaml_MAPPING_END_EVENT {
- emitter.flow_level--
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- if emitter.canonical && !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'}'}, false, false, false) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
-
- if !first {
- if !yaml_emitter_write_indicator(emitter, []byte{','}, false, false, false) {
- return false
- }
- }
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
-
- if !emitter.canonical && yaml_emitter_check_simple_key(emitter) {
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, true)
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, false) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a flow value node.
-func yaml_emitter_emit_flow_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
- if simple {
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
- return false
- }
- } else {
- if emitter.canonical || emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, false) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_FLOW_MAPPING_KEY_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a block item node.
-func yaml_emitter_emit_block_sequence_item(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_increase_indent(emitter, false, emitter.mapping_context && !emitter.indention) {
- return false
- }
- }
- if event.typ == yaml_SEQUENCE_END_EVENT {
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'-'}, true, false, true) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE)
- return yaml_emitter_emit_node(emitter, event, false, true, false, false)
-}
-
-// Expect a block key node.
-func yaml_emitter_emit_block_mapping_key(emitter *yaml_emitter_t, event *yaml_event_t, first bool) bool {
- if first {
- if !yaml_emitter_increase_indent(emitter, false, false) {
- return false
- }
- }
- if event.typ == yaml_MAPPING_END_EVENT {
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
- }
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if yaml_emitter_check_simple_key(emitter) {
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, true)
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'?'}, true, false, true) {
- return false
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_VALUE_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a block value node.
-func yaml_emitter_emit_block_mapping_value(emitter *yaml_emitter_t, event *yaml_event_t, simple bool) bool {
- if simple {
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, false, false, false) {
- return false
- }
- } else {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{':'}, true, false, true) {
- return false
- }
- }
- emitter.states = append(emitter.states, yaml_EMIT_BLOCK_MAPPING_KEY_STATE)
- return yaml_emitter_emit_node(emitter, event, false, false, true, false)
-}
-
-// Expect a node.
-func yaml_emitter_emit_node(emitter *yaml_emitter_t, event *yaml_event_t,
- root bool, sequence bool, mapping bool, simple_key bool) bool {
-
- emitter.root_context = root
- emitter.sequence_context = sequence
- emitter.mapping_context = mapping
- emitter.simple_key_context = simple_key
-
- switch event.typ {
- case yaml_ALIAS_EVENT:
- return yaml_emitter_emit_alias(emitter, event)
- case yaml_SCALAR_EVENT:
- return yaml_emitter_emit_scalar(emitter, event)
- case yaml_SEQUENCE_START_EVENT:
- return yaml_emitter_emit_sequence_start(emitter, event)
- case yaml_MAPPING_START_EVENT:
- return yaml_emitter_emit_mapping_start(emitter, event)
- default:
- return yaml_emitter_set_emitter_error(emitter,
- "expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS")
- }
- return false
-}
-
-// Expect ALIAS.
-func yaml_emitter_emit_alias(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
-}
-
-// Expect SCALAR.
-func yaml_emitter_emit_scalar(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_select_scalar_style(emitter, event) {
- return false
- }
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if !yaml_emitter_increase_indent(emitter, true, false) {
- return false
- }
- if !yaml_emitter_process_scalar(emitter) {
- return false
- }
- emitter.indent = emitter.indents[len(emitter.indents)-1]
- emitter.indents = emitter.indents[:len(emitter.indents)-1]
- emitter.state = emitter.states[len(emitter.states)-1]
- emitter.states = emitter.states[:len(emitter.states)-1]
- return true
-}
-
-// Expect SEQUENCE-START.
-func yaml_emitter_emit_sequence_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if emitter.flow_level > 0 || emitter.canonical || event.sequence_style() == yaml_FLOW_SEQUENCE_STYLE ||
- yaml_emitter_check_empty_sequence(emitter) {
- emitter.state = yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
- } else {
- emitter.state = yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
- }
- return true
-}
-
-// Expect MAPPING-START.
-func yaml_emitter_emit_mapping_start(emitter *yaml_emitter_t, event *yaml_event_t) bool {
- if !yaml_emitter_process_anchor(emitter) {
- return false
- }
- if !yaml_emitter_process_tag(emitter) {
- return false
- }
- if emitter.flow_level > 0 || emitter.canonical || event.mapping_style() == yaml_FLOW_MAPPING_STYLE ||
- yaml_emitter_check_empty_mapping(emitter) {
- emitter.state = yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE
- } else {
- emitter.state = yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
- }
- return true
-}
-
-// Check if the document content is an empty scalar.
-func yaml_emitter_check_empty_document(emitter *yaml_emitter_t) bool {
- return false // [Go] Huh?
-}
-
-// Check if the next events represent an empty sequence.
-func yaml_emitter_check_empty_sequence(emitter *yaml_emitter_t) bool {
- if len(emitter.events)-emitter.events_head < 2 {
- return false
- }
- return emitter.events[emitter.events_head].typ == yaml_SEQUENCE_START_EVENT &&
- emitter.events[emitter.events_head+1].typ == yaml_SEQUENCE_END_EVENT
-}
-
-// Check if the next events represent an empty mapping.
-func yaml_emitter_check_empty_mapping(emitter *yaml_emitter_t) bool {
- if len(emitter.events)-emitter.events_head < 2 {
- return false
- }
- return emitter.events[emitter.events_head].typ == yaml_MAPPING_START_EVENT &&
- emitter.events[emitter.events_head+1].typ == yaml_MAPPING_END_EVENT
-}
-
-// Check if the next node can be expressed as a simple key.
-func yaml_emitter_check_simple_key(emitter *yaml_emitter_t) bool {
- length := 0
- switch emitter.events[emitter.events_head].typ {
- case yaml_ALIAS_EVENT:
- length += len(emitter.anchor_data.anchor)
- case yaml_SCALAR_EVENT:
- if emitter.scalar_data.multiline {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix) +
- len(emitter.scalar_data.value)
- case yaml_SEQUENCE_START_EVENT:
- if !yaml_emitter_check_empty_sequence(emitter) {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix)
- case yaml_MAPPING_START_EVENT:
- if !yaml_emitter_check_empty_mapping(emitter) {
- return false
- }
- length += len(emitter.anchor_data.anchor) +
- len(emitter.tag_data.handle) +
- len(emitter.tag_data.suffix)
- default:
- return false
- }
- return length <= 128
-}
-
-// Determine an acceptable scalar style.
-func yaml_emitter_select_scalar_style(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
- no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
- if no_tag && !event.implicit && !event.quoted_implicit {
- return yaml_emitter_set_emitter_error(emitter, "neither tag nor implicit flags are specified")
- }
-
- style := event.scalar_style()
- if style == yaml_ANY_SCALAR_STYLE {
- style = yaml_PLAIN_SCALAR_STYLE
- }
- if emitter.canonical {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- if emitter.simple_key_context && emitter.scalar_data.multiline {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
-
- if style == yaml_PLAIN_SCALAR_STYLE {
- if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
- emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- if no_tag && !event.implicit {
- style = yaml_SINGLE_QUOTED_SCALAR_STYLE
- }
- }
- if style == yaml_SINGLE_QUOTED_SCALAR_STYLE {
- if !emitter.scalar_data.single_quoted_allowed {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- }
- if style == yaml_LITERAL_SCALAR_STYLE || style == yaml_FOLDED_SCALAR_STYLE {
- if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- }
-
- if no_tag && !event.quoted_implicit && style != yaml_PLAIN_SCALAR_STYLE {
- emitter.tag_data.handle = []byte{'!'}
- }
- emitter.scalar_data.style = style
- return true
-}
-
-// Write an achor.
-func yaml_emitter_process_anchor(emitter *yaml_emitter_t) bool {
- if emitter.anchor_data.anchor == nil {
- return true
- }
- c := []byte{'&'}
- if emitter.anchor_data.alias {
- c[0] = '*'
- }
- if !yaml_emitter_write_indicator(emitter, c, true, false, false) {
- return false
- }
- return yaml_emitter_write_anchor(emitter, emitter.anchor_data.anchor)
-}
-
-// Write a tag.
-func yaml_emitter_process_tag(emitter *yaml_emitter_t) bool {
- if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
- return true
- }
- if len(emitter.tag_data.handle) > 0 {
- if !yaml_emitter_write_tag_handle(emitter, emitter.tag_data.handle) {
- return false
- }
- if len(emitter.tag_data.suffix) > 0 {
- if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
- return false
- }
- }
- } else {
- // [Go] Allocate these slices elsewhere.
- if !yaml_emitter_write_indicator(emitter, []byte("!<"), true, false, false) {
- return false
- }
- if !yaml_emitter_write_tag_content(emitter, emitter.tag_data.suffix, false) {
- return false
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'>'}, false, false, false) {
- return false
- }
- }
- return true
-}
-
-// Write a scalar.
-func yaml_emitter_process_scalar(emitter *yaml_emitter_t) bool {
- switch emitter.scalar_data.style {
- case yaml_PLAIN_SCALAR_STYLE:
- return yaml_emitter_write_plain_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_SINGLE_QUOTED_SCALAR_STYLE:
- return yaml_emitter_write_single_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_DOUBLE_QUOTED_SCALAR_STYLE:
- return yaml_emitter_write_double_quoted_scalar(emitter, emitter.scalar_data.value, !emitter.simple_key_context)
-
- case yaml_LITERAL_SCALAR_STYLE:
- return yaml_emitter_write_literal_scalar(emitter, emitter.scalar_data.value)
-
- case yaml_FOLDED_SCALAR_STYLE:
- return yaml_emitter_write_folded_scalar(emitter, emitter.scalar_data.value)
- }
- panic("unknown scalar style")
-}
-
-// Check if a %YAML directive is valid.
-func yaml_emitter_analyze_version_directive(emitter *yaml_emitter_t, version_directive *yaml_version_directive_t) bool {
- if version_directive.major != 1 || version_directive.minor != 1 {
- return yaml_emitter_set_emitter_error(emitter, "incompatible %YAML directive")
- }
- return true
-}
-
-// Check if a %TAG directive is valid.
-func yaml_emitter_analyze_tag_directive(emitter *yaml_emitter_t, tag_directive *yaml_tag_directive_t) bool {
- handle := tag_directive.handle
- prefix := tag_directive.prefix
- if len(handle) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must not be empty")
- }
- if handle[0] != '!' {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must start with '!'")
- }
- if handle[len(handle)-1] != '!' {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must end with '!'")
- }
- for i := 1; i < len(handle)-1; i += width(handle[i]) {
- if !is_alpha(handle, i) {
- return yaml_emitter_set_emitter_error(emitter, "tag handle must contain alphanumerical characters only")
- }
- }
- if len(prefix) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag prefix must not be empty")
- }
- return true
-}
-
-// Check if an anchor is valid.
-func yaml_emitter_analyze_anchor(emitter *yaml_emitter_t, anchor []byte, alias bool) bool {
- if len(anchor) == 0 {
- problem := "anchor value must not be empty"
- if alias {
- problem = "alias value must not be empty"
- }
- return yaml_emitter_set_emitter_error(emitter, problem)
- }
- for i := 0; i < len(anchor); i += width(anchor[i]) {
- if !is_alpha(anchor, i) {
- problem := "anchor value must contain alphanumerical characters only"
- if alias {
- problem = "alias value must contain alphanumerical characters only"
- }
- return yaml_emitter_set_emitter_error(emitter, problem)
- }
- }
- emitter.anchor_data.anchor = anchor
- emitter.anchor_data.alias = alias
- return true
-}
-
-// Check if a tag is valid.
-func yaml_emitter_analyze_tag(emitter *yaml_emitter_t, tag []byte) bool {
- if len(tag) == 0 {
- return yaml_emitter_set_emitter_error(emitter, "tag value must not be empty")
- }
- for i := 0; i < len(emitter.tag_directives); i++ {
- tag_directive := &emitter.tag_directives[i]
- if bytes.HasPrefix(tag, tag_directive.prefix) {
- emitter.tag_data.handle = tag_directive.handle
- emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
- return true
- }
- }
- emitter.tag_data.suffix = tag
- return true
-}
-
-// Check if a scalar is valid.
-func yaml_emitter_analyze_scalar(emitter *yaml_emitter_t, value []byte) bool {
- var (
- block_indicators = false
- flow_indicators = false
- line_breaks = false
- special_characters = false
-
- leading_space = false
- leading_break = false
- trailing_space = false
- trailing_break = false
- break_space = false
- space_break = false
-
- preceeded_by_whitespace = false
- followed_by_whitespace = false
- previous_space = false
- previous_break = false
- )
-
- emitter.scalar_data.value = value
-
- if len(value) == 0 {
- emitter.scalar_data.multiline = false
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = true
- emitter.scalar_data.single_quoted_allowed = true
- emitter.scalar_data.block_allowed = false
- return true
- }
-
- if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
- block_indicators = true
- flow_indicators = true
- }
-
- preceeded_by_whitespace = true
- for i, w := 0, 0; i < len(value); i += w {
- w = width(value[i])
- followed_by_whitespace = i+w >= len(value) || is_blank(value, i+w)
-
- if i == 0 {
- switch value[i] {
- case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
- flow_indicators = true
- block_indicators = true
- case '?', ':':
- flow_indicators = true
- if followed_by_whitespace {
- block_indicators = true
- }
- case '-':
- if followed_by_whitespace {
- flow_indicators = true
- block_indicators = true
- }
- }
- } else {
- switch value[i] {
- case ',', '?', '[', ']', '{', '}':
- flow_indicators = true
- case ':':
- flow_indicators = true
- if followed_by_whitespace {
- block_indicators = true
- }
- case '#':
- if preceeded_by_whitespace {
- flow_indicators = true
- block_indicators = true
- }
- }
- }
-
- if !is_printable(value, i) || !is_ascii(value, i) && !emitter.unicode {
- special_characters = true
- }
- if is_space(value, i) {
- if i == 0 {
- leading_space = true
- }
- if i+width(value[i]) == len(value) {
- trailing_space = true
- }
- if previous_break {
- break_space = true
- }
- previous_space = true
- previous_break = false
- } else if is_break(value, i) {
- line_breaks = true
- if i == 0 {
- leading_break = true
- }
- if i+width(value[i]) == len(value) {
- trailing_break = true
- }
- if previous_space {
- space_break = true
- }
- previous_space = false
- previous_break = true
- } else {
- previous_space = false
- previous_break = false
- }
-
- // [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
- preceeded_by_whitespace = is_blankz(value, i)
- }
-
- emitter.scalar_data.multiline = line_breaks
- emitter.scalar_data.flow_plain_allowed = true
- emitter.scalar_data.block_plain_allowed = true
- emitter.scalar_data.single_quoted_allowed = true
- emitter.scalar_data.block_allowed = true
-
- if leading_space || leading_break || trailing_space || trailing_break {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- }
- if trailing_space {
- emitter.scalar_data.block_allowed = false
- }
- if break_space {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- emitter.scalar_data.single_quoted_allowed = false
- }
- if space_break || special_characters {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- emitter.scalar_data.single_quoted_allowed = false
- emitter.scalar_data.block_allowed = false
- }
- if line_breaks {
- emitter.scalar_data.flow_plain_allowed = false
- emitter.scalar_data.block_plain_allowed = false
- }
- if flow_indicators {
- emitter.scalar_data.flow_plain_allowed = false
- }
- if block_indicators {
- emitter.scalar_data.block_plain_allowed = false
- }
- return true
-}
-
-// Check if the event data is valid.
-func yaml_emitter_analyze_event(emitter *yaml_emitter_t, event *yaml_event_t) bool {
-
- emitter.anchor_data.anchor = nil
- emitter.tag_data.handle = nil
- emitter.tag_data.suffix = nil
- emitter.scalar_data.value = nil
-
- switch event.typ {
- case yaml_ALIAS_EVENT:
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, true) {
- return false
- }
-
- case yaml_SCALAR_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || (!event.implicit && !event.quoted_implicit)) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
- if !yaml_emitter_analyze_scalar(emitter, event.value) {
- return false
- }
-
- case yaml_SEQUENCE_START_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
-
- case yaml_MAPPING_START_EVENT:
- if len(event.anchor) > 0 {
- if !yaml_emitter_analyze_anchor(emitter, event.anchor, false) {
- return false
- }
- }
- if len(event.tag) > 0 && (emitter.canonical || !event.implicit) {
- if !yaml_emitter_analyze_tag(emitter, event.tag) {
- return false
- }
- }
- }
- return true
-}
-
-// Write the BOM character.
-func yaml_emitter_write_bom(emitter *yaml_emitter_t) bool {
- if !flush(emitter) {
- return false
- }
- pos := emitter.buffer_pos
- emitter.buffer[pos+0] = '\xEF'
- emitter.buffer[pos+1] = '\xBB'
- emitter.buffer[pos+2] = '\xBF'
- emitter.buffer_pos += 3
- return true
-}
-
-func yaml_emitter_write_indent(emitter *yaml_emitter_t) bool {
- indent := emitter.indent
- if indent < 0 {
- indent = 0
- }
- if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) {
- if !put_break(emitter) {
- return false
- }
- }
- for emitter.column < indent {
- if !put(emitter, ' ') {
- return false
- }
- }
- emitter.whitespace = true
- emitter.indention = true
- return true
-}
-
-func yaml_emitter_write_indicator(emitter *yaml_emitter_t, indicator []byte, need_whitespace, is_whitespace, is_indention bool) bool {
- if need_whitespace && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !write_all(emitter, indicator) {
- return false
- }
- emitter.whitespace = is_whitespace
- emitter.indention = (emitter.indention && is_indention)
- emitter.open_ended = false
- return true
-}
-
-func yaml_emitter_write_anchor(emitter *yaml_emitter_t, value []byte) bool {
- if !write_all(emitter, value) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_tag_handle(emitter *yaml_emitter_t, value []byte) bool {
- if !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- if !write_all(emitter, value) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_tag_content(emitter *yaml_emitter_t, value []byte, need_whitespace bool) bool {
- if need_whitespace && !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
- for i := 0; i < len(value); {
- var must_write bool
- switch value[i] {
- case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']':
- must_write = true
- default:
- must_write = is_alpha(value, i)
- }
- if must_write {
- if !write(emitter, value, &i) {
- return false
- }
- } else {
- w := width(value[i])
- for k := 0; k < w; k++ {
- octet := value[i]
- i++
- if !put(emitter, '%') {
- return false
- }
-
- c := octet >> 4
- if c < 10 {
- c += '0'
- } else {
- c += 'A' - 10
- }
- if !put(emitter, c) {
- return false
- }
-
- c = octet & 0x0f
- if c < 10 {
- c += '0'
- } else {
- c += 'A' - 10
- }
- if !put(emitter, c) {
- return false
- }
- }
- }
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_plain_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
- if !emitter.whitespace {
- if !put(emitter, ' ') {
- return false
- }
- }
-
- spaces := false
- breaks := false
- for i := 0; i < len(value); {
- if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && !is_space(value, i+1) {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- spaces = true
- } else if is_break(value, i) {
- if !breaks && value[i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- spaces = false
- breaks = false
- }
- }
-
- emitter.whitespace = false
- emitter.indention = false
- if emitter.root_context {
- emitter.open_ended = true
- }
-
- return true
-}
-
-func yaml_emitter_write_single_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
-
- if !yaml_emitter_write_indicator(emitter, []byte{'\''}, true, false, false) {
- return false
- }
-
- spaces := false
- breaks := false
- for i := 0; i < len(value); {
- if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !is_space(value, i+1) {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- spaces = true
- } else if is_break(value, i) {
- if !breaks && value[i] == '\n' {
- if !put_break(emitter) {
- return false
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if value[i] == '\'' {
- if !put(emitter, '\'') {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- spaces = false
- breaks = false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'\''}, false, false, false) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_double_quoted_scalar(emitter *yaml_emitter_t, value []byte, allow_breaks bool) bool {
- spaces := false
- if !yaml_emitter_write_indicator(emitter, []byte{'"'}, true, false, false) {
- return false
- }
-
- for i := 0; i < len(value); {
- if !is_printable(value, i) || (!emitter.unicode && !is_ascii(value, i)) ||
- is_bom(value, i) || is_break(value, i) ||
- value[i] == '"' || value[i] == '\\' {
-
- octet := value[i]
-
- var w int
- var v rune
- switch {
- case octet&0x80 == 0x00:
- w, v = 1, rune(octet&0x7F)
- case octet&0xE0 == 0xC0:
- w, v = 2, rune(octet&0x1F)
- case octet&0xF0 == 0xE0:
- w, v = 3, rune(octet&0x0F)
- case octet&0xF8 == 0xF0:
- w, v = 4, rune(octet&0x07)
- }
- for k := 1; k < w; k++ {
- octet = value[i+k]
- v = (v << 6) + (rune(octet) & 0x3F)
- }
- i += w
-
- if !put(emitter, '\\') {
- return false
- }
-
- var ok bool
- switch v {
- case 0x00:
- ok = put(emitter, '0')
- case 0x07:
- ok = put(emitter, 'a')
- case 0x08:
- ok = put(emitter, 'b')
- case 0x09:
- ok = put(emitter, 't')
- case 0x0A:
- ok = put(emitter, 'n')
- case 0x0b:
- ok = put(emitter, 'v')
- case 0x0c:
- ok = put(emitter, 'f')
- case 0x0d:
- ok = put(emitter, 'r')
- case 0x1b:
- ok = put(emitter, 'e')
- case 0x22:
- ok = put(emitter, '"')
- case 0x5c:
- ok = put(emitter, '\\')
- case 0x85:
- ok = put(emitter, 'N')
- case 0xA0:
- ok = put(emitter, '_')
- case 0x2028:
- ok = put(emitter, 'L')
- case 0x2029:
- ok = put(emitter, 'P')
- default:
- if v <= 0xFF {
- ok = put(emitter, 'x')
- w = 2
- } else if v <= 0xFFFF {
- ok = put(emitter, 'u')
- w = 4
- } else {
- ok = put(emitter, 'U')
- w = 8
- }
- for k := (w - 1) * 4; ok && k >= 0; k -= 4 {
- digit := byte((v >> uint(k)) & 0x0F)
- if digit < 10 {
- ok = put(emitter, digit+'0')
- } else {
- ok = put(emitter, digit+'A'-10)
- }
- }
- }
- if !ok {
- return false
- }
- spaces = false
- } else if is_space(value, i) {
- if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- if is_space(value, i+1) {
- if !put(emitter, '\\') {
- return false
- }
- }
- i += width(value[i])
- } else if !write(emitter, value, &i) {
- return false
- }
- spaces = true
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- spaces = false
- }
- }
- if !yaml_emitter_write_indicator(emitter, []byte{'"'}, false, false, false) {
- return false
- }
- emitter.whitespace = false
- emitter.indention = false
- return true
-}
-
-func yaml_emitter_write_block_scalar_hints(emitter *yaml_emitter_t, value []byte) bool {
- if is_space(value, 0) || is_break(value, 0) {
- indent_hint := []byte{'0' + byte(emitter.best_indent)}
- if !yaml_emitter_write_indicator(emitter, indent_hint, false, false, false) {
- return false
- }
- }
-
- emitter.open_ended = false
-
- var chomp_hint [1]byte
- if len(value) == 0 {
- chomp_hint[0] = '-'
- } else {
- i := len(value) - 1
- for value[i]&0xC0 == 0x80 {
- i--
- }
- if !is_break(value, i) {
- chomp_hint[0] = '-'
- } else if i == 0 {
- chomp_hint[0] = '+'
- emitter.open_ended = true
- } else {
- i--
- for value[i]&0xC0 == 0x80 {
- i--
- }
- if is_break(value, i) {
- chomp_hint[0] = '+'
- emitter.open_ended = true
- }
- }
- }
- if chomp_hint[0] != 0 {
- if !yaml_emitter_write_indicator(emitter, chomp_hint[:], false, false, false) {
- return false
- }
- }
- return true
-}
-
-func yaml_emitter_write_literal_scalar(emitter *yaml_emitter_t, value []byte) bool {
- if !yaml_emitter_write_indicator(emitter, []byte{'|'}, true, false, false) {
- return false
- }
- if !yaml_emitter_write_block_scalar_hints(emitter, value) {
- return false
- }
- if !put_break(emitter) {
- return false
- }
- emitter.indention = true
- emitter.whitespace = true
- breaks := true
- for i := 0; i < len(value); {
- if is_break(value, i) {
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- }
- if !write(emitter, value, &i) {
- return false
- }
- emitter.indention = false
- breaks = false
- }
- }
-
- return true
-}
-
-func yaml_emitter_write_folded_scalar(emitter *yaml_emitter_t, value []byte) bool {
- if !yaml_emitter_write_indicator(emitter, []byte{'>'}, true, false, false) {
- return false
- }
- if !yaml_emitter_write_block_scalar_hints(emitter, value) {
- return false
- }
-
- if !put_break(emitter) {
- return false
- }
- emitter.indention = true
- emitter.whitespace = true
-
- breaks := true
- leading_spaces := true
- for i := 0; i < len(value); {
- if is_break(value, i) {
- if !breaks && !leading_spaces && value[i] == '\n' {
- k := 0
- for is_break(value, k) {
- k += width(value[k])
- }
- if !is_blankz(value, k) {
- if !put_break(emitter) {
- return false
- }
- }
- }
- if !write_break(emitter, value, &i) {
- return false
- }
- emitter.indention = true
- breaks = true
- } else {
- if breaks {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- leading_spaces = is_blank(value, i)
- }
- if !breaks && is_space(value, i) && !is_space(value, i+1) && emitter.column > emitter.best_width {
- if !yaml_emitter_write_indent(emitter) {
- return false
- }
- i += width(value[i])
- } else {
- if !write(emitter, value, &i) {
- return false
- }
- }
- emitter.indention = false
- breaks = false
- }
- }
- return true
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go
deleted file mode 100644
index 84f8499..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/encode.go
+++ /dev/null
@@ -1,306 +0,0 @@
-package yaml
-
-import (
- "encoding"
- "fmt"
- "reflect"
- "regexp"
- "sort"
- "strconv"
- "strings"
- "time"
-)
-
-type encoder struct {
- emitter yaml_emitter_t
- event yaml_event_t
- out []byte
- flow bool
-}
-
-func newEncoder() (e *encoder) {
- e = &encoder{}
- e.must(yaml_emitter_initialize(&e.emitter))
- yaml_emitter_set_output_string(&e.emitter, &e.out)
- yaml_emitter_set_unicode(&e.emitter, true)
- e.must(yaml_stream_start_event_initialize(&e.event, yaml_UTF8_ENCODING))
- e.emit()
- e.must(yaml_document_start_event_initialize(&e.event, nil, nil, true))
- e.emit()
- return e
-}
-
-func (e *encoder) finish() {
- e.must(yaml_document_end_event_initialize(&e.event, true))
- e.emit()
- e.emitter.open_ended = false
- e.must(yaml_stream_end_event_initialize(&e.event))
- e.emit()
-}
-
-func (e *encoder) destroy() {
- yaml_emitter_delete(&e.emitter)
-}
-
-func (e *encoder) emit() {
- // This will internally delete the e.event value.
- if !yaml_emitter_emit(&e.emitter, &e.event) && e.event.typ != yaml_DOCUMENT_END_EVENT && e.event.typ != yaml_STREAM_END_EVENT {
- e.must(false)
- }
-}
-
-func (e *encoder) must(ok bool) {
- if !ok {
- msg := e.emitter.problem
- if msg == "" {
- msg = "unknown problem generating YAML content"
- }
- failf("%s", msg)
- }
-}
-
-func (e *encoder) marshal(tag string, in reflect.Value) {
- if !in.IsValid() {
- e.nilv()
- return
- }
- iface := in.Interface()
- if m, ok := iface.(Marshaler); ok {
- v, err := m.MarshalYAML()
- if err != nil {
- fail(err)
- }
- if v == nil {
- e.nilv()
- return
- }
- in = reflect.ValueOf(v)
- } else if m, ok := iface.(encoding.TextMarshaler); ok {
- text, err := m.MarshalText()
- if err != nil {
- fail(err)
- }
- in = reflect.ValueOf(string(text))
- }
- switch in.Kind() {
- case reflect.Interface:
- if in.IsNil() {
- e.nilv()
- } else {
- e.marshal(tag, in.Elem())
- }
- case reflect.Map:
- e.mapv(tag, in)
- case reflect.Ptr:
- if in.IsNil() {
- e.nilv()
- } else {
- e.marshal(tag, in.Elem())
- }
- case reflect.Struct:
- e.structv(tag, in)
- case reflect.Slice:
- if in.Type().Elem() == mapItemType {
- e.itemsv(tag, in)
- } else {
- e.slicev(tag, in)
- }
- case reflect.String:
- e.stringv(tag, in)
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- if in.Type() == durationType {
- e.stringv(tag, reflect.ValueOf(iface.(time.Duration).String()))
- } else {
- e.intv(tag, in)
- }
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- e.uintv(tag, in)
- case reflect.Float32, reflect.Float64:
- e.floatv(tag, in)
- case reflect.Bool:
- e.boolv(tag, in)
- default:
- panic("cannot marshal type: " + in.Type().String())
- }
-}
-
-func (e *encoder) mapv(tag string, in reflect.Value) {
- e.mappingv(tag, func() {
- keys := keyList(in.MapKeys())
- sort.Sort(keys)
- for _, k := range keys {
- e.marshal("", k)
- e.marshal("", in.MapIndex(k))
- }
- })
-}
-
-func (e *encoder) itemsv(tag string, in reflect.Value) {
- e.mappingv(tag, func() {
- slice := in.Convert(reflect.TypeOf([]MapItem{})).Interface().([]MapItem)
- for _, item := range slice {
- e.marshal("", reflect.ValueOf(item.Key))
- e.marshal("", reflect.ValueOf(item.Value))
- }
- })
-}
-
-func (e *encoder) structv(tag string, in reflect.Value) {
- sinfo, err := getStructInfo(in.Type())
- if err != nil {
- panic(err)
- }
- e.mappingv(tag, func() {
- for _, info := range sinfo.FieldsList {
- var value reflect.Value
- if info.Inline == nil {
- value = in.Field(info.Num)
- } else {
- value = in.FieldByIndex(info.Inline)
- }
- if info.OmitEmpty && isZero(value) {
- continue
- }
- e.marshal("", reflect.ValueOf(info.Key))
- e.flow = info.Flow
- e.marshal("", value)
- }
- if sinfo.InlineMap >= 0 {
- m := in.Field(sinfo.InlineMap)
- if m.Len() > 0 {
- e.flow = false
- keys := keyList(m.MapKeys())
- sort.Sort(keys)
- for _, k := range keys {
- if _, found := sinfo.FieldsMap[k.String()]; found {
- panic(fmt.Sprintf("Can't have key %q in inlined map; conflicts with struct field", k.String()))
- }
- e.marshal("", k)
- e.flow = false
- e.marshal("", m.MapIndex(k))
- }
- }
- }
- })
-}
-
-func (e *encoder) mappingv(tag string, f func()) {
- implicit := tag == ""
- style := yaml_BLOCK_MAPPING_STYLE
- if e.flow {
- e.flow = false
- style = yaml_FLOW_MAPPING_STYLE
- }
- e.must(yaml_mapping_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
- e.emit()
- f()
- e.must(yaml_mapping_end_event_initialize(&e.event))
- e.emit()
-}
-
-func (e *encoder) slicev(tag string, in reflect.Value) {
- implicit := tag == ""
- style := yaml_BLOCK_SEQUENCE_STYLE
- if e.flow {
- e.flow = false
- style = yaml_FLOW_SEQUENCE_STYLE
- }
- e.must(yaml_sequence_start_event_initialize(&e.event, nil, []byte(tag), implicit, style))
- e.emit()
- n := in.Len()
- for i := 0; i < n; i++ {
- e.marshal("", in.Index(i))
- }
- e.must(yaml_sequence_end_event_initialize(&e.event))
- e.emit()
-}
-
-// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1.
-//
-// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported
-// in YAML 1.2 and by this package, but these should be marshalled quoted for
-// the time being for compatibility with other parsers.
-func isBase60Float(s string) (result bool) {
- // Fast path.
- if s == "" {
- return false
- }
- c := s[0]
- if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 {
- return false
- }
- // Do the full match.
- return base60float.MatchString(s)
-}
-
-// From http://yaml.org/type/float.html, except the regular expression there
-// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix.
-var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`)
-
-func (e *encoder) stringv(tag string, in reflect.Value) {
- var style yaml_scalar_style_t
- s := in.String()
- rtag, rs := resolve("", s)
- if rtag == yaml_BINARY_TAG {
- if tag == "" || tag == yaml_STR_TAG {
- tag = rtag
- s = rs.(string)
- } else if tag == yaml_BINARY_TAG {
- failf("explicitly tagged !!binary data must be base64-encoded")
- } else {
- failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag))
- }
- }
- if tag == "" && (rtag != yaml_STR_TAG || isBase60Float(s)) {
- style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- } else if strings.Contains(s, "\n") {
- style = yaml_LITERAL_SCALAR_STYLE
- } else {
- style = yaml_PLAIN_SCALAR_STYLE
- }
- e.emitScalar(s, "", tag, style)
-}
-
-func (e *encoder) boolv(tag string, in reflect.Value) {
- var s string
- if in.Bool() {
- s = "true"
- } else {
- s = "false"
- }
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) intv(tag string, in reflect.Value) {
- s := strconv.FormatInt(in.Int(), 10)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) uintv(tag string, in reflect.Value) {
- s := strconv.FormatUint(in.Uint(), 10)
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) floatv(tag string, in reflect.Value) {
- // FIXME: Handle 64 bits here.
- s := strconv.FormatFloat(float64(in.Float()), 'g', -1, 32)
- switch s {
- case "+Inf":
- s = ".inf"
- case "-Inf":
- s = "-.inf"
- case "NaN":
- s = ".nan"
- }
- e.emitScalar(s, "", tag, yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) nilv() {
- e.emitScalar("null", "", "", yaml_PLAIN_SCALAR_STYLE)
-}
-
-func (e *encoder) emitScalar(value, anchor, tag string, style yaml_scalar_style_t) {
- implicit := tag == ""
- e.must(yaml_scalar_event_initialize(&e.event, []byte(anchor), []byte(tag), []byte(value), implicit, implicit, style))
- e.emit()
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go
deleted file mode 100644
index 0a7037a..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/parserc.go
+++ /dev/null
@@ -1,1096 +0,0 @@
-package yaml
-
-import (
- "bytes"
-)
-
-// The parser implements the following grammar:
-//
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// implicit_document ::= block_node DOCUMENT-END*
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// block_node_or_indentless_sequence ::=
-// ALIAS
-// | properties (block_content | indentless_block_sequence)?
-// | block_content
-// | indentless_block_sequence
-// block_node ::= ALIAS
-// | properties block_content?
-// | block_content
-// flow_node ::= ALIAS
-// | properties flow_content?
-// | flow_content
-// properties ::= TAG ANCHOR? | ANCHOR TAG?
-// block_content ::= block_collection | flow_collection | SCALAR
-// flow_content ::= flow_collection | SCALAR
-// block_collection ::= block_sequence | block_mapping
-// flow_collection ::= flow_sequence | flow_mapping
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-// block_mapping ::= BLOCK-MAPPING_START
-// ((KEY block_node_or_indentless_sequence?)?
-// (VALUE block_node_or_indentless_sequence?)?)*
-// BLOCK-END
-// flow_sequence ::= FLOW-SEQUENCE-START
-// (flow_sequence_entry FLOW-ENTRY)*
-// flow_sequence_entry?
-// FLOW-SEQUENCE-END
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// flow_mapping ::= FLOW-MAPPING-START
-// (flow_mapping_entry FLOW-ENTRY)*
-// flow_mapping_entry?
-// FLOW-MAPPING-END
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
-// Peek the next token in the token queue.
-func peek_token(parser *yaml_parser_t) *yaml_token_t {
- if parser.token_available || yaml_parser_fetch_more_tokens(parser) {
- return &parser.tokens[parser.tokens_head]
- }
- return nil
-}
-
-// Remove the next token from the queue (must be called after peek_token).
-func skip_token(parser *yaml_parser_t) {
- parser.token_available = false
- parser.tokens_parsed++
- parser.stream_end_produced = parser.tokens[parser.tokens_head].typ == yaml_STREAM_END_TOKEN
- parser.tokens_head++
-}
-
-// Get the next event.
-func yaml_parser_parse(parser *yaml_parser_t, event *yaml_event_t) bool {
- // Erase the event object.
- *event = yaml_event_t{}
-
- // No events after the end of the stream or error.
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR || parser.state == yaml_PARSE_END_STATE {
- return true
- }
-
- // Generate the next event.
- return yaml_parser_state_machine(parser, event)
-}
-
-// Set parser error.
-func yaml_parser_set_parser_error(parser *yaml_parser_t, problem string, problem_mark yaml_mark_t) bool {
- parser.error = yaml_PARSER_ERROR
- parser.problem = problem
- parser.problem_mark = problem_mark
- return false
-}
-
-func yaml_parser_set_parser_error_context(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string, problem_mark yaml_mark_t) bool {
- parser.error = yaml_PARSER_ERROR
- parser.context = context
- parser.context_mark = context_mark
- parser.problem = problem
- parser.problem_mark = problem_mark
- return false
-}
-
-// State dispatcher.
-func yaml_parser_state_machine(parser *yaml_parser_t, event *yaml_event_t) bool {
- //trace("yaml_parser_state_machine", "state:", parser.state.String())
-
- switch parser.state {
- case yaml_PARSE_STREAM_START_STATE:
- return yaml_parser_parse_stream_start(parser, event)
-
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return yaml_parser_parse_document_start(parser, event, true)
-
- case yaml_PARSE_DOCUMENT_START_STATE:
- return yaml_parser_parse_document_start(parser, event, false)
-
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return yaml_parser_parse_document_content(parser, event)
-
- case yaml_PARSE_DOCUMENT_END_STATE:
- return yaml_parser_parse_document_end(parser, event)
-
- case yaml_PARSE_BLOCK_NODE_STATE:
- return yaml_parser_parse_node(parser, event, true, false)
-
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return yaml_parser_parse_node(parser, event, true, true)
-
- case yaml_PARSE_FLOW_NODE_STATE:
- return yaml_parser_parse_node(parser, event, false, false)
-
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return yaml_parser_parse_block_sequence_entry(parser, event, true)
-
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_block_sequence_entry(parser, event, false)
-
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_indentless_sequence_entry(parser, event)
-
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return yaml_parser_parse_block_mapping_key(parser, event, true)
-
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return yaml_parser_parse_block_mapping_key(parser, event, false)
-
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return yaml_parser_parse_block_mapping_value(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return yaml_parser_parse_flow_sequence_entry(parser, event, true)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return yaml_parser_parse_flow_sequence_entry(parser, event, false)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_key(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_value(parser, event)
-
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return yaml_parser_parse_flow_sequence_entry_mapping_end(parser, event)
-
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return yaml_parser_parse_flow_mapping_key(parser, event, true)
-
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return yaml_parser_parse_flow_mapping_key(parser, event, false)
-
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return yaml_parser_parse_flow_mapping_value(parser, event, false)
-
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return yaml_parser_parse_flow_mapping_value(parser, event, true)
-
- default:
- panic("invalid parser state")
- }
- return false
-}
-
-// Parse the production:
-// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-// ************
-func yaml_parser_parse_stream_start(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_STREAM_START_TOKEN {
- return yaml_parser_set_parser_error(parser, "did not find expected ", token.start_mark)
- }
- parser.state = yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE
- *event = yaml_event_t{
- typ: yaml_STREAM_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- encoding: token.encoding,
- }
- skip_token(parser)
- return true
-}
-
-// Parse the productions:
-// implicit_document ::= block_node DOCUMENT-END*
-// *
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// *************************
-func yaml_parser_parse_document_start(parser *yaml_parser_t, event *yaml_event_t, implicit bool) bool {
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- // Parse extra document end indicators.
- if !implicit {
- for token.typ == yaml_DOCUMENT_END_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- }
-
- if implicit && token.typ != yaml_VERSION_DIRECTIVE_TOKEN &&
- token.typ != yaml_TAG_DIRECTIVE_TOKEN &&
- token.typ != yaml_DOCUMENT_START_TOKEN &&
- token.typ != yaml_STREAM_END_TOKEN {
- // Parse an implicit document.
- if !yaml_parser_process_directives(parser, nil, nil) {
- return false
- }
- parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
- parser.state = yaml_PARSE_BLOCK_NODE_STATE
-
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- } else if token.typ != yaml_STREAM_END_TOKEN {
- // Parse an explicit document.
- var version_directive *yaml_version_directive_t
- var tag_directives []yaml_tag_directive_t
- start_mark := token.start_mark
- if !yaml_parser_process_directives(parser, &version_directive, &tag_directives) {
- return false
- }
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_DOCUMENT_START_TOKEN {
- yaml_parser_set_parser_error(parser,
- "did not find expected ", token.start_mark)
- return false
- }
- parser.states = append(parser.states, yaml_PARSE_DOCUMENT_END_STATE)
- parser.state = yaml_PARSE_DOCUMENT_CONTENT_STATE
- end_mark := token.end_mark
-
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- version_directive: version_directive,
- tag_directives: tag_directives,
- implicit: false,
- }
- skip_token(parser)
-
- } else {
- // Parse the stream end.
- parser.state = yaml_PARSE_END_STATE
- *event = yaml_event_t{
- typ: yaml_STREAM_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- }
-
- return true
-}
-
-// Parse the productions:
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-// ***********
-//
-func yaml_parser_parse_document_content(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VERSION_DIRECTIVE_TOKEN ||
- token.typ == yaml_TAG_DIRECTIVE_TOKEN ||
- token.typ == yaml_DOCUMENT_START_TOKEN ||
- token.typ == yaml_DOCUMENT_END_TOKEN ||
- token.typ == yaml_STREAM_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- return yaml_parser_process_empty_scalar(parser, event,
- token.start_mark)
- }
- return yaml_parser_parse_node(parser, event, true, false)
-}
-
-// Parse the productions:
-// implicit_document ::= block_node DOCUMENT-END*
-// *************
-// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
-//
-func yaml_parser_parse_document_end(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- start_mark := token.start_mark
- end_mark := token.start_mark
-
- implicit := true
- if token.typ == yaml_DOCUMENT_END_TOKEN {
- end_mark = token.end_mark
- skip_token(parser)
- implicit = false
- }
-
- parser.tag_directives = parser.tag_directives[:0]
-
- parser.state = yaml_PARSE_DOCUMENT_START_STATE
- *event = yaml_event_t{
- typ: yaml_DOCUMENT_END_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- implicit: implicit,
- }
- return true
-}
-
-// Parse the productions:
-// block_node_or_indentless_sequence ::=
-// ALIAS
-// *****
-// | properties (block_content | indentless_block_sequence)?
-// ********** *
-// | block_content | indentless_block_sequence
-// *
-// block_node ::= ALIAS
-// *****
-// | properties block_content?
-// ********** *
-// | block_content
-// *
-// flow_node ::= ALIAS
-// *****
-// | properties flow_content?
-// ********** *
-// | flow_content
-// *
-// properties ::= TAG ANCHOR? | ANCHOR TAG?
-// *************************
-// block_content ::= block_collection | flow_collection | SCALAR
-// ******
-// flow_content ::= flow_collection | SCALAR
-// ******
-func yaml_parser_parse_node(parser *yaml_parser_t, event *yaml_event_t, block, indentless_sequence bool) bool {
- //defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)()
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_ALIAS_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- *event = yaml_event_t{
- typ: yaml_ALIAS_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- anchor: token.value,
- }
- skip_token(parser)
- return true
- }
-
- start_mark := token.start_mark
- end_mark := token.start_mark
-
- var tag_token bool
- var tag_handle, tag_suffix, anchor []byte
- var tag_mark yaml_mark_t
- if token.typ == yaml_ANCHOR_TOKEN {
- anchor = token.value
- start_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_TAG_TOKEN {
- tag_token = true
- tag_handle = token.value
- tag_suffix = token.suffix
- tag_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- } else if token.typ == yaml_TAG_TOKEN {
- tag_token = true
- tag_handle = token.value
- tag_suffix = token.suffix
- start_mark = token.start_mark
- tag_mark = token.start_mark
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_ANCHOR_TOKEN {
- anchor = token.value
- end_mark = token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
- }
-
- var tag []byte
- if tag_token {
- if len(tag_handle) == 0 {
- tag = tag_suffix
- tag_suffix = nil
- } else {
- for i := range parser.tag_directives {
- if bytes.Equal(parser.tag_directives[i].handle, tag_handle) {
- tag = append([]byte(nil), parser.tag_directives[i].prefix...)
- tag = append(tag, tag_suffix...)
- break
- }
- }
- if len(tag) == 0 {
- yaml_parser_set_parser_error_context(parser,
- "while parsing a node", start_mark,
- "found undefined tag handle", tag_mark)
- return false
- }
- }
- }
-
- implicit := len(tag) == 0
- if indentless_sequence && token.typ == yaml_BLOCK_ENTRY_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
- }
- return true
- }
- if token.typ == yaml_SCALAR_TOKEN {
- var plain_implicit, quoted_implicit bool
- end_mark = token.end_mark
- if (len(tag) == 0 && token.style == yaml_PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') {
- plain_implicit = true
- } else if len(tag) == 0 {
- quoted_implicit = true
- }
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- value: token.value,
- implicit: plain_implicit,
- quoted_implicit: quoted_implicit,
- style: yaml_style_t(token.style),
- }
- skip_token(parser)
- return true
- }
- if token.typ == yaml_FLOW_SEQUENCE_START_TOKEN {
- // [Go] Some of the events below can be merged as they differ only on style.
- end_mark = token.end_mark
- parser.state = yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_FLOW_SEQUENCE_STYLE),
- }
- return true
- }
- if token.typ == yaml_FLOW_MAPPING_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
- }
- return true
- }
- if block && token.typ == yaml_BLOCK_SEQUENCE_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_SEQUENCE_STYLE),
- }
- return true
- }
- if block && token.typ == yaml_BLOCK_MAPPING_START_TOKEN {
- end_mark = token.end_mark
- parser.state = yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- style: yaml_style_t(yaml_BLOCK_MAPPING_STYLE),
- }
- return true
- }
- if len(anchor) > 0 || len(tag) > 0 {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: start_mark,
- end_mark: end_mark,
- anchor: anchor,
- tag: tag,
- implicit: implicit,
- quoted_implicit: false,
- style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
- }
- return true
- }
-
- context := "while parsing a flow node"
- if block {
- context = "while parsing a block node"
- }
- yaml_parser_set_parser_error_context(parser, context, start_mark,
- "did not find expected node content", token.start_mark)
- return false
-}
-
-// Parse the productions:
-// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-// ******************** *********** * *********
-//
-func yaml_parser_parse_block_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_BLOCK_ENTRY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_BLOCK_ENTRY_TOKEN && token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, true, false)
- } else {
- parser.state = yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- }
- if token.typ == yaml_BLOCK_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- skip_token(parser)
- return true
- }
-
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a block collection", context_mark,
- "did not find expected '-' indicator", token.start_mark)
-}
-
-// Parse the productions:
-// indentless_sequence ::= (BLOCK-ENTRY block_node?)+
-// *********** *
-func yaml_parser_parse_indentless_sequence_entry(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_BLOCK_ENTRY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_BLOCK_ENTRY_TOKEN &&
- token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, true, false)
- }
- parser.state = yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.start_mark, // [Go] Shouldn't this be token.end_mark?
- }
- return true
-}
-
-// Parse the productions:
-// block_mapping ::= BLOCK-MAPPING_START
-// *******************
-// ((KEY block_node_or_indentless_sequence?)?
-// *** *
-// (VALUE block_node_or_indentless_sequence?)?)*
-//
-// BLOCK-END
-// *********
-//
-func yaml_parser_parse_block_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ == yaml_KEY_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, true, true)
- } else {
- parser.state = yaml_PARSE_BLOCK_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- } else if token.typ == yaml_BLOCK_END_TOKEN {
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- return true
- }
-
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a block mapping", context_mark,
- "did not find expected key", token.start_mark)
-}
-
-// Parse the productions:
-// block_mapping ::= BLOCK-MAPPING_START
-//
-// ((KEY block_node_or_indentless_sequence?)?
-//
-// (VALUE block_node_or_indentless_sequence?)?)*
-// ***** *
-// BLOCK-END
-//
-//
-func yaml_parser_parse_block_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VALUE_TOKEN {
- mark := token.end_mark
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_KEY_TOKEN &&
- token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_BLOCK_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_BLOCK_MAPPING_KEY_STATE)
- return yaml_parser_parse_node(parser, event, true, true)
- }
- parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
- }
- parser.state = yaml_PARSE_BLOCK_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence ::= FLOW-SEQUENCE-START
-// *******************
-// (flow_sequence_entry FLOW-ENTRY)*
-// * **********
-// flow_sequence_entry?
-// *
-// FLOW-SEQUENCE-END
-// *****************
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *
-//
-func yaml_parser_parse_flow_sequence_entry(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- if !first {
- if token.typ == yaml_FLOW_ENTRY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- } else {
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a flow sequence", context_mark,
- "did not find expected ',' or ']'", token.start_mark)
- }
- }
-
- if token.typ == yaml_KEY_TOKEN {
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_START_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- implicit: true,
- style: yaml_style_t(yaml_FLOW_MAPPING_STYLE),
- }
- skip_token(parser)
- return true
- } else if token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
-
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
-
- *event = yaml_event_t{
- typ: yaml_SEQUENCE_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
-
- skip_token(parser)
- return true
-}
-
-//
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_key(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_FLOW_ENTRY_TOKEN &&
- token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- mark := token.end_mark
- skip_token(parser)
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// ***** *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_value(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ == yaml_VALUE_TOKEN {
- skip_token(parser)
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_SEQUENCE_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Parse the productions:
-// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// *
-//
-func yaml_parser_parse_flow_sequence_entry_mapping_end(parser *yaml_parser_t, event *yaml_event_t) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- parser.state = yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.start_mark, // [Go] Shouldn't this be end_mark?
- }
- return true
-}
-
-// Parse the productions:
-// flow_mapping ::= FLOW-MAPPING-START
-// ******************
-// (flow_mapping_entry FLOW-ENTRY)*
-// * **********
-// flow_mapping_entry?
-// ******************
-// FLOW-MAPPING-END
-// ****************
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// * *** *
-//
-func yaml_parser_parse_flow_mapping_key(parser *yaml_parser_t, event *yaml_event_t, first bool) bool {
- if first {
- token := peek_token(parser)
- parser.marks = append(parser.marks, token.start_mark)
- skip_token(parser)
- }
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- if !first {
- if token.typ == yaml_FLOW_ENTRY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- } else {
- context_mark := parser.marks[len(parser.marks)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- return yaml_parser_set_parser_error_context(parser,
- "while parsing a flow mapping", context_mark,
- "did not find expected ',' or '}'", token.start_mark)
- }
- }
-
- if token.typ == yaml_KEY_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_VALUE_TOKEN &&
- token.typ != yaml_FLOW_ENTRY_TOKEN &&
- token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- } else {
- parser.state = yaml_PARSE_FLOW_MAPPING_VALUE_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
- }
- } else if token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
-
- parser.state = parser.states[len(parser.states)-1]
- parser.states = parser.states[:len(parser.states)-1]
- parser.marks = parser.marks[:len(parser.marks)-1]
- *event = yaml_event_t{
- typ: yaml_MAPPING_END_EVENT,
- start_mark: token.start_mark,
- end_mark: token.end_mark,
- }
- skip_token(parser)
- return true
-}
-
-// Parse the productions:
-// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-// * ***** *
-//
-func yaml_parser_parse_flow_mapping_value(parser *yaml_parser_t, event *yaml_event_t, empty bool) bool {
- token := peek_token(parser)
- if token == nil {
- return false
- }
- if empty {
- parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
- }
- if token.typ == yaml_VALUE_TOKEN {
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- if token.typ != yaml_FLOW_ENTRY_TOKEN && token.typ != yaml_FLOW_MAPPING_END_TOKEN {
- parser.states = append(parser.states, yaml_PARSE_FLOW_MAPPING_KEY_STATE)
- return yaml_parser_parse_node(parser, event, false, false)
- }
- }
- parser.state = yaml_PARSE_FLOW_MAPPING_KEY_STATE
- return yaml_parser_process_empty_scalar(parser, event, token.start_mark)
-}
-
-// Generate an empty scalar event.
-func yaml_parser_process_empty_scalar(parser *yaml_parser_t, event *yaml_event_t, mark yaml_mark_t) bool {
- *event = yaml_event_t{
- typ: yaml_SCALAR_EVENT,
- start_mark: mark,
- end_mark: mark,
- value: nil, // Empty
- implicit: true,
- style: yaml_style_t(yaml_PLAIN_SCALAR_STYLE),
- }
- return true
-}
-
-var default_tag_directives = []yaml_tag_directive_t{
- {[]byte("!"), []byte("!")},
- {[]byte("!!"), []byte("tag:yaml.org,2002:")},
-}
-
-// Parse directives.
-func yaml_parser_process_directives(parser *yaml_parser_t,
- version_directive_ref **yaml_version_directive_t,
- tag_directives_ref *[]yaml_tag_directive_t) bool {
-
- var version_directive *yaml_version_directive_t
- var tag_directives []yaml_tag_directive_t
-
- token := peek_token(parser)
- if token == nil {
- return false
- }
-
- for token.typ == yaml_VERSION_DIRECTIVE_TOKEN || token.typ == yaml_TAG_DIRECTIVE_TOKEN {
- if token.typ == yaml_VERSION_DIRECTIVE_TOKEN {
- if version_directive != nil {
- yaml_parser_set_parser_error(parser,
- "found duplicate %YAML directive", token.start_mark)
- return false
- }
- if token.major != 1 || token.minor != 1 {
- yaml_parser_set_parser_error(parser,
- "found incompatible YAML document", token.start_mark)
- return false
- }
- version_directive = &yaml_version_directive_t{
- major: token.major,
- minor: token.minor,
- }
- } else if token.typ == yaml_TAG_DIRECTIVE_TOKEN {
- value := yaml_tag_directive_t{
- handle: token.value,
- prefix: token.prefix,
- }
- if !yaml_parser_append_tag_directive(parser, value, false, token.start_mark) {
- return false
- }
- tag_directives = append(tag_directives, value)
- }
-
- skip_token(parser)
- token = peek_token(parser)
- if token == nil {
- return false
- }
- }
-
- for i := range default_tag_directives {
- if !yaml_parser_append_tag_directive(parser, default_tag_directives[i], true, token.start_mark) {
- return false
- }
- }
-
- if version_directive_ref != nil {
- *version_directive_ref = version_directive
- }
- if tag_directives_ref != nil {
- *tag_directives_ref = tag_directives
- }
- return true
-}
-
-// Append a tag directive to the directives stack.
-func yaml_parser_append_tag_directive(parser *yaml_parser_t, value yaml_tag_directive_t, allow_duplicates bool, mark yaml_mark_t) bool {
- for i := range parser.tag_directives {
- if bytes.Equal(value.handle, parser.tag_directives[i].handle) {
- if allow_duplicates {
- return true
- }
- return yaml_parser_set_parser_error(parser, "found duplicate %TAG directive", mark)
- }
- }
-
- // [Go] I suspect the copy is unnecessary. This was likely done
- // because there was no way to track ownership of the data.
- value_copy := yaml_tag_directive_t{
- handle: make([]byte, len(value.handle)),
- prefix: make([]byte, len(value.prefix)),
- }
- copy(value_copy.handle, value.handle)
- copy(value_copy.prefix, value.prefix)
- parser.tag_directives = append(parser.tag_directives, value_copy)
- return true
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
deleted file mode 100644
index f450791..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/readerc.go
+++ /dev/null
@@ -1,394 +0,0 @@
-package yaml
-
-import (
- "io"
-)
-
-// Set the reader error and return 0.
-func yaml_parser_set_reader_error(parser *yaml_parser_t, problem string, offset int, value int) bool {
- parser.error = yaml_READER_ERROR
- parser.problem = problem
- parser.problem_offset = offset
- parser.problem_value = value
- return false
-}
-
-// Byte order marks.
-const (
- bom_UTF8 = "\xef\xbb\xbf"
- bom_UTF16LE = "\xff\xfe"
- bom_UTF16BE = "\xfe\xff"
-)
-
-// Determine the input stream encoding by checking the BOM symbol. If no BOM is
-// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure.
-func yaml_parser_determine_encoding(parser *yaml_parser_t) bool {
- // Ensure that we had enough bytes in the raw buffer.
- for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 {
- if !yaml_parser_update_raw_buffer(parser) {
- return false
- }
- }
-
- // Determine the encoding.
- buf := parser.raw_buffer
- pos := parser.raw_buffer_pos
- avail := len(buf) - pos
- if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] {
- parser.encoding = yaml_UTF16LE_ENCODING
- parser.raw_buffer_pos += 2
- parser.offset += 2
- } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] {
- parser.encoding = yaml_UTF16BE_ENCODING
- parser.raw_buffer_pos += 2
- parser.offset += 2
- } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] {
- parser.encoding = yaml_UTF8_ENCODING
- parser.raw_buffer_pos += 3
- parser.offset += 3
- } else {
- parser.encoding = yaml_UTF8_ENCODING
- }
- return true
-}
-
-// Update the raw buffer.
-func yaml_parser_update_raw_buffer(parser *yaml_parser_t) bool {
- size_read := 0
-
- // Return if the raw buffer is full.
- if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) {
- return true
- }
-
- // Return on EOF.
- if parser.eof {
- return true
- }
-
- // Move the remaining bytes in the raw buffer to the beginning.
- if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) {
- copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:])
- }
- parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos]
- parser.raw_buffer_pos = 0
-
- // Call the read handler to fill the buffer.
- size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)])
- parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read]
- if err == io.EOF {
- parser.eof = true
- } else if err != nil {
- return yaml_parser_set_reader_error(parser, "input error: "+err.Error(), parser.offset, -1)
- }
- return true
-}
-
-// Ensure that the buffer contains at least `length` characters.
-// Return true on success, false on failure.
-//
-// The length is supposed to be significantly less that the buffer size.
-func yaml_parser_update_buffer(parser *yaml_parser_t, length int) bool {
- if parser.read_handler == nil {
- panic("read handler must be set")
- }
-
- // If the EOF flag is set and the raw buffer is empty, do nothing.
- if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) {
- return true
- }
-
- // Return if the buffer contains enough characters.
- if parser.unread >= length {
- return true
- }
-
- // Determine the input encoding if it is not known yet.
- if parser.encoding == yaml_ANY_ENCODING {
- if !yaml_parser_determine_encoding(parser) {
- return false
- }
- }
-
- // Move the unread characters to the beginning of the buffer.
- buffer_len := len(parser.buffer)
- if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len {
- copy(parser.buffer, parser.buffer[parser.buffer_pos:])
- buffer_len -= parser.buffer_pos
- parser.buffer_pos = 0
- } else if parser.buffer_pos == buffer_len {
- buffer_len = 0
- parser.buffer_pos = 0
- }
-
- // Open the whole buffer for writing, and cut it before returning.
- parser.buffer = parser.buffer[:cap(parser.buffer)]
-
- // Fill the buffer until it has enough characters.
- first := true
- for parser.unread < length {
-
- // Fill the raw buffer if necessary.
- if !first || parser.raw_buffer_pos == len(parser.raw_buffer) {
- if !yaml_parser_update_raw_buffer(parser) {
- parser.buffer = parser.buffer[:buffer_len]
- return false
- }
- }
- first = false
-
- // Decode the raw buffer.
- inner:
- for parser.raw_buffer_pos != len(parser.raw_buffer) {
- var value rune
- var width int
-
- raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos
-
- // Decode the next character.
- switch parser.encoding {
- case yaml_UTF8_ENCODING:
- // Decode a UTF-8 character. Check RFC 3629
- // (http://www.ietf.org/rfc/rfc3629.txt) for more details.
- //
- // The following table (taken from the RFC) is used for
- // decoding.
- //
- // Char. number range | UTF-8 octet sequence
- // (hexadecimal) | (binary)
- // --------------------+------------------------------------
- // 0000 0000-0000 007F | 0xxxxxxx
- // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx
- // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx
- // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- //
- // Additionally, the characters in the range 0xD800-0xDFFF
- // are prohibited as they are reserved for use with UTF-16
- // surrogate pairs.
-
- // Determine the length of the UTF-8 sequence.
- octet := parser.raw_buffer[parser.raw_buffer_pos]
- switch {
- case octet&0x80 == 0x00:
- width = 1
- case octet&0xE0 == 0xC0:
- width = 2
- case octet&0xF0 == 0xE0:
- width = 3
- case octet&0xF8 == 0xF0:
- width = 4
- default:
- // The leading octet is invalid.
- return yaml_parser_set_reader_error(parser,
- "invalid leading UTF-8 octet",
- parser.offset, int(octet))
- }
-
- // Check if the raw buffer contains an incomplete character.
- if width > raw_unread {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-8 octet sequence",
- parser.offset, -1)
- }
- break inner
- }
-
- // Decode the leading octet.
- switch {
- case octet&0x80 == 0x00:
- value = rune(octet & 0x7F)
- case octet&0xE0 == 0xC0:
- value = rune(octet & 0x1F)
- case octet&0xF0 == 0xE0:
- value = rune(octet & 0x0F)
- case octet&0xF8 == 0xF0:
- value = rune(octet & 0x07)
- default:
- value = 0
- }
-
- // Check and decode the trailing octets.
- for k := 1; k < width; k++ {
- octet = parser.raw_buffer[parser.raw_buffer_pos+k]
-
- // Check if the octet is valid.
- if (octet & 0xC0) != 0x80 {
- return yaml_parser_set_reader_error(parser,
- "invalid trailing UTF-8 octet",
- parser.offset+k, int(octet))
- }
-
- // Decode the octet.
- value = (value << 6) + rune(octet&0x3F)
- }
-
- // Check the length of the sequence against the value.
- switch {
- case width == 1:
- case width == 2 && value >= 0x80:
- case width == 3 && value >= 0x800:
- case width == 4 && value >= 0x10000:
- default:
- return yaml_parser_set_reader_error(parser,
- "invalid length of a UTF-8 sequence",
- parser.offset, -1)
- }
-
- // Check the range of the value.
- if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF {
- return yaml_parser_set_reader_error(parser,
- "invalid Unicode character",
- parser.offset, int(value))
- }
-
- case yaml_UTF16LE_ENCODING, yaml_UTF16BE_ENCODING:
- var low, high int
- if parser.encoding == yaml_UTF16LE_ENCODING {
- low, high = 0, 1
- } else {
- low, high = 1, 0
- }
-
- // The UTF-16 encoding is not as simple as one might
- // naively think. Check RFC 2781
- // (http://www.ietf.org/rfc/rfc2781.txt).
- //
- // Normally, two subsequent bytes describe a Unicode
- // character. However a special technique (called a
- // surrogate pair) is used for specifying character
- // values larger than 0xFFFF.
- //
- // A surrogate pair consists of two pseudo-characters:
- // high surrogate area (0xD800-0xDBFF)
- // low surrogate area (0xDC00-0xDFFF)
- //
- // The following formulas are used for decoding
- // and encoding characters using surrogate pairs:
- //
- // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF)
- // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF)
- // W1 = 110110yyyyyyyyyy
- // W2 = 110111xxxxxxxxxx
- //
- // where U is the character value, W1 is the high surrogate
- // area, W2 is the low surrogate area.
-
- // Check for incomplete UTF-16 character.
- if raw_unread < 2 {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-16 character",
- parser.offset, -1)
- }
- break inner
- }
-
- // Get the character.
- value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) +
- (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8)
-
- // Check for unexpected low surrogate area.
- if value&0xFC00 == 0xDC00 {
- return yaml_parser_set_reader_error(parser,
- "unexpected low surrogate area",
- parser.offset, int(value))
- }
-
- // Check for a high surrogate area.
- if value&0xFC00 == 0xD800 {
- width = 4
-
- // Check for incomplete surrogate pair.
- if raw_unread < 4 {
- if parser.eof {
- return yaml_parser_set_reader_error(parser,
- "incomplete UTF-16 surrogate pair",
- parser.offset, -1)
- }
- break inner
- }
-
- // Get the next character.
- value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) +
- (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8)
-
- // Check for a low surrogate area.
- if value2&0xFC00 != 0xDC00 {
- return yaml_parser_set_reader_error(parser,
- "expected low surrogate area",
- parser.offset+2, int(value2))
- }
-
- // Generate the value of the surrogate pair.
- value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF)
- } else {
- width = 2
- }
-
- default:
- panic("impossible")
- }
-
- // Check if the character is in the allowed range:
- // #x9 | #xA | #xD | [#x20-#x7E] (8 bit)
- // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit)
- // | [#x10000-#x10FFFF] (32 bit)
- switch {
- case value == 0x09:
- case value == 0x0A:
- case value == 0x0D:
- case value >= 0x20 && value <= 0x7E:
- case value == 0x85:
- case value >= 0xA0 && value <= 0xD7FF:
- case value >= 0xE000 && value <= 0xFFFD:
- case value >= 0x10000 && value <= 0x10FFFF:
- default:
- return yaml_parser_set_reader_error(parser,
- "control characters are not allowed",
- parser.offset, int(value))
- }
-
- // Move the raw pointers.
- parser.raw_buffer_pos += width
- parser.offset += width
-
- // Finally put the character into the buffer.
- if value <= 0x7F {
- // 0000 0000-0000 007F . 0xxxxxxx
- parser.buffer[buffer_len+0] = byte(value)
- buffer_len += 1
- } else if value <= 0x7FF {
- // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6))
- parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F))
- buffer_len += 2
- } else if value <= 0xFFFF {
- // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12))
- parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F))
- parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F))
- buffer_len += 3
- } else {
- // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
- parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18))
- parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F))
- parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F))
- parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F))
- buffer_len += 4
- }
-
- parser.unread++
- }
-
- // On EOF, put NUL into the buffer and return.
- if parser.eof {
- parser.buffer[buffer_len] = 0
- buffer_len++
- parser.unread++
- break
- }
- }
- parser.buffer = parser.buffer[:buffer_len]
- return true
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
deleted file mode 100644
index 93a8632..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/resolve.go
+++ /dev/null
@@ -1,203 +0,0 @@
-package yaml
-
-import (
- "encoding/base64"
- "math"
- "strconv"
- "strings"
- "unicode/utf8"
-)
-
-type resolveMapItem struct {
- value interface{}
- tag string
-}
-
-var resolveTable = make([]byte, 256)
-var resolveMap = make(map[string]resolveMapItem)
-
-func init() {
- t := resolveTable
- t[int('+')] = 'S' // Sign
- t[int('-')] = 'S'
- for _, c := range "0123456789" {
- t[int(c)] = 'D' // Digit
- }
- for _, c := range "yYnNtTfFoO~" {
- t[int(c)] = 'M' // In map
- }
- t[int('.')] = '.' // Float (potentially in map)
-
- var resolveMapList = []struct {
- v interface{}
- tag string
- l []string
- }{
- {true, yaml_BOOL_TAG, []string{"y", "Y", "yes", "Yes", "YES"}},
- {true, yaml_BOOL_TAG, []string{"true", "True", "TRUE"}},
- {true, yaml_BOOL_TAG, []string{"on", "On", "ON"}},
- {false, yaml_BOOL_TAG, []string{"n", "N", "no", "No", "NO"}},
- {false, yaml_BOOL_TAG, []string{"false", "False", "FALSE"}},
- {false, yaml_BOOL_TAG, []string{"off", "Off", "OFF"}},
- {nil, yaml_NULL_TAG, []string{"", "~", "null", "Null", "NULL"}},
- {math.NaN(), yaml_FLOAT_TAG, []string{".nan", ".NaN", ".NAN"}},
- {math.Inf(+1), yaml_FLOAT_TAG, []string{".inf", ".Inf", ".INF"}},
- {math.Inf(+1), yaml_FLOAT_TAG, []string{"+.inf", "+.Inf", "+.INF"}},
- {math.Inf(-1), yaml_FLOAT_TAG, []string{"-.inf", "-.Inf", "-.INF"}},
- {"<<", yaml_MERGE_TAG, []string{"<<"}},
- }
-
- m := resolveMap
- for _, item := range resolveMapList {
- for _, s := range item.l {
- m[s] = resolveMapItem{item.v, item.tag}
- }
- }
-}
-
-const longTagPrefix = "tag:yaml.org,2002:"
-
-func shortTag(tag string) string {
- // TODO This can easily be made faster and produce less garbage.
- if strings.HasPrefix(tag, longTagPrefix) {
- return "!!" + tag[len(longTagPrefix):]
- }
- return tag
-}
-
-func longTag(tag string) string {
- if strings.HasPrefix(tag, "!!") {
- return longTagPrefix + tag[2:]
- }
- return tag
-}
-
-func resolvableTag(tag string) bool {
- switch tag {
- case "", yaml_STR_TAG, yaml_BOOL_TAG, yaml_INT_TAG, yaml_FLOAT_TAG, yaml_NULL_TAG:
- return true
- }
- return false
-}
-
-func resolve(tag string, in string) (rtag string, out interface{}) {
- if !resolvableTag(tag) {
- return tag, in
- }
-
- defer func() {
- switch tag {
- case "", rtag, yaml_STR_TAG, yaml_BINARY_TAG:
- return
- }
- failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag))
- }()
-
- // Any data is accepted as a !!str or !!binary.
- // Otherwise, the prefix is enough of a hint about what it might be.
- hint := byte('N')
- if in != "" {
- hint = resolveTable[in[0]]
- }
- if hint != 0 && tag != yaml_STR_TAG && tag != yaml_BINARY_TAG {
- // Handle things we can lookup in a map.
- if item, ok := resolveMap[in]; ok {
- return item.tag, item.value
- }
-
- // Base 60 floats are a bad idea, were dropped in YAML 1.2, and
- // are purposefully unsupported here. They're still quoted on
- // the way out for compatibility with other parser, though.
-
- switch hint {
- case 'M':
- // We've already checked the map above.
-
- case '.':
- // Not in the map, so maybe a normal float.
- floatv, err := strconv.ParseFloat(in, 64)
- if err == nil {
- return yaml_FLOAT_TAG, floatv
- }
-
- case 'D', 'S':
- // Int, float, or timestamp.
- plain := strings.Replace(in, "_", "", -1)
- intv, err := strconv.ParseInt(plain, 0, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return yaml_INT_TAG, int(intv)
- } else {
- return yaml_INT_TAG, intv
- }
- }
- uintv, err := strconv.ParseUint(plain, 0, 64)
- if err == nil {
- return yaml_INT_TAG, uintv
- }
- floatv, err := strconv.ParseFloat(plain, 64)
- if err == nil {
- return yaml_FLOAT_TAG, floatv
- }
- if strings.HasPrefix(plain, "0b") {
- intv, err := strconv.ParseInt(plain[2:], 2, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return yaml_INT_TAG, int(intv)
- } else {
- return yaml_INT_TAG, intv
- }
- }
- uintv, err := strconv.ParseUint(plain[2:], 2, 64)
- if err == nil {
- return yaml_INT_TAG, uintv
- }
- } else if strings.HasPrefix(plain, "-0b") {
- intv, err := strconv.ParseInt(plain[3:], 2, 64)
- if err == nil {
- if intv == int64(int(intv)) {
- return yaml_INT_TAG, -int(intv)
- } else {
- return yaml_INT_TAG, -intv
- }
- }
- }
- // XXX Handle timestamps here.
-
- default:
- panic("resolveTable item not yet handled: " + string(rune(hint)) + " (with " + in + ")")
- }
- }
- if tag == yaml_BINARY_TAG {
- return yaml_BINARY_TAG, in
- }
- if utf8.ValidString(in) {
- return yaml_STR_TAG, in
- }
- return yaml_BINARY_TAG, encodeBase64(in)
-}
-
-// encodeBase64 encodes s as base64 that is broken up into multiple lines
-// as appropriate for the resulting length.
-func encodeBase64(s string) string {
- const lineLen = 70
- encLen := base64.StdEncoding.EncodedLen(len(s))
- lines := encLen/lineLen + 1
- buf := make([]byte, encLen*2+lines)
- in := buf[0:encLen]
- out := buf[encLen:]
- base64.StdEncoding.Encode(in, []byte(s))
- k := 0
- for i := 0; i < len(in); i += lineLen {
- j := i + lineLen
- if j > len(in) {
- j = len(in)
- }
- k += copy(out[k:], in[i:j])
- if lines > 1 {
- out[k] = '\n'
- k++
- }
- }
- return string(out[:k])
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
deleted file mode 100644
index d97d76f..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/scannerc.go
+++ /dev/null
@@ -1,2710 +0,0 @@
-package yaml
-
-import (
- "bytes"
- "fmt"
-)
-
-// Introduction
-// ************
-//
-// The following notes assume that you are familiar with the YAML specification
-// (http://yaml.org/spec/cvs/current.html). We mostly follow it, although in
-// some cases we are less restrictive that it requires.
-//
-// The process of transforming a YAML stream into a sequence of events is
-// divided on two steps: Scanning and Parsing.
-//
-// The Scanner transforms the input stream into a sequence of tokens, while the
-// parser transform the sequence of tokens produced by the Scanner into a
-// sequence of parsing events.
-//
-// The Scanner is rather clever and complicated. The Parser, on the contrary,
-// is a straightforward implementation of a recursive-descendant parser (or,
-// LL(1) parser, as it is usually called).
-//
-// Actually there are two issues of Scanning that might be called "clever", the
-// rest is quite straightforward. The issues are "block collection start" and
-// "simple keys". Both issues are explained below in details.
-//
-// Here the Scanning step is explained and implemented. We start with the list
-// of all the tokens produced by the Scanner together with short descriptions.
-//
-// Now, tokens:
-//
-// STREAM-START(encoding) # The stream start.
-// STREAM-END # The stream end.
-// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive.
-// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive.
-// DOCUMENT-START # '---'
-// DOCUMENT-END # '...'
-// BLOCK-SEQUENCE-START # Indentation increase denoting a block
-// BLOCK-MAPPING-START # sequence or a block mapping.
-// BLOCK-END # Indentation decrease.
-// FLOW-SEQUENCE-START # '['
-// FLOW-SEQUENCE-END # ']'
-// BLOCK-SEQUENCE-START # '{'
-// BLOCK-SEQUENCE-END # '}'
-// BLOCK-ENTRY # '-'
-// FLOW-ENTRY # ','
-// KEY # '?' or nothing (simple keys).
-// VALUE # ':'
-// ALIAS(anchor) # '*anchor'
-// ANCHOR(anchor) # '&anchor'
-// TAG(handle,suffix) # '!handle!suffix'
-// SCALAR(value,style) # A scalar.
-//
-// The following two tokens are "virtual" tokens denoting the beginning and the
-// end of the stream:
-//
-// STREAM-START(encoding)
-// STREAM-END
-//
-// We pass the information about the input stream encoding with the
-// STREAM-START token.
-//
-// The next two tokens are responsible for tags:
-//
-// VERSION-DIRECTIVE(major,minor)
-// TAG-DIRECTIVE(handle,prefix)
-//
-// Example:
-//
-// %YAML 1.1
-// %TAG ! !foo
-// %TAG !yaml! tag:yaml.org,2002:
-// ---
-//
-// The correspoding sequence of tokens:
-//
-// STREAM-START(utf-8)
-// VERSION-DIRECTIVE(1,1)
-// TAG-DIRECTIVE("!","!foo")
-// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:")
-// DOCUMENT-START
-// STREAM-END
-//
-// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole
-// line.
-//
-// The document start and end indicators are represented by:
-//
-// DOCUMENT-START
-// DOCUMENT-END
-//
-// Note that if a YAML stream contains an implicit document (without '---'
-// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be
-// produced.
-//
-// In the following examples, we present whole documents together with the
-// produced tokens.
-//
-// 1. An implicit document:
-//
-// 'a scalar'
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// SCALAR("a scalar",single-quoted)
-// STREAM-END
-//
-// 2. An explicit document:
-//
-// ---
-// 'a scalar'
-// ...
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// DOCUMENT-START
-// SCALAR("a scalar",single-quoted)
-// DOCUMENT-END
-// STREAM-END
-//
-// 3. Several documents in a stream:
-//
-// 'a scalar'
-// ---
-// 'another scalar'
-// ---
-// 'yet another scalar'
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// SCALAR("a scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("another scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("yet another scalar",single-quoted)
-// STREAM-END
-//
-// We have already introduced the SCALAR token above. The following tokens are
-// used to describe aliases, anchors, tag, and scalars:
-//
-// ALIAS(anchor)
-// ANCHOR(anchor)
-// TAG(handle,suffix)
-// SCALAR(value,style)
-//
-// The following series of examples illustrate the usage of these tokens:
-//
-// 1. A recursive sequence:
-//
-// &A [ *A ]
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// ANCHOR("A")
-// FLOW-SEQUENCE-START
-// ALIAS("A")
-// FLOW-SEQUENCE-END
-// STREAM-END
-//
-// 2. A tagged scalar:
-//
-// !!float "3.14" # A good approximation.
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// TAG("!!","float")
-// SCALAR("3.14",double-quoted)
-// STREAM-END
-//
-// 3. Various scalar styles:
-//
-// --- # Implicit empty plain scalars do not produce tokens.
-// --- a plain scalar
-// --- 'a single-quoted scalar'
-// --- "a double-quoted scalar"
-// --- |-
-// a literal scalar
-// --- >-
-// a folded
-// scalar
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// DOCUMENT-START
-// DOCUMENT-START
-// SCALAR("a plain scalar",plain)
-// DOCUMENT-START
-// SCALAR("a single-quoted scalar",single-quoted)
-// DOCUMENT-START
-// SCALAR("a double-quoted scalar",double-quoted)
-// DOCUMENT-START
-// SCALAR("a literal scalar",literal)
-// DOCUMENT-START
-// SCALAR("a folded scalar",folded)
-// STREAM-END
-//
-// Now it's time to review collection-related tokens. We will start with
-// flow collections:
-//
-// FLOW-SEQUENCE-START
-// FLOW-SEQUENCE-END
-// FLOW-MAPPING-START
-// FLOW-MAPPING-END
-// FLOW-ENTRY
-// KEY
-// VALUE
-//
-// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and
-// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}'
-// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the
-// indicators '?' and ':', which are used for denoting mapping keys and values,
-// are represented by the KEY and VALUE tokens.
-//
-// The following examples show flow collections:
-//
-// 1. A flow sequence:
-//
-// [item 1, item 2, item 3]
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// FLOW-SEQUENCE-START
-// SCALAR("item 1",plain)
-// FLOW-ENTRY
-// SCALAR("item 2",plain)
-// FLOW-ENTRY
-// SCALAR("item 3",plain)
-// FLOW-SEQUENCE-END
-// STREAM-END
-//
-// 2. A flow mapping:
-//
-// {
-// a simple key: a value, # Note that the KEY token is produced.
-// ? a complex key: another value,
-// }
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// FLOW-MAPPING-START
-// KEY
-// SCALAR("a simple key",plain)
-// VALUE
-// SCALAR("a value",plain)
-// FLOW-ENTRY
-// KEY
-// SCALAR("a complex key",plain)
-// VALUE
-// SCALAR("another value",plain)
-// FLOW-ENTRY
-// FLOW-MAPPING-END
-// STREAM-END
-//
-// A simple key is a key which is not denoted by the '?' indicator. Note that
-// the Scanner still produce the KEY token whenever it encounters a simple key.
-//
-// For scanning block collections, the following tokens are used (note that we
-// repeat KEY and VALUE here):
-//
-// BLOCK-SEQUENCE-START
-// BLOCK-MAPPING-START
-// BLOCK-END
-// BLOCK-ENTRY
-// KEY
-// VALUE
-//
-// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation
-// increase that precedes a block collection (cf. the INDENT token in Python).
-// The token BLOCK-END denote indentation decrease that ends a block collection
-// (cf. the DEDENT token in Python). However YAML has some syntax pecularities
-// that makes detections of these tokens more complex.
-//
-// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators
-// '-', '?', and ':' correspondingly.
-//
-// The following examples show how the tokens BLOCK-SEQUENCE-START,
-// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner:
-//
-// 1. Block sequences:
-//
-// - item 1
-// - item 2
-// -
-// - item 3.1
-// - item 3.2
-// -
-// key 1: value 1
-// key 2: value 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-ENTRY
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 3.1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 3.2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// 2. Block mappings:
-//
-// a simple key: a value # The KEY token is produced here.
-// ? a complex key
-// : another value
-// a mapping:
-// key 1: value 1
-// key 2: value 2
-// a sequence:
-// - item 1
-// - item 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("a simple key",plain)
-// VALUE
-// SCALAR("a value",plain)
-// KEY
-// SCALAR("a complex key",plain)
-// VALUE
-// SCALAR("another value",plain)
-// KEY
-// SCALAR("a mapping",plain)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// KEY
-// SCALAR("a sequence",plain)
-// VALUE
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// YAML does not always require to start a new block collection from a new
-// line. If the current line contains only '-', '?', and ':' indicators, a new
-// block collection may start at the current line. The following examples
-// illustrate this case:
-//
-// 1. Collections in a sequence:
-//
-// - - item 1
-// - item 2
-// - key 1: value 1
-// key 2: value 2
-// - ? complex key
-// : complex value
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-ENTRY
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("complex key")
-// VALUE
-// SCALAR("complex value")
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// 2. Collections in a mapping:
-//
-// ? a sequence
-// : - item 1
-// - item 2
-// ? a mapping
-// : key 1: value 1
-// key 2: value 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("a sequence",plain)
-// VALUE
-// BLOCK-SEQUENCE-START
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-// KEY
-// SCALAR("a mapping",plain)
-// VALUE
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key 1",plain)
-// VALUE
-// SCALAR("value 1",plain)
-// KEY
-// SCALAR("key 2",plain)
-// VALUE
-// SCALAR("value 2",plain)
-// BLOCK-END
-// BLOCK-END
-// STREAM-END
-//
-// YAML also permits non-indented sequences if they are included into a block
-// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced:
-//
-// key:
-// - item 1 # BLOCK-SEQUENCE-START is NOT produced here.
-// - item 2
-//
-// Tokens:
-//
-// STREAM-START(utf-8)
-// BLOCK-MAPPING-START
-// KEY
-// SCALAR("key",plain)
-// VALUE
-// BLOCK-ENTRY
-// SCALAR("item 1",plain)
-// BLOCK-ENTRY
-// SCALAR("item 2",plain)
-// BLOCK-END
-//
-
-// Ensure that the buffer contains the required number of characters.
-// Return true on success, false on failure (reader error or memory error).
-func cache(parser *yaml_parser_t, length int) bool {
- // [Go] This was inlined: !cache(A, B) -> unread < B && !update(A, B)
- return parser.unread >= length || yaml_parser_update_buffer(parser, length)
-}
-
-// Advance the buffer pointer.
-func skip(parser *yaml_parser_t) {
- parser.mark.index++
- parser.mark.column++
- parser.unread--
- parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
-}
-
-func skip_line(parser *yaml_parser_t) {
- if is_crlf(parser.buffer, parser.buffer_pos) {
- parser.mark.index += 2
- parser.mark.column = 0
- parser.mark.line++
- parser.unread -= 2
- parser.buffer_pos += 2
- } else if is_break(parser.buffer, parser.buffer_pos) {
- parser.mark.index++
- parser.mark.column = 0
- parser.mark.line++
- parser.unread--
- parser.buffer_pos += width(parser.buffer[parser.buffer_pos])
- }
-}
-
-// Copy a character to a string buffer and advance pointers.
-func read(parser *yaml_parser_t, s []byte) []byte {
- w := width(parser.buffer[parser.buffer_pos])
- if w == 0 {
- panic("invalid character sequence")
- }
- if len(s) == 0 {
- s = make([]byte, 0, 32)
- }
- if w == 1 && len(s)+w <= cap(s) {
- s = s[:len(s)+1]
- s[len(s)-1] = parser.buffer[parser.buffer_pos]
- parser.buffer_pos++
- } else {
- s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...)
- parser.buffer_pos += w
- }
- parser.mark.index++
- parser.mark.column++
- parser.unread--
- return s
-}
-
-// Copy a line break character to a string buffer and advance pointers.
-func read_line(parser *yaml_parser_t, s []byte) []byte {
- buf := parser.buffer
- pos := parser.buffer_pos
- switch {
- case buf[pos] == '\r' && buf[pos+1] == '\n':
- // CR LF . LF
- s = append(s, '\n')
- parser.buffer_pos += 2
- parser.mark.index++
- parser.unread--
- case buf[pos] == '\r' || buf[pos] == '\n':
- // CR|LF . LF
- s = append(s, '\n')
- parser.buffer_pos += 1
- case buf[pos] == '\xC2' && buf[pos+1] == '\x85':
- // NEL . LF
- s = append(s, '\n')
- parser.buffer_pos += 2
- case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'):
- // LS|PS . LS|PS
- s = append(s, buf[parser.buffer_pos:pos+3]...)
- parser.buffer_pos += 3
- default:
- return s
- }
- parser.mark.index++
- parser.mark.column = 0
- parser.mark.line++
- parser.unread--
- return s
-}
-
-// Get the next token.
-func yaml_parser_scan(parser *yaml_parser_t, token *yaml_token_t) bool {
- // Erase the token object.
- *token = yaml_token_t{} // [Go] Is this necessary?
-
- // No tokens after STREAM-END or error.
- if parser.stream_end_produced || parser.error != yaml_NO_ERROR {
- return true
- }
-
- // Ensure that the tokens queue contains enough tokens.
- if !parser.token_available {
- if !yaml_parser_fetch_more_tokens(parser) {
- return false
- }
- }
-
- // Fetch the next token from the queue.
- *token = parser.tokens[parser.tokens_head]
- parser.tokens_head++
- parser.tokens_parsed++
- parser.token_available = false
-
- if token.typ == yaml_STREAM_END_TOKEN {
- parser.stream_end_produced = true
- }
- return true
-}
-
-// Set the scanner error and return false.
-func yaml_parser_set_scanner_error(parser *yaml_parser_t, context string, context_mark yaml_mark_t, problem string) bool {
- parser.error = yaml_SCANNER_ERROR
- parser.context = context
- parser.context_mark = context_mark
- parser.problem = problem
- parser.problem_mark = parser.mark
- return false
-}
-
-// yaml_parser_set_scanner_tag_error reports a scanner error raised while
-// parsing a tag (or a %TAG directive, when directive is true), using the
-// caller-supplied problem description. Always returns false.
-func yaml_parser_set_scanner_tag_error(parser *yaml_parser_t, directive bool, context_mark yaml_mark_t, problem string) bool {
- context := "while parsing a tag"
- if directive {
- context = "while parsing a %TAG directive"
- }
- // Pass the caller's problem through instead of a hard-coded
- // "did not find URI escaped octet" message, so non-URI tag errors
- // are reported accurately.
- return yaml_parser_set_scanner_error(parser, context, context_mark, problem)
-}
-
-// trace prints "+++" followed by args on entry and returns a closure that
-// prints "---" followed by the same args, intended for `defer trace(...)()`.
-func trace(args ...interface{}) func() {
- enter := append([]interface{}{"+++"}, args...)
- fmt.Println(enter...)
- leave := append([]interface{}{"---"}, args...)
- return func() { fmt.Println(leave...) }
-}
-
-// Ensure that the tokens queue contains at least one token which can be
-// returned to the Parser. Returns false on a scanner error.
-func yaml_parser_fetch_more_tokens(parser *yaml_parser_t) bool {
- // While we need more tokens to fetch, do it.
- for {
- // Check if we really need to fetch more tokens.
- need_more_tokens := false
-
- if parser.tokens_head == len(parser.tokens) {
- // Queue is empty.
- need_more_tokens = true
- } else {
- // Check if any potential simple key may occupy the head position.
- if !yaml_parser_stale_simple_keys(parser) {
- return false
- }
-
- for i := range parser.simple_keys {
- simple_key := &parser.simple_keys[i]
- if simple_key.possible && simple_key.token_number == parser.tokens_parsed {
- // The head token may still be turned into a KEY token,
- // so it cannot be handed out yet; keep fetching.
- need_more_tokens = true
- break
- }
- }
- }
-
- // We are finished.
- if !need_more_tokens {
- break
- }
- // Fetch the next token.
- if !yaml_parser_fetch_next_token(parser) {
- return false
- }
- }
-
- parser.token_available = true
- return true
-}
-
-// The dispatcher for token fetchers.
-// Inspects the character at the current position and delegates to the
-// appropriate fetch_* routine. Returns false on a scanner error.
-func yaml_parser_fetch_next_token(parser *yaml_parser_t) bool {
- // Ensure that the buffer is initialized.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check if we just started scanning. Fetch STREAM-START then.
- if !parser.stream_start_produced {
- return yaml_parser_fetch_stream_start(parser)
- }
-
- // Eat whitespaces and comments until we reach the next token.
- if !yaml_parser_scan_to_next_token(parser) {
- return false
- }
-
- // Remove obsolete potential simple keys.
- if !yaml_parser_stale_simple_keys(parser) {
- return false
- }
-
- // Check the indentation level against the current column.
- if !yaml_parser_unroll_indent(parser, parser.mark.column) {
- return false
- }
-
- // Ensure that the buffer contains at least 4 characters. 4 is the length
- // of the longest indicators ('--- ' and '... ').
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
-
- // Is it the end of the stream?
- if is_z(parser.buffer, parser.buffer_pos) {
- return yaml_parser_fetch_stream_end(parser)
- }
-
- // Is it a directive?
- if parser.mark.column == 0 && parser.buffer[parser.buffer_pos] == '%' {
- return yaml_parser_fetch_directive(parser)
- }
-
- // Snapshot the buffer; no refills happen below this point, so these
- // locals stay valid for the rest of the function.
- buf := parser.buffer
- pos := parser.buffer_pos
-
- // Is it the document start indicator?
- if parser.mark.column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && is_blankz(buf, pos+3) {
- return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_START_TOKEN)
- }
-
- // Is it the document end indicator?
- if parser.mark.column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && is_blankz(buf, pos+3) {
- return yaml_parser_fetch_document_indicator(parser, yaml_DOCUMENT_END_TOKEN)
- }
-
- // Is it the flow sequence start indicator?
- if buf[pos] == '[' {
- return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_SEQUENCE_START_TOKEN)
- }
-
- // Is it the flow mapping start indicator?
- if parser.buffer[parser.buffer_pos] == '{' {
- return yaml_parser_fetch_flow_collection_start(parser, yaml_FLOW_MAPPING_START_TOKEN)
- }
-
- // Is it the flow sequence end indicator?
- if parser.buffer[parser.buffer_pos] == ']' {
- return yaml_parser_fetch_flow_collection_end(parser,
- yaml_FLOW_SEQUENCE_END_TOKEN)
- }
-
- // Is it the flow mapping end indicator?
- if parser.buffer[parser.buffer_pos] == '}' {
- return yaml_parser_fetch_flow_collection_end(parser,
- yaml_FLOW_MAPPING_END_TOKEN)
- }
-
- // Is it the flow entry indicator?
- if parser.buffer[parser.buffer_pos] == ',' {
- return yaml_parser_fetch_flow_entry(parser)
- }
-
- // Is it the block entry indicator?
- if parser.buffer[parser.buffer_pos] == '-' && is_blankz(parser.buffer, parser.buffer_pos+1) {
- return yaml_parser_fetch_block_entry(parser)
- }
-
- // Is it the key indicator?
- if parser.buffer[parser.buffer_pos] == '?' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_key(parser)
- }
-
- // Is it the value indicator?
- if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_value(parser)
- }
-
- // Is it an alias?
- if parser.buffer[parser.buffer_pos] == '*' {
- return yaml_parser_fetch_anchor(parser, yaml_ALIAS_TOKEN)
- }
-
- // Is it an anchor?
- if parser.buffer[parser.buffer_pos] == '&' {
- return yaml_parser_fetch_anchor(parser, yaml_ANCHOR_TOKEN)
- }
-
- // Is it a tag?
- if parser.buffer[parser.buffer_pos] == '!' {
- return yaml_parser_fetch_tag(parser)
- }
-
- // Is it a literal scalar?
- if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 {
- return yaml_parser_fetch_block_scalar(parser, true)
- }
-
- // Is it a folded scalar?
- if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 {
- return yaml_parser_fetch_block_scalar(parser, false)
- }
-
- // Is it a single-quoted scalar?
- if parser.buffer[parser.buffer_pos] == '\'' {
- return yaml_parser_fetch_flow_scalar(parser, true)
- }
-
- // Is it a double-quoted scalar?
- if parser.buffer[parser.buffer_pos] == '"' {
- return yaml_parser_fetch_flow_scalar(parser, false)
- }
-
- // Is it a plain scalar?
- //
- // A plain scalar may start with any non-blank characters except
- //
- // '-', '?', ':', ',', '[', ']', '{', '}',
- // '#', '&', '*', '!', '|', '>', '\'', '\"',
- // '%', '@', '`'.
- //
- // In the block context (and, for the '-' indicator, in the flow context
- // too), it may also start with the characters
- //
- // '-', '?', ':'
- //
- // if it is followed by a non-space character.
- //
- // The last rule is more restrictive than the specification requires.
- // [Go] Make this logic more reasonable.
- //switch parser.buffer[parser.buffer_pos] {
- //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`':
- //}
- if !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' ||
- parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' ||
- parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
- parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' ||
- parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' ||
- parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '|' ||
- parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' ||
- parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' ||
- parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') ||
- (parser.buffer[parser.buffer_pos] == '-' && !is_blank(parser.buffer, parser.buffer_pos+1)) ||
- (parser.flow_level == 0 &&
- (parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') &&
- !is_blankz(parser.buffer, parser.buffer_pos+1)) {
- return yaml_parser_fetch_plain_scalar(parser)
- }
-
- // If we don't determine the token type so far, it is an error.
- return yaml_parser_set_scanner_error(parser,
- "while scanning for the next token", parser.mark,
- "found character that cannot start any token")
-}
-
-// Check the list of potential simple keys and remove the positions that
-// cannot contain simple keys anymore. Returns false (with a scanner error)
-// if a *required* simple key turns out to be stale.
-func yaml_parser_stale_simple_keys(parser *yaml_parser_t) bool {
- // Check for a potential simple key for each flow level.
- for i := range parser.simple_keys {
- simple_key := &parser.simple_keys[i]
-
- // The specification requires that a simple key
- //
- // - is limited to a single line,
- // - is shorter than 1024 characters.
- if simple_key.possible && (simple_key.mark.line < parser.mark.line || simple_key.mark.index+1024 < parser.mark.index) {
-
- // Check if the potential simple key to be removed is required.
- if simple_key.required {
- return yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", simple_key.mark,
- "could not find expected ':'")
- }
- simple_key.possible = false
- }
- }
- return true
-}
-
-// Check if a simple key may start at the current position and add it if
-// needed. The new key replaces any previous potential key at the current
-// flow level.
-func yaml_parser_save_simple_key(parser *yaml_parser_t) bool {
- // A simple key is required at the current position if the scanner is in
- // the block context and the current column coincides with the indentation
- // level.
-
- required := parser.flow_level == 0 && parser.indent == parser.mark.column
-
- // A simple key is required only when it is the first token in the current
- // line. Therefore it is always allowed. But we add a check anyway.
- if required && !parser.simple_key_allowed {
- panic("should not happen")
- }
-
- //
- // If the current position may start a simple key, save it.
- //
- if parser.simple_key_allowed {
- simple_key := yaml_simple_key_t{
- possible: true,
- required: required,
- // Queue position the KEY token would take if this key pans out.
- token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head),
- }
- simple_key.mark = parser.mark
-
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
- parser.simple_keys[len(parser.simple_keys)-1] = simple_key
- }
- return true
-}
-
-// Remove a potential simple key at the current flow level.
-// Returns false (with a scanner error) if that key was required.
-func yaml_parser_remove_simple_key(parser *yaml_parser_t) bool {
- i := len(parser.simple_keys) - 1
- if parser.simple_keys[i].possible {
- // If the key is required, it is an error.
- if parser.simple_keys[i].required {
- return yaml_parser_set_scanner_error(parser,
- "while scanning a simple key", parser.simple_keys[i].mark,
- "could not find expected ':'")
- }
- }
- // Remove the key from the stack.
- parser.simple_keys[i].possible = false
- return true
-}
-
-// Increase the flow level and resize the simple key list if needed.
-// Called when entering a flow collection ('[' or '{'). Always returns true.
-func yaml_parser_increase_flow_level(parser *yaml_parser_t) bool {
- // Reset the simple key on the next level.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
- // Increase the flow level.
- parser.flow_level++
- return true
-}
-
-// Decrease the flow level.
-// Called when leaving a flow collection (']' or '}'); a no-op at level 0.
-func yaml_parser_decrease_flow_level(parser *yaml_parser_t) bool {
- if parser.flow_level > 0 {
- parser.flow_level--
- // Drop the simple-key slot that belonged to the closed level.
- parser.simple_keys = parser.simple_keys[:len(parser.simple_keys)-1]
- }
- return true
-}
-
-// Push the current indentation level to the stack and set the new level
-// if the current column is greater than the indentation level. In this case,
-// append or insert the specified token into the token queue.
-func yaml_parser_roll_indent(parser *yaml_parser_t, column, number int, typ yaml_token_type_t, mark yaml_mark_t) bool {
- // In the flow context, do nothing.
- if parser.flow_level > 0 {
- return true
- }
-
- if parser.indent < column {
- // Push the current indentation level to the stack and set the new
- // indentation level.
- parser.indents = append(parser.indents, parser.indent)
- parser.indent = column
-
- // Create a token and insert it into the queue.
- token := yaml_token_t{
- typ: typ,
- start_mark: mark,
- end_mark: mark,
- }
- // number is an absolute token number (or -1 to append); convert it
- // to an offset within the in-memory queue before inserting.
- if number > -1 {
- number -= parser.tokens_parsed
- }
- yaml_insert_token(parser, number, &token)
- }
- return true
-}
-
-// Pop indentation levels from the indents stack until the current level
-// becomes less or equal to the column. For each indentation level, append
-// the BLOCK-END token.
-func yaml_parser_unroll_indent(parser *yaml_parser_t, column int) bool {
- // In the flow context, do nothing.
- if parser.flow_level > 0 {
- return true
- }
-
- // Loop through the indentation levels in the stack.
- for parser.indent > column {
- // Create a token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_BLOCK_END_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- }
- yaml_insert_token(parser, -1, &token)
-
- // Pop the indentation level.
- parser.indent = parser.indents[len(parser.indents)-1]
- parser.indents = parser.indents[:len(parser.indents)-1]
- }
- return true
-}
-
-// Initialize the scanner and produce the STREAM-START token.
-// This is always the first token fetched; it records the input encoding.
-func yaml_parser_fetch_stream_start(parser *yaml_parser_t) bool {
-
- // Set the initial indentation.
- parser.indent = -1
-
- // Initialize the simple key stack.
- parser.simple_keys = append(parser.simple_keys, yaml_simple_key_t{})
-
- // A simple key is allowed at the beginning of the stream.
- parser.simple_key_allowed = true
-
- // We have started.
- parser.stream_start_produced = true
-
- // Create the STREAM-START token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_STREAM_START_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- encoding: parser.encoding,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the STREAM-END token and shut down the scanner.
-// Returns false if a required simple key was left dangling.
-func yaml_parser_fetch_stream_end(parser *yaml_parser_t) bool {
-
- // Force new line.
- if parser.mark.column != 0 {
- parser.mark.column = 0
- parser.mark.line++
- }
-
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Create the STREAM-END token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_STREAM_END_TOKEN,
- start_mark: parser.mark,
- end_mark: parser.mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token.
-// A directive resets the indentation and forbids a following simple key.
-func yaml_parser_fetch_directive(parser *yaml_parser_t) bool {
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token.
- token := yaml_token_t{}
- if !yaml_parser_scan_directive(parser, &token) {
- return false
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the DOCUMENT-START or DOCUMENT-END token.
-// The caller has already verified the three-character indicator ('---' or
-// '...') at column 0.
-func yaml_parser_fetch_document_indicator(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // Reset the indentation level.
- if !yaml_parser_unroll_indent(parser, -1) {
- return false
- }
-
- // Reset simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- parser.simple_key_allowed = false
-
- // Consume the token.
- start_mark := parser.mark
-
- // Skip the three indicator characters.
- skip(parser)
- skip(parser)
- skip(parser)
-
- end_mark := parser.mark
-
- // Create the DOCUMENT-START or DOCUMENT-END token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
-func yaml_parser_fetch_flow_collection_start(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // The indicators '[' and '{' may start a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // Increase the flow level.
- if !yaml_parser_increase_flow_level(parser) {
- return false
- }
-
- // A simple key may follow the indicators '[' and '{'.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the FLOW-SEQUENCE-START or FLOW-MAPPING-START token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
-func yaml_parser_fetch_flow_collection_end(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // Reset any potential simple key on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Decrease the flow level.
- if !yaml_parser_decrease_flow_level(parser) {
- return false
- }
-
- // No simple keys after the indicators ']' and '}'.
- parser.simple_key_allowed = false
-
- // Consume the token.
-
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the FLOW-SEQUENCE-END or FLOW-MAPPING-END token.
- token := yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- // Append the token to the queue.
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the FLOW-ENTRY token (the ',' separator inside flow collections).
-func yaml_parser_fetch_flow_entry(parser *yaml_parser_t) bool {
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after ','.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the FLOW-ENTRY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_FLOW_ENTRY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the BLOCK-ENTRY token (the '- ' sequence-entry indicator).
-// In the block context this may also open a new BLOCK-SEQUENCE-START level.
-func yaml_parser_fetch_block_entry(parser *yaml_parser_t) bool {
- // Check if the scanner is in the block context.
- if parser.flow_level == 0 {
- // Check if we are allowed to start a new entry.
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "block sequence entries are not allowed in this context")
- }
- // Add the BLOCK-SEQUENCE-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_SEQUENCE_START_TOKEN, parser.mark) {
- return false
- }
- } else {
- // It is an error for the '-' indicator to occur in the flow context,
- // but we let the Parser detect and report about it because the Parser
- // is able to point to the context.
- }
-
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after '-'.
- parser.simple_key_allowed = true
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the BLOCK-ENTRY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_BLOCK_ENTRY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the KEY token (the explicit '?' key indicator).
-func yaml_parser_fetch_key(parser *yaml_parser_t) bool {
-
- // In the block context, additional checks are required.
- if parser.flow_level == 0 {
- // Check if we are allowed to start a new key (not necessarily simple).
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "mapping keys are not allowed in this context")
- }
- // Add the BLOCK-MAPPING-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
- return false
- }
- }
-
- // Reset any potential simple keys on the current flow level.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // Simple keys are allowed after '?' in the block context.
- parser.simple_key_allowed = parser.flow_level == 0
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the KEY token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_KEY_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the VALUE token (the ':' indicator). If a simple key was pending,
-// a KEY token is retroactively inserted at the position the key started.
-func yaml_parser_fetch_value(parser *yaml_parser_t) bool {
-
- simple_key := &parser.simple_keys[len(parser.simple_keys)-1]
-
- // Have we found a simple key?
- if simple_key.possible {
- // Create the KEY token and insert it into the queue.
- token := yaml_token_t{
- typ: yaml_KEY_TOKEN,
- start_mark: simple_key.mark,
- end_mark: simple_key.mark,
- }
- // Insert at the saved position, not at the tail of the queue.
- yaml_insert_token(parser, simple_key.token_number-parser.tokens_parsed, &token)
-
- // In the block context, we may need to add the BLOCK-MAPPING-START token.
- if !yaml_parser_roll_indent(parser, simple_key.mark.column,
- simple_key.token_number,
- yaml_BLOCK_MAPPING_START_TOKEN, simple_key.mark) {
- return false
- }
-
- // Remove the simple key.
- simple_key.possible = false
-
- // A simple key cannot follow another simple key.
- parser.simple_key_allowed = false
-
- } else {
- // The ':' indicator follows a complex key.
-
- // In the block context, extra checks are required.
- if parser.flow_level == 0 {
-
- // Check if we are allowed to start a complex value.
- if !parser.simple_key_allowed {
- return yaml_parser_set_scanner_error(parser, "", parser.mark,
- "mapping values are not allowed in this context")
- }
-
- // Add the BLOCK-MAPPING-START token if needed.
- if !yaml_parser_roll_indent(parser, parser.mark.column, -1, yaml_BLOCK_MAPPING_START_TOKEN, parser.mark) {
- return false
- }
- }
-
- // Simple keys after ':' are allowed in the block context.
- parser.simple_key_allowed = parser.flow_level == 0
- }
-
- // Consume the token.
- start_mark := parser.mark
- skip(parser)
- end_mark := parser.mark
-
- // Create the VALUE token and append it to the queue.
- token := yaml_token_t{
- typ: yaml_VALUE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the ALIAS or ANCHOR token ('*' or '&' respectively; typ selects which).
-func yaml_parser_fetch_anchor(parser *yaml_parser_t, typ yaml_token_type_t) bool {
- // An anchor or an alias could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow an anchor or an alias.
- parser.simple_key_allowed = false
-
- // Create the ALIAS or ANCHOR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_anchor(parser, &token, typ) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the TAG token (the '!' indicator).
-func yaml_parser_fetch_tag(parser *yaml_parser_t) bool {
- // A tag could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a tag.
- parser.simple_key_allowed = false
-
- // Create the TAG token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_tag(parser, &token) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens.
-// literal selects '|' (literal) vs '>' (folded) style.
-func yaml_parser_fetch_block_scalar(parser *yaml_parser_t, literal bool) bool {
- // Remove any potential simple keys.
- if !yaml_parser_remove_simple_key(parser) {
- return false
- }
-
- // A simple key may follow a block scalar.
- parser.simple_key_allowed = true
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_block_scalar(parser, &token, literal) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens.
-// single selects '\'' vs '"' style.
-func yaml_parser_fetch_flow_scalar(parser *yaml_parser_t, single bool) bool {
- // A quoted scalar could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a flow scalar.
- parser.simple_key_allowed = false
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_flow_scalar(parser, &token, single) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Produce the SCALAR(...,plain) token.
-func yaml_parser_fetch_plain_scalar(parser *yaml_parser_t) bool {
- // A plain scalar could be a simple key.
- if !yaml_parser_save_simple_key(parser) {
- return false
- }
-
- // A simple key cannot follow a plain scalar.
- parser.simple_key_allowed = false
-
- // Create the SCALAR token and append it to the queue.
- var token yaml_token_t
- if !yaml_parser_scan_plain_scalar(parser, &token) {
- return false
- }
- yaml_insert_token(parser, -1, &token)
- return true
-}
-
-// Eat whitespaces and comments until the next token is found.
-// Returns false only if the underlying buffer cannot be refilled.
-func yaml_parser_scan_to_next_token(parser *yaml_parser_t) bool {
-
- // Until the next token is not found.
- for {
- // Allow the BOM mark to start a line.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.mark.column == 0 && is_bom(parser.buffer, parser.buffer_pos) {
- skip(parser)
- }
-
- // Eat whitespaces.
- // Tabs are allowed:
- // - in the flow context
- // - in the block context, but not at the beginning of the line or
- // after '-', '?', or ':' (complex value).
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Eat a comment until a line break.
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // If it is a line break, eat it.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
-
- // In the block context, a new line may start a simple key.
- if parser.flow_level == 0 {
- parser.simple_key_allowed = true
- }
- } else {
- break // We have found a token.
- }
- }
-
- return true
-}
-
-// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-// Returns false on any scanner error; any directive name other than
-// "YAML" or "TAG" is rejected.
-func yaml_parser_scan_directive(parser *yaml_parser_t, token *yaml_token_t) bool {
- // Eat '%'.
- start_mark := parser.mark
- skip(parser)
-
- // Scan the directive name.
- var name []byte
- if !yaml_parser_scan_directive_name(parser, start_mark, &name) {
- return false
- }
-
- // Is it a YAML directive?
- if bytes.Equal(name, []byte("YAML")) {
- // Scan the VERSION directive value.
- var major, minor int8
- if !yaml_parser_scan_version_directive_value(parser, start_mark, &major, &minor) {
- return false
- }
- end_mark := parser.mark
-
- // Create a VERSION-DIRECTIVE token.
- *token = yaml_token_t{
- typ: yaml_VERSION_DIRECTIVE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- major: major,
- minor: minor,
- }
-
- // Is it a TAG directive?
- } else if bytes.Equal(name, []byte("TAG")) {
- // Scan the TAG directive value.
- var handle, prefix []byte
- if !yaml_parser_scan_tag_directive_value(parser, start_mark, &handle, &prefix) {
- return false
- }
- end_mark := parser.mark
-
- // Create a TAG-DIRECTIVE token.
- *token = yaml_token_t{
- typ: yaml_TAG_DIRECTIVE_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: handle,
- prefix: prefix,
- }
-
- // Unknown directive.
- } else {
- // Fixed typo in the user-facing error message ("uknown" -> "unknown").
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "found unknown directive name")
- return false
- }
-
- // Eat the rest of the line including any comments.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // Check if we are at the end of the line.
- if !is_breakz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "did not find expected comment or line break")
- return false
- }
-
- // Eat a line break.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- }
-
- return true
-}
-
-// Scan the directive name.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^
-//
-// On success, stores the (non-empty, alphanumeric) name into *name.
-func yaml_parser_scan_directive_name(parser *yaml_parser_t, start_mark yaml_mark_t, name *[]byte) bool {
- // Consume the directive name.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- var s []byte
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the name is empty.
- if len(s) == 0 {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "could not find expected directive name")
- return false
- }
-
- // Check for a blank character after the name.
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a directive",
- start_mark, "found unexpected non-alphabetical character")
- return false
- }
- *name = s
- return true
-}
-
-// Scan the value of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^^^^^^
-//
-// On success, stores the parsed components into *major and *minor.
-func yaml_parser_scan_version_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, major, minor *int8) bool {
- // Eat whitespaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Consume the major version number.
- if !yaml_parser_scan_version_directive_number(parser, start_mark, major) {
- return false
- }
-
- // Eat '.'.
- if parser.buffer[parser.buffer_pos] != '.' {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "did not find expected digit or '.' character")
- }
-
- skip(parser)
-
- // Consume the minor version number.
- if !yaml_parser_scan_version_directive_number(parser, start_mark, minor) {
- return false
- }
- return true
-}
-
-// max_number_length is the maximum number of digits accepted in one
-// component of a %YAML version number.
-const max_number_length = 2
-
-// Scan the version number of VERSION-DIRECTIVE.
-//
-// Scope:
-// %YAML 1.1 # a comment \n
-// ^
-// %YAML 1.1 # a comment \n
-// ^
-//
-// On success, stores the parsed value into *number.
-func yaml_parser_scan_version_directive_number(parser *yaml_parser_t, start_mark yaml_mark_t, number *int8) bool {
-
- // Repeat while the next character is digit.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- var value, length int8
- for is_digit(parser.buffer, parser.buffer_pos) {
- // Check if the number is too long.
- length++
- if length > max_number_length {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "found extremely long version number")
- }
- value = value*10 + int8(as_digit(parser.buffer, parser.buffer_pos))
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the number was present.
- if length == 0 {
- return yaml_parser_set_scanner_error(parser, "while scanning a %YAML directive",
- start_mark, "did not find expected version number")
- }
- *number = value
- return true
-}
-
-// Scan the value of a TAG-DIRECTIVE token.
-//
-// Scope:
-// %TAG !yaml! tag:yaml.org,2002: \n
-// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-//
-// On success, stores the tag handle and prefix into *handle and *prefix.
-func yaml_parser_scan_tag_directive_value(parser *yaml_parser_t, start_mark yaml_mark_t, handle, prefix *[]byte) bool {
- var handle_value, prefix_value []byte
-
- // Eat whitespaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Scan a handle.
- if !yaml_parser_scan_tag_handle(parser, true, start_mark, &handle_value) {
- return false
- }
-
- // Expect a whitespace.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blank(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
- start_mark, "did not find expected whitespace")
- return false
- }
-
- // Eat whitespaces.
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Scan a prefix.
- if !yaml_parser_scan_tag_uri(parser, true, nil, start_mark, &prefix_value) {
- return false
- }
-
- // Expect a whitespace or line break.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a %TAG directive",
- start_mark, "did not find expected whitespace or line break")
- return false
- }
-
- // Publish the results only after both parts scanned successfully.
- *handle = handle_value
- *prefix = prefix_value
- return true
-}
-
-func yaml_parser_scan_anchor(parser *yaml_parser_t, token *yaml_token_t, typ yaml_token_type_t) bool {
- var s []byte
-
- // Eat the indicator character.
- start_mark := parser.mark
- skip(parser)
-
- // Consume the value.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- end_mark := parser.mark
-
- /*
- * Check if length of the anchor is greater than 0 and it is followed by
- * a whitespace character or one of the indicators:
- *
- * '?', ':', ',', ']', '}', '%', '@', '`'.
- */
-
- if len(s) == 0 ||
- !(is_blankz(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' ||
- parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' ||
- parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' ||
- parser.buffer[parser.buffer_pos] == '`') {
- context := "while scanning an alias"
- if typ == yaml_ANCHOR_TOKEN {
- context = "while scanning an anchor"
- }
- yaml_parser_set_scanner_error(parser, context, start_mark,
- "did not find expected alphabetic or numeric character")
- return false
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: typ,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- }
-
- return true
-}
-
-/*
- * Scan a TAG token.
- */
-
-func yaml_parser_scan_tag(parser *yaml_parser_t, token *yaml_token_t) bool {
- var handle, suffix []byte
-
- start_mark := parser.mark
-
- // Check if the tag is in the canonical form.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- if parser.buffer[parser.buffer_pos+1] == '<' {
- // Keep the handle as ''
-
- // Eat '!<'
- skip(parser)
- skip(parser)
-
- // Consume the tag value.
- if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
- return false
- }
-
- // Check for '>' and eat it.
- if parser.buffer[parser.buffer_pos] != '>' {
- yaml_parser_set_scanner_error(parser, "while scanning a tag",
- start_mark, "did not find the expected '>'")
- return false
- }
-
- skip(parser)
- } else {
- // The tag has either the '!suffix' or the '!handle!suffix' form.
-
- // First, try to scan a handle.
- if !yaml_parser_scan_tag_handle(parser, false, start_mark, &handle) {
- return false
- }
-
- // Check if it is, indeed, handle.
- if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' {
- // Scan the suffix now.
- if !yaml_parser_scan_tag_uri(parser, false, nil, start_mark, &suffix) {
- return false
- }
- } else {
- // It wasn't a handle after all. Scan the rest of the tag.
- if !yaml_parser_scan_tag_uri(parser, false, handle, start_mark, &suffix) {
- return false
- }
-
- // Set the handle to '!'.
- handle = []byte{'!'}
-
- // A special case: the '!' tag. Set the handle to '' and the
- // suffix to '!'.
- if len(suffix) == 0 {
- handle, suffix = suffix, handle
- }
- }
- }
-
- // Check the character which ends the tag.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if !is_blankz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a tag",
- start_mark, "did not find expected whitespace or line break")
- return false
- }
-
- end_mark := parser.mark
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_TAG_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: handle,
- suffix: suffix,
- }
- return true
-}
-
-// Scan a tag handle.
-func yaml_parser_scan_tag_handle(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, handle *[]byte) bool {
- // Check the initial '!' character.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.buffer[parser.buffer_pos] != '!' {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected '!'")
- return false
- }
-
- var s []byte
-
- // Copy the '!' character.
- s = read(parser, s)
-
- // Copy all subsequent alphabetical and numerical characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_alpha(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the trailing character is '!' and copy it.
- if parser.buffer[parser.buffer_pos] == '!' {
- s = read(parser, s)
- } else {
- // It's either the '!' tag or not really a tag handle. If it's a %TAG
- // directive, it's an error. If it's a tag token, it must be a part of URI.
- if directive && !(s[0] == '!' && s[1] == 0) {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected '!'")
- return false
- }
- }
-
- *handle = s
- return true
-}
-
-// Scan a tag.
-func yaml_parser_scan_tag_uri(parser *yaml_parser_t, directive bool, head []byte, start_mark yaml_mark_t, uri *[]byte) bool {
- //size_t length = head ? strlen((char *)head) : 0
- var s []byte
-
- // Copy the head if needed.
- //
- // Note that we don't copy the leading '!' character.
- if len(head) > 1 {
- s = append(s, head[1:]...)
- }
-
- // Scan the tag.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // The set of characters that may appear in URI is as follows:
- //
- // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&',
- // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']',
- // '%'.
- // [Go] Convert this into more reasonable logic.
- for is_alpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' ||
- parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' ||
- parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' ||
- parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' ||
- parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' ||
- parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' ||
- parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' ||
- parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' ||
- parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' ||
- parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' ||
- parser.buffer[parser.buffer_pos] == '%' {
- // Check if it is a URI-escape sequence.
- if parser.buffer[parser.buffer_pos] == '%' {
- if !yaml_parser_scan_uri_escapes(parser, directive, start_mark, &s) {
- return false
- }
- } else {
- s = read(parser, s)
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check if the tag is non-empty.
- if len(s) == 0 {
- yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find expected tag URI")
- return false
- }
- *uri = s
- return true
-}
-
-// Decode an URI-escape sequence corresponding to a single UTF-8 character.
-func yaml_parser_scan_uri_escapes(parser *yaml_parser_t, directive bool, start_mark yaml_mark_t, s *[]byte) bool {
-
- // Decode the required number of characters.
- w := 1024
- for w > 0 {
- // Check for a URI-escaped octet.
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
- return false
- }
-
- if !(parser.buffer[parser.buffer_pos] == '%' &&
- is_hex(parser.buffer, parser.buffer_pos+1) &&
- is_hex(parser.buffer, parser.buffer_pos+2)) {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "did not find URI escaped octet")
- }
-
- // Get the octet.
- octet := byte((as_hex(parser.buffer, parser.buffer_pos+1) << 4) + as_hex(parser.buffer, parser.buffer_pos+2))
-
- // If it is the leading octet, determine the length of the UTF-8 sequence.
- if w == 1024 {
- w = width(octet)
- if w == 0 {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "found an incorrect leading UTF-8 octet")
- }
- } else {
- // Check if the trailing octet is correct.
- if octet&0xC0 != 0x80 {
- return yaml_parser_set_scanner_tag_error(parser, directive,
- start_mark, "found an incorrect trailing UTF-8 octet")
- }
- }
-
- // Copy the octet and move the pointers.
- *s = append(*s, octet)
- skip(parser)
- skip(parser)
- skip(parser)
- w--
- }
- return true
-}
-
-// Scan a block scalar.
-func yaml_parser_scan_block_scalar(parser *yaml_parser_t, token *yaml_token_t, literal bool) bool {
- // Eat the indicator '|' or '>'.
- start_mark := parser.mark
- skip(parser)
-
- // Scan the additional block scalar indicators.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- // Check for a chomping indicator.
- var chomping, increment int
- if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
- // Set the chomping method and eat the indicator.
- if parser.buffer[parser.buffer_pos] == '+' {
- chomping = +1
- } else {
- chomping = -1
- }
- skip(parser)
-
- // Check for an indentation indicator.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if is_digit(parser.buffer, parser.buffer_pos) {
- // Check that the indentation is greater than 0.
- if parser.buffer[parser.buffer_pos] == '0' {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found an indentation indicator equal to 0")
- return false
- }
-
- // Get the indentation level and eat the indicator.
- increment = as_digit(parser.buffer, parser.buffer_pos)
- skip(parser)
- }
-
- } else if is_digit(parser.buffer, parser.buffer_pos) {
- // Do the same as above, but in the opposite order.
-
- if parser.buffer[parser.buffer_pos] == '0' {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found an indentation indicator equal to 0")
- return false
- }
- increment = as_digit(parser.buffer, parser.buffer_pos)
- skip(parser)
-
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' {
- if parser.buffer[parser.buffer_pos] == '+' {
- chomping = +1
- } else {
- chomping = -1
- }
- skip(parser)
- }
- }
-
- // Eat whitespaces and comments to the end of the line.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for is_blank(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- if parser.buffer[parser.buffer_pos] == '#' {
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- }
-
- // Check if we are at the end of the line.
- if !is_breakz(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "did not find expected comment or line break")
- return false
- }
-
- // Eat a line break.
- if is_break(parser.buffer, parser.buffer_pos) {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- skip_line(parser)
- }
-
- end_mark := parser.mark
-
- // Set the indentation level if it was specified.
- var indent int
- if increment > 0 {
- if parser.indent >= 0 {
- indent = parser.indent + increment
- } else {
- indent = increment
- }
- }
-
- // Scan the leading line breaks and determine the indentation level if needed.
- var s, leading_break, trailing_breaks []byte
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
- return false
- }
-
- // Scan the block scalar content.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- var leading_blank, trailing_blank bool
- for parser.mark.column == indent && !is_z(parser.buffer, parser.buffer_pos) {
- // We are at the beginning of a non-empty line.
-
- // Is it a trailing whitespace?
- trailing_blank = is_blank(parser.buffer, parser.buffer_pos)
-
- // Check if we need to fold the leading line break.
- if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' {
- // Do we need to join the lines by space?
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- }
- } else {
- s = append(s, leading_break...)
- }
- leading_break = leading_break[:0]
-
- // Append the remaining line breaks.
- s = append(s, trailing_breaks...)
- trailing_breaks = trailing_breaks[:0]
-
- // Is it a leading whitespace?
- leading_blank = is_blank(parser.buffer, parser.buffer_pos)
-
- // Consume the current line.
- for !is_breakz(parser.buffer, parser.buffer_pos) {
- s = read(parser, s)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- leading_break = read_line(parser, leading_break)
-
- // Eat the following indentation spaces and line breaks.
- if !yaml_parser_scan_block_scalar_breaks(parser, &indent, &trailing_breaks, start_mark, &end_mark) {
- return false
- }
- }
-
- // Chomp the tail.
- if chomping != -1 {
- s = append(s, leading_break...)
- }
- if chomping == 1 {
- s = append(s, trailing_breaks...)
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_LITERAL_SCALAR_STYLE,
- }
- if !literal {
- token.style = yaml_FOLDED_SCALAR_STYLE
- }
- return true
-}
-
-// Scan indentation spaces and line breaks for a block scalar. Determine the
-// indentation level if needed.
-func yaml_parser_scan_block_scalar_breaks(parser *yaml_parser_t, indent *int, breaks *[]byte, start_mark yaml_mark_t, end_mark *yaml_mark_t) bool {
- *end_mark = parser.mark
-
- // Eat the indentation spaces and line breaks.
- max_indent := 0
- for {
- // Eat the indentation spaces.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- for (*indent == 0 || parser.mark.column < *indent) && is_space(parser.buffer, parser.buffer_pos) {
- skip(parser)
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
- if parser.mark.column > max_indent {
- max_indent = parser.mark.column
- }
-
- // Check for a tab character messing the indentation.
- if (*indent == 0 || parser.mark.column < *indent) && is_tab(parser.buffer, parser.buffer_pos) {
- return yaml_parser_set_scanner_error(parser, "while scanning a block scalar",
- start_mark, "found a tab character where an indentation space is expected")
- }
-
- // Have we found a non-empty line?
- if !is_break(parser.buffer, parser.buffer_pos) {
- break
- }
-
- // Consume the line break.
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- // [Go] Should really be returning breaks instead.
- *breaks = read_line(parser, *breaks)
- *end_mark = parser.mark
- }
-
- // Determine the indentation level if needed.
- if *indent == 0 {
- *indent = max_indent
- if *indent < parser.indent+1 {
- *indent = parser.indent + 1
- }
- if *indent < 1 {
- *indent = 1
- }
- }
- return true
-}
-
-// Scan a quoted scalar.
-func yaml_parser_scan_flow_scalar(parser *yaml_parser_t, token *yaml_token_t, single bool) bool {
- // Eat the left quote.
- start_mark := parser.mark
- skip(parser)
-
- // Consume the content of the quoted scalar.
- var s, leading_break, trailing_breaks, whitespaces []byte
- for {
- // Check that there are no document indicators at the beginning of the line.
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
-
- if parser.mark.column == 0 &&
- ((parser.buffer[parser.buffer_pos+0] == '-' &&
- parser.buffer[parser.buffer_pos+1] == '-' &&
- parser.buffer[parser.buffer_pos+2] == '-') ||
- (parser.buffer[parser.buffer_pos+0] == '.' &&
- parser.buffer[parser.buffer_pos+1] == '.' &&
- parser.buffer[parser.buffer_pos+2] == '.')) &&
- is_blankz(parser.buffer, parser.buffer_pos+3) {
- yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
- start_mark, "found unexpected document indicator")
- return false
- }
-
- // Check for EOF.
- if is_z(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a quoted scalar",
- start_mark, "found unexpected end of stream")
- return false
- }
-
- // Consume non-blank characters.
- leading_blanks := false
- for !is_blankz(parser.buffer, parser.buffer_pos) {
- if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' {
- // Is is an escaped single quote.
- s = append(s, '\'')
- skip(parser)
- skip(parser)
-
- } else if single && parser.buffer[parser.buffer_pos] == '\'' {
- // It is a right single quote.
- break
- } else if !single && parser.buffer[parser.buffer_pos] == '"' {
- // It is a right double quote.
- break
-
- } else if !single && parser.buffer[parser.buffer_pos] == '\\' && is_break(parser.buffer, parser.buffer_pos+1) {
- // It is an escaped line break.
- if parser.unread < 3 && !yaml_parser_update_buffer(parser, 3) {
- return false
- }
- skip(parser)
- skip_line(parser)
- leading_blanks = true
- break
-
- } else if !single && parser.buffer[parser.buffer_pos] == '\\' {
- // It is an escape sequence.
- code_length := 0
-
- // Check the escape character.
- switch parser.buffer[parser.buffer_pos+1] {
- case '0':
- s = append(s, 0)
- case 'a':
- s = append(s, '\x07')
- case 'b':
- s = append(s, '\x08')
- case 't', '\t':
- s = append(s, '\x09')
- case 'n':
- s = append(s, '\x0A')
- case 'v':
- s = append(s, '\x0B')
- case 'f':
- s = append(s, '\x0C')
- case 'r':
- s = append(s, '\x0D')
- case 'e':
- s = append(s, '\x1B')
- case ' ':
- s = append(s, '\x20')
- case '"':
- s = append(s, '"')
- case '\'':
- s = append(s, '\'')
- case '\\':
- s = append(s, '\\')
- case 'N': // NEL (#x85)
- s = append(s, '\xC2')
- s = append(s, '\x85')
- case '_': // #xA0
- s = append(s, '\xC2')
- s = append(s, '\xA0')
- case 'L': // LS (#x2028)
- s = append(s, '\xE2')
- s = append(s, '\x80')
- s = append(s, '\xA8')
- case 'P': // PS (#x2029)
- s = append(s, '\xE2')
- s = append(s, '\x80')
- s = append(s, '\xA9')
- case 'x':
- code_length = 2
- case 'u':
- code_length = 4
- case 'U':
- code_length = 8
- default:
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "found unknown escape character")
- return false
- }
-
- skip(parser)
- skip(parser)
-
- // Consume an arbitrary escape code.
- if code_length > 0 {
- var value int
-
- // Scan the character value.
- if parser.unread < code_length && !yaml_parser_update_buffer(parser, code_length) {
- return false
- }
- for k := 0; k < code_length; k++ {
- if !is_hex(parser.buffer, parser.buffer_pos+k) {
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "did not find expected hexdecimal number")
- return false
- }
- value = (value << 4) + as_hex(parser.buffer, parser.buffer_pos+k)
- }
-
- // Check the value and write the character.
- if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF {
- yaml_parser_set_scanner_error(parser, "while parsing a quoted scalar",
- start_mark, "found invalid Unicode character escape code")
- return false
- }
- if value <= 0x7F {
- s = append(s, byte(value))
- } else if value <= 0x7FF {
- s = append(s, byte(0xC0+(value>>6)))
- s = append(s, byte(0x80+(value&0x3F)))
- } else if value <= 0xFFFF {
- s = append(s, byte(0xE0+(value>>12)))
- s = append(s, byte(0x80+((value>>6)&0x3F)))
- s = append(s, byte(0x80+(value&0x3F)))
- } else {
- s = append(s, byte(0xF0+(value>>18)))
- s = append(s, byte(0x80+((value>>12)&0x3F)))
- s = append(s, byte(0x80+((value>>6)&0x3F)))
- s = append(s, byte(0x80+(value&0x3F)))
- }
-
- // Advance the pointer.
- for k := 0; k < code_length; k++ {
- skip(parser)
- }
- }
- } else {
- // It is a non-escaped non-blank character.
- s = read(parser, s)
- }
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- }
-
- // Check if we are at the end of the scalar.
- if single {
- if parser.buffer[parser.buffer_pos] == '\'' {
- break
- }
- } else {
- if parser.buffer[parser.buffer_pos] == '"' {
- break
- }
- }
-
- // Consume blank characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
- if is_blank(parser.buffer, parser.buffer_pos) {
- // Consume a space or a tab character.
- if !leading_blanks {
- whitespaces = read(parser, whitespaces)
- } else {
- skip(parser)
- }
- } else {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- // Check if it is a first line break.
- if !leading_blanks {
- whitespaces = whitespaces[:0]
- leading_break = read_line(parser, leading_break)
- leading_blanks = true
- } else {
- trailing_breaks = read_line(parser, trailing_breaks)
- }
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Join the whitespaces or fold line breaks.
- if leading_blanks {
- // Do we need to fold line breaks?
- if len(leading_break) > 0 && leading_break[0] == '\n' {
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- } else {
- s = append(s, trailing_breaks...)
- }
- } else {
- s = append(s, leading_break...)
- s = append(s, trailing_breaks...)
- }
- trailing_breaks = trailing_breaks[:0]
- leading_break = leading_break[:0]
- } else {
- s = append(s, whitespaces...)
- whitespaces = whitespaces[:0]
- }
- }
-
- // Eat the right quote.
- skip(parser)
- end_mark := parser.mark
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_SINGLE_QUOTED_SCALAR_STYLE,
- }
- if !single {
- token.style = yaml_DOUBLE_QUOTED_SCALAR_STYLE
- }
- return true
-}
-
-// Scan a plain scalar.
-func yaml_parser_scan_plain_scalar(parser *yaml_parser_t, token *yaml_token_t) bool {
-
- var s, leading_break, trailing_breaks, whitespaces []byte
- var leading_blanks bool
- var indent = parser.indent + 1
-
- start_mark := parser.mark
- end_mark := parser.mark
-
- // Consume the content of the plain scalar.
- for {
- // Check for a document indicator.
- if parser.unread < 4 && !yaml_parser_update_buffer(parser, 4) {
- return false
- }
- if parser.mark.column == 0 &&
- ((parser.buffer[parser.buffer_pos+0] == '-' &&
- parser.buffer[parser.buffer_pos+1] == '-' &&
- parser.buffer[parser.buffer_pos+2] == '-') ||
- (parser.buffer[parser.buffer_pos+0] == '.' &&
- parser.buffer[parser.buffer_pos+1] == '.' &&
- parser.buffer[parser.buffer_pos+2] == '.')) &&
- is_blankz(parser.buffer, parser.buffer_pos+3) {
- break
- }
-
- // Check for a comment.
- if parser.buffer[parser.buffer_pos] == '#' {
- break
- }
-
- // Consume non-blank characters.
- for !is_blankz(parser.buffer, parser.buffer_pos) {
-
- // Check for 'x:x' in the flow context. TODO: Fix the test "spec-08-13".
- if parser.flow_level > 0 &&
- parser.buffer[parser.buffer_pos] == ':' &&
- !is_blankz(parser.buffer, parser.buffer_pos+1) {
- yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
- start_mark, "found unexpected ':'")
- return false
- }
-
- // Check for indicators that may end a plain scalar.
- if (parser.buffer[parser.buffer_pos] == ':' && is_blankz(parser.buffer, parser.buffer_pos+1)) ||
- (parser.flow_level > 0 &&
- (parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == ':' ||
- parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == '[' ||
- parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' ||
- parser.buffer[parser.buffer_pos] == '}')) {
- break
- }
-
- // Check if we need to join whitespaces and breaks.
- if leading_blanks || len(whitespaces) > 0 {
- if leading_blanks {
- // Do we need to fold line breaks?
- if leading_break[0] == '\n' {
- if len(trailing_breaks) == 0 {
- s = append(s, ' ')
- } else {
- s = append(s, trailing_breaks...)
- }
- } else {
- s = append(s, leading_break...)
- s = append(s, trailing_breaks...)
- }
- trailing_breaks = trailing_breaks[:0]
- leading_break = leading_break[:0]
- leading_blanks = false
- } else {
- s = append(s, whitespaces...)
- whitespaces = whitespaces[:0]
- }
- }
-
- // Copy the character.
- s = read(parser, s)
-
- end_mark = parser.mark
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
- }
-
- // Is it the end?
- if !(is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos)) {
- break
- }
-
- // Consume blank characters.
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
-
- for is_blank(parser.buffer, parser.buffer_pos) || is_break(parser.buffer, parser.buffer_pos) {
- if is_blank(parser.buffer, parser.buffer_pos) {
-
- // Check for tab character that abuse indentation.
- if leading_blanks && parser.mark.column < indent && is_tab(parser.buffer, parser.buffer_pos) {
- yaml_parser_set_scanner_error(parser, "while scanning a plain scalar",
- start_mark, "found a tab character that violate indentation")
- return false
- }
-
- // Consume a space or a tab character.
- if !leading_blanks {
- whitespaces = read(parser, whitespaces)
- } else {
- skip(parser)
- }
- } else {
- if parser.unread < 2 && !yaml_parser_update_buffer(parser, 2) {
- return false
- }
-
- // Check if it is a first line break.
- if !leading_blanks {
- whitespaces = whitespaces[:0]
- leading_break = read_line(parser, leading_break)
- leading_blanks = true
- } else {
- trailing_breaks = read_line(parser, trailing_breaks)
- }
- }
- if parser.unread < 1 && !yaml_parser_update_buffer(parser, 1) {
- return false
- }
- }
-
- // Check indentation level.
- if parser.flow_level == 0 && parser.mark.column < indent {
- break
- }
- }
-
- // Create a token.
- *token = yaml_token_t{
- typ: yaml_SCALAR_TOKEN,
- start_mark: start_mark,
- end_mark: end_mark,
- value: s,
- style: yaml_PLAIN_SCALAR_STYLE,
- }
-
- // Note that we change the 'simple_key_allowed' flag.
- if leading_blanks {
- parser.simple_key_allowed = true
- }
- return true
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go
deleted file mode 100644
index 5958822..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/sorter.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package yaml
-
-import (
- "reflect"
- "unicode"
-)
-
-type keyList []reflect.Value
-
-func (l keyList) Len() int { return len(l) }
-func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
-func (l keyList) Less(i, j int) bool {
- a := l[i]
- b := l[j]
- ak := a.Kind()
- bk := b.Kind()
- for (ak == reflect.Interface || ak == reflect.Ptr) && !a.IsNil() {
- a = a.Elem()
- ak = a.Kind()
- }
- for (bk == reflect.Interface || bk == reflect.Ptr) && !b.IsNil() {
- b = b.Elem()
- bk = b.Kind()
- }
- af, aok := keyFloat(a)
- bf, bok := keyFloat(b)
- if aok && bok {
- if af != bf {
- return af < bf
- }
- if ak != bk {
- return ak < bk
- }
- return numLess(a, b)
- }
- if ak != reflect.String || bk != reflect.String {
- return ak < bk
- }
- ar, br := []rune(a.String()), []rune(b.String())
- for i := 0; i < len(ar) && i < len(br); i++ {
- if ar[i] == br[i] {
- continue
- }
- al := unicode.IsLetter(ar[i])
- bl := unicode.IsLetter(br[i])
- if al && bl {
- return ar[i] < br[i]
- }
- if al || bl {
- return bl
- }
- var ai, bi int
- var an, bn int64
- for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ {
- an = an*10 + int64(ar[ai]-'0')
- }
- for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ {
- bn = bn*10 + int64(br[bi]-'0')
- }
- if an != bn {
- return an < bn
- }
- if ai != bi {
- return ai < bi
- }
- return ar[i] < br[i]
- }
- return len(ar) < len(br)
-}
-
-// keyFloat returns a float value for v if it is a number/bool
-// and whether it is a number/bool or not.
-func keyFloat(v reflect.Value) (f float64, ok bool) {
- switch v.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return float64(v.Int()), true
- case reflect.Float32, reflect.Float64:
- return v.Float(), true
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return float64(v.Uint()), true
- case reflect.Bool:
- if v.Bool() {
- return 1, true
- }
- return 0, true
- }
- return 0, false
-}
-
-// numLess returns whether a < b.
-// a and b must necessarily have the same kind.
-func numLess(a, b reflect.Value) bool {
- switch a.Kind() {
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return a.Int() < b.Int()
- case reflect.Float32, reflect.Float64:
- return a.Float() < b.Float()
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return a.Uint() < b.Uint()
- case reflect.Bool:
- return !a.Bool() && b.Bool()
- }
- panic("not a number")
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go
deleted file mode 100644
index 190362f..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/writerc.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package yaml
-
-// Set the writer error and return false.
-func yaml_emitter_set_writer_error(emitter *yaml_emitter_t, problem string) bool {
- emitter.error = yaml_WRITER_ERROR
- emitter.problem = problem
- return false
-}
-
-// Flush the output buffer.
-func yaml_emitter_flush(emitter *yaml_emitter_t) bool {
- if emitter.write_handler == nil {
- panic("write handler not set")
- }
-
- // Check if the buffer is empty.
- if emitter.buffer_pos == 0 {
- return true
- }
-
- // If the output encoding is UTF-8, we don't need to recode the buffer.
- if emitter.encoding == yaml_UTF8_ENCODING {
- if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil {
- return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
- }
- emitter.buffer_pos = 0
- return true
- }
-
- // Recode the buffer into the raw buffer.
- var low, high int
- if emitter.encoding == yaml_UTF16LE_ENCODING {
- low, high = 0, 1
- } else {
- high, low = 1, 0
- }
-
- pos := 0
- for pos < emitter.buffer_pos {
- // See the "reader.c" code for more details on UTF-8 encoding. Note
- // that we assume that the buffer contains a valid UTF-8 sequence.
-
- // Read the next UTF-8 character.
- octet := emitter.buffer[pos]
-
- var w int
- var value rune
- switch {
- case octet&0x80 == 0x00:
- w, value = 1, rune(octet&0x7F)
- case octet&0xE0 == 0xC0:
- w, value = 2, rune(octet&0x1F)
- case octet&0xF0 == 0xE0:
- w, value = 3, rune(octet&0x0F)
- case octet&0xF8 == 0xF0:
- w, value = 4, rune(octet&0x07)
- }
- for k := 1; k < w; k++ {
- octet = emitter.buffer[pos+k]
- value = (value << 6) + (rune(octet) & 0x3F)
- }
- pos += w
-
- // Write the character.
- if value < 0x10000 {
- var b [2]byte
- b[high] = byte(value >> 8)
- b[low] = byte(value & 0xFF)
- emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1])
- } else {
- // Write the character using a surrogate pair (check "reader.c").
- var b [4]byte
- value -= 0x10000
- b[high] = byte(0xD8 + (value >> 18))
- b[low] = byte((value >> 10) & 0xFF)
- b[high+2] = byte(0xDC + ((value >> 8) & 0xFF))
- b[low+2] = byte(value & 0xFF)
- emitter.raw_buffer = append(emitter.raw_buffer, b[0], b[1], b[2], b[3])
- }
- }
-
- // Write the raw buffer.
- if err := emitter.write_handler(emitter, emitter.raw_buffer); err != nil {
- return yaml_emitter_set_writer_error(emitter, "write error: "+err.Error())
- }
- emitter.buffer_pos = 0
- emitter.raw_buffer = emitter.raw_buffer[:0]
- return true
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
deleted file mode 100644
index d133edf..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yaml.go
+++ /dev/null
@@ -1,346 +0,0 @@
-// Package yaml implements YAML support for the Go language.
-//
-// Source code and other details for the project are available at GitHub:
-//
-// https://github.com/go-yaml/yaml
-//
-package yaml
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strings"
- "sync"
-)
-
-// MapSlice encodes and decodes as a YAML map.
-// The order of keys is preserved when encoding and decoding.
-type MapSlice []MapItem
-
-// MapItem is an item in a MapSlice.
-type MapItem struct {
- Key, Value interface{}
-}
-
-// The Unmarshaler interface may be implemented by types to customize their
-// behavior when being unmarshaled from a YAML document. The UnmarshalYAML
-// method receives a function that may be called to unmarshal the original
-// YAML value into a field or variable. It is safe to call the unmarshal
-// function parameter more than once if necessary.
-type Unmarshaler interface {
- UnmarshalYAML(unmarshal func(interface{}) error) error
-}
-
-// The Marshaler interface may be implemented by types to customize their
-// behavior when being marshaled into a YAML document. The returned value
-// is marshaled in place of the original value implementing Marshaler.
-//
-// If an error is returned by MarshalYAML, the marshaling procedure stops
-// and returns with the provided error.
-type Marshaler interface {
- MarshalYAML() (interface{}, error)
-}
-
-// Unmarshal decodes the first document found within the in byte slice
-// and assigns decoded values into the out value.
-//
-// Maps and pointers (to a struct, string, int, etc) are accepted as out
-// values. If an internal pointer within a struct is not initialized,
-// the yaml package will initialize it if necessary for unmarshalling
-// the provided data. The out parameter must not be nil.
-//
-// The type of the decoded values should be compatible with the respective
-// values in out. If one or more values cannot be decoded due to a type
-// mismatches, decoding continues partially until the end of the YAML
-// content, and a *yaml.TypeError is returned with details for all
-// missed values.
-//
-// Struct fields are only unmarshalled if they are exported (have an
-// upper case first letter), and are unmarshalled using the field name
-// lowercased as the default key. Custom keys may be defined via the
-// "yaml" name in the field tag: the content preceding the first comma
-// is used as the key, and the following comma-separated options are
-// used to tweak the marshalling process (see Marshal).
-// Conflicting names result in a runtime error.
-//
-// For example:
-//
-// type T struct {
-// F int `yaml:"a,omitempty"`
-// B int
-// }
-// var t T
-// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t)
-//
-// See the documentation of Marshal for the format of tags and a list of
-// supported tag options.
-//
-func Unmarshal(in []byte, out interface{}) (err error) {
- defer handleErr(&err)
- d := newDecoder()
- p := newParser(in)
- defer p.destroy()
- node := p.parse()
- if node != nil {
- v := reflect.ValueOf(out)
- if v.Kind() == reflect.Ptr && !v.IsNil() {
- v = v.Elem()
- }
- d.unmarshal(node, v)
- }
- if len(d.terrors) > 0 {
- return &TypeError{d.terrors}
- }
- return nil
-}
-
-// Marshal serializes the value provided into a YAML document. The structure
-// of the generated document will reflect the structure of the value itself.
-// Maps and pointers (to struct, string, int, etc) are accepted as the in value.
-//
-// Struct fields are only unmarshalled if they are exported (have an upper case
-// first letter), and are unmarshalled using the field name lowercased as the
-// default key. Custom keys may be defined via the "yaml" name in the field
-// tag: the content preceding the first comma is used as the key, and the
-// following comma-separated options are used to tweak the marshalling process.
-// Conflicting names result in a runtime error.
-//
-// The field tag format accepted is:
-//
-// `(...) yaml:"[][,[,]]" (...)`
-//
-// The following flags are currently supported:
-//
-// omitempty Only include the field if it's not set to the zero
-// value for the type or to empty slices or maps.
-// Does not apply to zero valued structs.
-//
-// flow Marshal using a flow style (useful for structs,
-// sequences and maps).
-//
-// inline Inline the field, which must be a struct or a map,
-// causing all of its fields or keys to be processed as if
-// they were part of the outer struct. For maps, keys must
-// not conflict with the yaml keys of other struct fields.
-//
-// In addition, if the key is "-", the field is ignored.
-//
-// For example:
-//
-// type T struct {
-// F int "a,omitempty"
-// B int
-// }
-// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n"
-// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n"
-//
-func Marshal(in interface{}) (out []byte, err error) {
- defer handleErr(&err)
- e := newEncoder()
- defer e.destroy()
- e.marshal("", reflect.ValueOf(in))
- e.finish()
- out = e.out
- return
-}
-
-func handleErr(err *error) {
- if v := recover(); v != nil {
- if e, ok := v.(yamlError); ok {
- *err = e.err
- } else {
- panic(v)
- }
- }
-}
-
-type yamlError struct {
- err error
-}
-
-func fail(err error) {
- panic(yamlError{err})
-}
-
-func failf(format string, args ...interface{}) {
- panic(yamlError{fmt.Errorf("yaml: "+format, args...)})
-}
-
-// A TypeError is returned by Unmarshal when one or more fields in
-// the YAML document cannot be properly decoded into the requested
-// types. When this error is returned, the value is still
-// unmarshaled partially.
-type TypeError struct {
- Errors []string
-}
-
-func (e *TypeError) Error() string {
- return fmt.Sprintf("yaml: unmarshal errors:\n %s", strings.Join(e.Errors, "\n "))
-}
-
-// --------------------------------------------------------------------------
-// Maintain a mapping of keys to structure field indexes
-
-// The code in this section was copied from mgo/bson.
-
-// structInfo holds details for the serialization of fields of
-// a given struct.
-type structInfo struct {
- FieldsMap map[string]fieldInfo
- FieldsList []fieldInfo
-
- // InlineMap is the number of the field in the struct that
- // contains an ,inline map, or -1 if there's none.
- InlineMap int
-}
-
-type fieldInfo struct {
- Key string
- Num int
- OmitEmpty bool
- Flow bool
-
- // Inline holds the field index if the field is part of an inlined struct.
- Inline []int
-}
-
-var structMap = make(map[reflect.Type]*structInfo)
-var fieldMapMutex sync.RWMutex
-
-func getStructInfo(st reflect.Type) (*structInfo, error) {
- fieldMapMutex.RLock()
- sinfo, found := structMap[st]
- fieldMapMutex.RUnlock()
- if found {
- return sinfo, nil
- }
-
- n := st.NumField()
- fieldsMap := make(map[string]fieldInfo)
- fieldsList := make([]fieldInfo, 0, n)
- inlineMap := -1
- for i := 0; i != n; i++ {
- field := st.Field(i)
- if field.PkgPath != "" {
- continue // Private field
- }
-
- info := fieldInfo{Num: i}
-
- tag := field.Tag.Get("yaml")
- if tag == "" && strings.Index(string(field.Tag), ":") < 0 {
- tag = string(field.Tag)
- }
- if tag == "-" {
- continue
- }
-
- inline := false
- fields := strings.Split(tag, ",")
- if len(fields) > 1 {
- for _, flag := range fields[1:] {
- switch flag {
- case "omitempty":
- info.OmitEmpty = true
- case "flow":
- info.Flow = true
- case "inline":
- inline = true
- default:
- return nil, errors.New(fmt.Sprintf("Unsupported flag %q in tag %q of type %s", flag, tag, st))
- }
- }
- tag = fields[0]
- }
-
- if inline {
- switch field.Type.Kind() {
- case reflect.Map:
- if inlineMap >= 0 {
- return nil, errors.New("Multiple ,inline maps in struct " + st.String())
- }
- if field.Type.Key() != reflect.TypeOf("") {
- return nil, errors.New("Option ,inline needs a map with string keys in struct " + st.String())
- }
- inlineMap = info.Num
- case reflect.Struct:
- sinfo, err := getStructInfo(field.Type)
- if err != nil {
- return nil, err
- }
- for _, finfo := range sinfo.FieldsList {
- if _, found := fieldsMap[finfo.Key]; found {
- msg := "Duplicated key '" + finfo.Key + "' in struct " + st.String()
- return nil, errors.New(msg)
- }
- if finfo.Inline == nil {
- finfo.Inline = []int{i, finfo.Num}
- } else {
- finfo.Inline = append([]int{i}, finfo.Inline...)
- }
- fieldsMap[finfo.Key] = finfo
- fieldsList = append(fieldsList, finfo)
- }
- default:
- //return nil, errors.New("Option ,inline needs a struct value or map field")
- return nil, errors.New("Option ,inline needs a struct value field")
- }
- continue
- }
-
- if tag != "" {
- info.Key = tag
- } else {
- info.Key = strings.ToLower(field.Name)
- }
-
- if _, found = fieldsMap[info.Key]; found {
- msg := "Duplicated key '" + info.Key + "' in struct " + st.String()
- return nil, errors.New(msg)
- }
-
- fieldsList = append(fieldsList, info)
- fieldsMap[info.Key] = info
- }
-
- sinfo = &structInfo{fieldsMap, fieldsList, inlineMap}
-
- fieldMapMutex.Lock()
- structMap[st] = sinfo
- fieldMapMutex.Unlock()
- return sinfo, nil
-}
-
-func isZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.String:
- return len(v.String()) == 0
- case reflect.Interface, reflect.Ptr:
- return v.IsNil()
- case reflect.Slice:
- return v.Len() == 0
- case reflect.Map:
- return v.Len() == 0
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Float32, reflect.Float64:
- return v.Float() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Bool:
- return !v.Bool()
- case reflect.Struct:
- vt := v.Type()
- for i := v.NumField() - 1; i >= 0; i-- {
- if vt.Field(i).PkgPath != "" {
- continue // Private field
- }
- if !isZero(v.Field(i)) {
- return false
- }
- }
- return true
- }
- return false
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
deleted file mode 100644
index d60a6b6..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlh.go
+++ /dev/null
@@ -1,716 +0,0 @@
-package yaml
-
-import (
- "io"
-)
-
-// The version directive data.
-type yaml_version_directive_t struct {
- major int8 // The major version number.
- minor int8 // The minor version number.
-}
-
-// The tag directive data.
-type yaml_tag_directive_t struct {
- handle []byte // The tag handle.
- prefix []byte // The tag prefix.
-}
-
-type yaml_encoding_t int
-
-// The stream encoding.
-const (
- // Let the parser choose the encoding.
- yaml_ANY_ENCODING yaml_encoding_t = iota
-
- yaml_UTF8_ENCODING // The default UTF-8 encoding.
- yaml_UTF16LE_ENCODING // The UTF-16-LE encoding with BOM.
- yaml_UTF16BE_ENCODING // The UTF-16-BE encoding with BOM.
-)
-
-type yaml_break_t int
-
-// Line break types.
-const (
- // Let the parser choose the break type.
- yaml_ANY_BREAK yaml_break_t = iota
-
- yaml_CR_BREAK // Use CR for line breaks (Mac style).
- yaml_LN_BREAK // Use LN for line breaks (Unix style).
- yaml_CRLN_BREAK // Use CR LN for line breaks (DOS style).
-)
-
-type yaml_error_type_t int
-
-// Many bad things could happen with the parser and emitter.
-const (
- // No error is produced.
- yaml_NO_ERROR yaml_error_type_t = iota
-
- yaml_MEMORY_ERROR // Cannot allocate or reallocate a block of memory.
- yaml_READER_ERROR // Cannot read or decode the input stream.
- yaml_SCANNER_ERROR // Cannot scan the input stream.
- yaml_PARSER_ERROR // Cannot parse the input stream.
- yaml_COMPOSER_ERROR // Cannot compose a YAML document.
- yaml_WRITER_ERROR // Cannot write to the output stream.
- yaml_EMITTER_ERROR // Cannot emit a YAML stream.
-)
-
-// The pointer position.
-type yaml_mark_t struct {
- index int // The position index.
- line int // The position line.
- column int // The position column.
-}
-
-// Node Styles
-
-type yaml_style_t int8
-
-type yaml_scalar_style_t yaml_style_t
-
-// Scalar styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_SCALAR_STYLE yaml_scalar_style_t = iota
-
- yaml_PLAIN_SCALAR_STYLE // The plain scalar style.
- yaml_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style.
- yaml_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style.
- yaml_LITERAL_SCALAR_STYLE // The literal scalar style.
- yaml_FOLDED_SCALAR_STYLE // The folded scalar style.
-)
-
-type yaml_sequence_style_t yaml_style_t
-
-// Sequence styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_SEQUENCE_STYLE yaml_sequence_style_t = iota
-
- yaml_BLOCK_SEQUENCE_STYLE // The block sequence style.
- yaml_FLOW_SEQUENCE_STYLE // The flow sequence style.
-)
-
-type yaml_mapping_style_t yaml_style_t
-
-// Mapping styles.
-const (
- // Let the emitter choose the style.
- yaml_ANY_MAPPING_STYLE yaml_mapping_style_t = iota
-
- yaml_BLOCK_MAPPING_STYLE // The block mapping style.
- yaml_FLOW_MAPPING_STYLE // The flow mapping style.
-)
-
-// Tokens
-
-type yaml_token_type_t int
-
-// Token types.
-const (
- // An empty token.
- yaml_NO_TOKEN yaml_token_type_t = iota
-
- yaml_STREAM_START_TOKEN // A STREAM-START token.
- yaml_STREAM_END_TOKEN // A STREAM-END token.
-
- yaml_VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token.
- yaml_TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token.
- yaml_DOCUMENT_START_TOKEN // A DOCUMENT-START token.
- yaml_DOCUMENT_END_TOKEN // A DOCUMENT-END token.
-
- yaml_BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token.
- yaml_BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token.
- yaml_BLOCK_END_TOKEN // A BLOCK-END token.
-
- yaml_FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token.
- yaml_FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token.
- yaml_FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token.
- yaml_FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token.
-
- yaml_BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token.
- yaml_FLOW_ENTRY_TOKEN // A FLOW-ENTRY token.
- yaml_KEY_TOKEN // A KEY token.
- yaml_VALUE_TOKEN // A VALUE token.
-
- yaml_ALIAS_TOKEN // An ALIAS token.
- yaml_ANCHOR_TOKEN // An ANCHOR token.
- yaml_TAG_TOKEN // A TAG token.
- yaml_SCALAR_TOKEN // A SCALAR token.
-)
-
-func (tt yaml_token_type_t) String() string {
- switch tt {
- case yaml_NO_TOKEN:
- return "yaml_NO_TOKEN"
- case yaml_STREAM_START_TOKEN:
- return "yaml_STREAM_START_TOKEN"
- case yaml_STREAM_END_TOKEN:
- return "yaml_STREAM_END_TOKEN"
- case yaml_VERSION_DIRECTIVE_TOKEN:
- return "yaml_VERSION_DIRECTIVE_TOKEN"
- case yaml_TAG_DIRECTIVE_TOKEN:
- return "yaml_TAG_DIRECTIVE_TOKEN"
- case yaml_DOCUMENT_START_TOKEN:
- return "yaml_DOCUMENT_START_TOKEN"
- case yaml_DOCUMENT_END_TOKEN:
- return "yaml_DOCUMENT_END_TOKEN"
- case yaml_BLOCK_SEQUENCE_START_TOKEN:
- return "yaml_BLOCK_SEQUENCE_START_TOKEN"
- case yaml_BLOCK_MAPPING_START_TOKEN:
- return "yaml_BLOCK_MAPPING_START_TOKEN"
- case yaml_BLOCK_END_TOKEN:
- return "yaml_BLOCK_END_TOKEN"
- case yaml_FLOW_SEQUENCE_START_TOKEN:
- return "yaml_FLOW_SEQUENCE_START_TOKEN"
- case yaml_FLOW_SEQUENCE_END_TOKEN:
- return "yaml_FLOW_SEQUENCE_END_TOKEN"
- case yaml_FLOW_MAPPING_START_TOKEN:
- return "yaml_FLOW_MAPPING_START_TOKEN"
- case yaml_FLOW_MAPPING_END_TOKEN:
- return "yaml_FLOW_MAPPING_END_TOKEN"
- case yaml_BLOCK_ENTRY_TOKEN:
- return "yaml_BLOCK_ENTRY_TOKEN"
- case yaml_FLOW_ENTRY_TOKEN:
- return "yaml_FLOW_ENTRY_TOKEN"
- case yaml_KEY_TOKEN:
- return "yaml_KEY_TOKEN"
- case yaml_VALUE_TOKEN:
- return "yaml_VALUE_TOKEN"
- case yaml_ALIAS_TOKEN:
- return "yaml_ALIAS_TOKEN"
- case yaml_ANCHOR_TOKEN:
- return "yaml_ANCHOR_TOKEN"
- case yaml_TAG_TOKEN:
- return "yaml_TAG_TOKEN"
- case yaml_SCALAR_TOKEN:
- return "yaml_SCALAR_TOKEN"
- }
- return ""
-}
-
-// The token structure.
-type yaml_token_t struct {
- // The token type.
- typ yaml_token_type_t
-
- // The start/end of the token.
- start_mark, end_mark yaml_mark_t
-
- // The stream encoding (for yaml_STREAM_START_TOKEN).
- encoding yaml_encoding_t
-
- // The alias/anchor/scalar value or tag/tag directive handle
- // (for yaml_ALIAS_TOKEN, yaml_ANCHOR_TOKEN, yaml_SCALAR_TOKEN, yaml_TAG_TOKEN, yaml_TAG_DIRECTIVE_TOKEN).
- value []byte
-
- // The tag suffix (for yaml_TAG_TOKEN).
- suffix []byte
-
- // The tag directive prefix (for yaml_TAG_DIRECTIVE_TOKEN).
- prefix []byte
-
- // The scalar style (for yaml_SCALAR_TOKEN).
- style yaml_scalar_style_t
-
- // The version directive major/minor (for yaml_VERSION_DIRECTIVE_TOKEN).
- major, minor int8
-}
-
-// Events
-
-type yaml_event_type_t int8
-
-// Event types.
-const (
- // An empty event.
- yaml_NO_EVENT yaml_event_type_t = iota
-
- yaml_STREAM_START_EVENT // A STREAM-START event.
- yaml_STREAM_END_EVENT // A STREAM-END event.
- yaml_DOCUMENT_START_EVENT // A DOCUMENT-START event.
- yaml_DOCUMENT_END_EVENT // A DOCUMENT-END event.
- yaml_ALIAS_EVENT // An ALIAS event.
- yaml_SCALAR_EVENT // A SCALAR event.
- yaml_SEQUENCE_START_EVENT // A SEQUENCE-START event.
- yaml_SEQUENCE_END_EVENT // A SEQUENCE-END event.
- yaml_MAPPING_START_EVENT // A MAPPING-START event.
- yaml_MAPPING_END_EVENT // A MAPPING-END event.
-)
-
-// The event structure.
-type yaml_event_t struct {
-
- // The event type.
- typ yaml_event_type_t
-
- // The start and end of the event.
- start_mark, end_mark yaml_mark_t
-
- // The document encoding (for yaml_STREAM_START_EVENT).
- encoding yaml_encoding_t
-
- // The version directive (for yaml_DOCUMENT_START_EVENT).
- version_directive *yaml_version_directive_t
-
- // The list of tag directives (for yaml_DOCUMENT_START_EVENT).
- tag_directives []yaml_tag_directive_t
-
- // The anchor (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_ALIAS_EVENT).
- anchor []byte
-
- // The tag (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
- tag []byte
-
- // The scalar value (for yaml_SCALAR_EVENT).
- value []byte
-
- // Is the document start/end indicator implicit, or the tag optional?
- // (for yaml_DOCUMENT_START_EVENT, yaml_DOCUMENT_END_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT, yaml_SCALAR_EVENT).
- implicit bool
-
- // Is the tag optional for any non-plain style? (for yaml_SCALAR_EVENT).
- quoted_implicit bool
-
- // The style (for yaml_SCALAR_EVENT, yaml_SEQUENCE_START_EVENT, yaml_MAPPING_START_EVENT).
- style yaml_style_t
-}
-
-func (e *yaml_event_t) scalar_style() yaml_scalar_style_t { return yaml_scalar_style_t(e.style) }
-func (e *yaml_event_t) sequence_style() yaml_sequence_style_t { return yaml_sequence_style_t(e.style) }
-func (e *yaml_event_t) mapping_style() yaml_mapping_style_t { return yaml_mapping_style_t(e.style) }
-
-// Nodes
-
-const (
- yaml_NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null.
- yaml_BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false.
- yaml_STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values.
- yaml_INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values.
- yaml_FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values.
- yaml_TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values.
-
- yaml_SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences.
- yaml_MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping.
-
- // Not in original libyaml.
- yaml_BINARY_TAG = "tag:yaml.org,2002:binary"
- yaml_MERGE_TAG = "tag:yaml.org,2002:merge"
-
- yaml_DEFAULT_SCALAR_TAG = yaml_STR_TAG // The default scalar tag is !!str.
- yaml_DEFAULT_SEQUENCE_TAG = yaml_SEQ_TAG // The default sequence tag is !!seq.
- yaml_DEFAULT_MAPPING_TAG = yaml_MAP_TAG // The default mapping tag is !!map.
-)
-
-type yaml_node_type_t int
-
-// Node types.
-const (
- // An empty node.
- yaml_NO_NODE yaml_node_type_t = iota
-
- yaml_SCALAR_NODE // A scalar node.
- yaml_SEQUENCE_NODE // A sequence node.
- yaml_MAPPING_NODE // A mapping node.
-)
-
-// An element of a sequence node.
-type yaml_node_item_t int
-
-// An element of a mapping node.
-type yaml_node_pair_t struct {
- key int // The key of the element.
- value int // The value of the element.
-}
-
-// The node structure.
-type yaml_node_t struct {
- typ yaml_node_type_t // The node type.
- tag []byte // The node tag.
-
- // The node data.
-
- // The scalar parameters (for yaml_SCALAR_NODE).
- scalar struct {
- value []byte // The scalar value.
- length int // The length of the scalar value.
- style yaml_scalar_style_t // The scalar style.
- }
-
- // The sequence parameters (for YAML_SEQUENCE_NODE).
- sequence struct {
- items_data []yaml_node_item_t // The stack of sequence items.
- style yaml_sequence_style_t // The sequence style.
- }
-
- // The mapping parameters (for yaml_MAPPING_NODE).
- mapping struct {
- pairs_data []yaml_node_pair_t // The stack of mapping pairs (key, value).
- pairs_start *yaml_node_pair_t // The beginning of the stack.
- pairs_end *yaml_node_pair_t // The end of the stack.
- pairs_top *yaml_node_pair_t // The top of the stack.
- style yaml_mapping_style_t // The mapping style.
- }
-
- start_mark yaml_mark_t // The beginning of the node.
- end_mark yaml_mark_t // The end of the node.
-
-}
-
-// The document structure.
-type yaml_document_t struct {
-
- // The document nodes.
- nodes []yaml_node_t
-
- // The version directive.
- version_directive *yaml_version_directive_t
-
- // The list of tag directives.
- tag_directives_data []yaml_tag_directive_t
- tag_directives_start int // The beginning of the tag directives list.
- tag_directives_end int // The end of the tag directives list.
-
- start_implicit int // Is the document start indicator implicit?
- end_implicit int // Is the document end indicator implicit?
-
- // The start/end of the document.
- start_mark, end_mark yaml_mark_t
-}
-
-// The prototype of a read handler.
-//
-// The read handler is called when the parser needs to read more bytes from the
-// source. The handler should write not more than size bytes to the buffer.
-// The number of written bytes should be set to the size_read variable.
-//
-// [in,out] data A pointer to an application data specified by
-// yaml_parser_set_input().
-// [out] buffer The buffer to write the data from the source.
-// [in] size The size of the buffer.
-// [out] size_read The actual number of bytes read from the source.
-//
-// On success, the handler should return 1. If the handler failed,
-// the returned value should be 0. On EOF, the handler should set the
-// size_read to 0 and return 1.
-type yaml_read_handler_t func(parser *yaml_parser_t, buffer []byte) (n int, err error)
-
-// This structure holds information about a potential simple key.
-type yaml_simple_key_t struct {
- possible bool // Is a simple key possible?
- required bool // Is a simple key required?
- token_number int // The number of the token.
- mark yaml_mark_t // The position mark.
-}
-
-// The states of the parser.
-type yaml_parser_state_t int
-
-const (
- yaml_PARSE_STREAM_START_STATE yaml_parser_state_t = iota
-
- yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document.
- yaml_PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START.
- yaml_PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_PARSE_BLOCK_NODE_STATE // Expect a block node.
- yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE // Expect a block node or indentless sequence.
- yaml_PARSE_FLOW_NODE_STATE // Expect a flow node.
- yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence.
- yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence.
- yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence.
- yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key.
- yaml_PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value.
- yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping.
- yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry.
- yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping.
- yaml_PARSE_END_STATE // Expect nothing.
-)
-
-func (ps yaml_parser_state_t) String() string {
- switch ps {
- case yaml_PARSE_STREAM_START_STATE:
- return "yaml_PARSE_STREAM_START_STATE"
- case yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE:
- return "yaml_PARSE_IMPLICIT_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_START_STATE:
- return "yaml_PARSE_DOCUMENT_START_STATE"
- case yaml_PARSE_DOCUMENT_CONTENT_STATE:
- return "yaml_PARSE_DOCUMENT_CONTENT_STATE"
- case yaml_PARSE_DOCUMENT_END_STATE:
- return "yaml_PARSE_DOCUMENT_END_STATE"
- case yaml_PARSE_BLOCK_NODE_STATE:
- return "yaml_PARSE_BLOCK_NODE_STATE"
- case yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE:
- return "yaml_PARSE_BLOCK_NODE_OR_INDENTLESS_SEQUENCE_STATE"
- case yaml_PARSE_FLOW_NODE_STATE:
- return "yaml_PARSE_FLOW_NODE_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_BLOCK_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_KEY_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_KEY_STATE"
- case yaml_PARSE_BLOCK_MAPPING_VALUE_STATE:
- return "yaml_PARSE_BLOCK_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE:
- return "yaml_PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE"
- case yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_FIRST_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_KEY_STATE:
- return "yaml_PARSE_FLOW_MAPPING_KEY_STATE"
- case yaml_PARSE_FLOW_MAPPING_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_VALUE_STATE"
- case yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE:
- return "yaml_PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE"
- case yaml_PARSE_END_STATE:
- return "yaml_PARSE_END_STATE"
- }
- return ""
-}
-
-// This structure holds aliases data.
-type yaml_alias_data_t struct {
- anchor []byte // The anchor.
- index int // The node id.
- mark yaml_mark_t // The anchor mark.
-}
-
-// The parser structure.
-//
-// All members are internal. Manage the structure using the
-// yaml_parser_ family of functions.
-type yaml_parser_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
-
- problem string // Error description.
-
- // The byte about which the problem occured.
- problem_offset int
- problem_value int
- problem_mark yaml_mark_t
-
- // The error context.
- context string
- context_mark yaml_mark_t
-
- // Reader stuff
-
- read_handler yaml_read_handler_t // Read handler.
-
- input_file io.Reader // File input data.
- input []byte // String input data.
- input_pos int
-
- eof bool // EOF flag
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- unread int // The number of unread characters in the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the buffer.
-
- encoding yaml_encoding_t // The input encoding.
-
- offset int // The offset of the current position (in bytes).
- mark yaml_mark_t // The mark of the current position.
-
- // Scanner stuff
-
- stream_start_produced bool // Have we started to scan the input stream?
- stream_end_produced bool // Have we reached the end of the input stream?
-
- flow_level int // The number of unclosed '[' and '{' indicators.
-
- tokens []yaml_token_t // The tokens queue.
- tokens_head int // The head of the tokens queue.
- tokens_parsed int // The number of tokens fetched from the queue.
- token_available bool // Does the tokens queue contain a token ready for dequeueing.
-
- indent int // The current indentation level.
- indents []int // The indentation levels stack.
-
- simple_key_allowed bool // May a simple key occur at the current position?
- simple_keys []yaml_simple_key_t // The stack of simple keys.
-
- // Parser stuff
-
- state yaml_parser_state_t // The current parser state.
- states []yaml_parser_state_t // The parser states stack.
- marks []yaml_mark_t // The stack of marks.
- tag_directives []yaml_tag_directive_t // The list of TAG directives.
-
- // Dumper stuff
-
- aliases []yaml_alias_data_t // The alias data.
-
- document *yaml_document_t // The currently parsed document.
-}
-
-// Emitter Definitions
-
-// The prototype of a write handler.
-//
-// The write handler is called when the emitter needs to flush the accumulated
-// characters to the output. The handler should write @a size bytes of the
-// @a buffer to the output.
-//
-// @param[in,out] data A pointer to an application data specified by
-// yaml_emitter_set_output().
-// @param[in] buffer The buffer with bytes to be written.
-// @param[in] size The size of the buffer.
-//
-// @returns On success, the handler should return @c 1. If the handler failed,
-// the returned value should be @c 0.
-//
-type yaml_write_handler_t func(emitter *yaml_emitter_t, buffer []byte) error
-
-type yaml_emitter_state_t int
-
-// The emitter states.
-const (
- // Expect STREAM-START.
- yaml_EMIT_STREAM_START_STATE yaml_emitter_state_t = iota
-
- yaml_EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END.
- yaml_EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document.
- yaml_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END.
- yaml_EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence.
- yaml_EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence.
- yaml_EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping.
- yaml_EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping.
- yaml_EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence.
- yaml_EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence.
- yaml_EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping.
- yaml_EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping.
- yaml_EMIT_END_STATE // Expect nothing.
-)
-
-// The emitter structure.
-//
-// All members are internal. Manage the structure using the @c yaml_emitter_
-// family of functions.
-type yaml_emitter_t struct {
-
- // Error handling
-
- error yaml_error_type_t // Error type.
- problem string // Error description.
-
- // Writer stuff
-
- write_handler yaml_write_handler_t // Write handler.
-
- output_buffer *[]byte // String output data.
- output_file io.Writer // File output data.
-
- buffer []byte // The working buffer.
- buffer_pos int // The current position of the buffer.
-
- raw_buffer []byte // The raw buffer.
- raw_buffer_pos int // The current position of the buffer.
-
- encoding yaml_encoding_t // The stream encoding.
-
- // Emitter stuff
-
- canonical bool // If the output is in the canonical style?
- best_indent int // The number of indentation spaces.
- best_width int // The preferred width of the output lines.
- unicode bool // Allow unescaped non-ASCII characters?
- line_break yaml_break_t // The preferred line break.
-
- state yaml_emitter_state_t // The current emitter state.
- states []yaml_emitter_state_t // The stack of states.
-
- events []yaml_event_t // The event queue.
- events_head int // The head of the event queue.
-
- indents []int // The stack of indentation levels.
-
- tag_directives []yaml_tag_directive_t // The list of tag directives.
-
- indent int // The current indentation level.
-
- flow_level int // The current flow level.
-
- root_context bool // Is it the document root context?
- sequence_context bool // Is it a sequence context?
- mapping_context bool // Is it a mapping context?
- simple_key_context bool // Is it a simple mapping key context?
-
- line int // The current line.
- column int // The current column.
- whitespace bool // If the last character was a whitespace?
- indention bool // If the last character was an indentation character (' ', '-', '?', ':')?
- open_ended bool // If an explicit document end is required?
-
- // Anchor analysis.
- anchor_data struct {
- anchor []byte // The anchor value.
- alias bool // Is it an alias?
- }
-
- // Tag analysis.
- tag_data struct {
- handle []byte // The tag handle.
- suffix []byte // The tag suffix.
- }
-
- // Scalar analysis.
- scalar_data struct {
- value []byte // The scalar value.
- multiline bool // Does the scalar contain line breaks?
- flow_plain_allowed bool // Can the scalar be expessed in the flow plain style?
- block_plain_allowed bool // Can the scalar be expressed in the block plain style?
- single_quoted_allowed bool // Can the scalar be expressed in the single quoted style?
- block_allowed bool // Can the scalar be expressed in the literal or folded styles?
- style yaml_scalar_style_t // The output style.
- }
-
- // Dumper stuff
-
- opened bool // If the stream was already opened?
- closed bool // If the stream was already closed?
-
- // The information associated with the document nodes.
- anchors *struct {
- references int // The number of references.
- anchor int // The anchor id.
- serialized bool // If the node has been emitted?
- }
-
- last_anchor_id int // The last assigned anchor id.
-
- document *yaml_document_t // The currently emitted document.
-}
diff --git a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go b/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
deleted file mode 100644
index 8110ce3..0000000
--- a/src/cfdnsupdater/Godeps/_workspace/src/gopkg.in/yaml.v2/yamlprivateh.go
+++ /dev/null
@@ -1,173 +0,0 @@
-package yaml
-
-const (
- // The size of the input raw buffer.
- input_raw_buffer_size = 512
-
- // The size of the input buffer.
- // It should be possible to decode the whole raw buffer.
- input_buffer_size = input_raw_buffer_size * 3
-
- // The size of the output buffer.
- output_buffer_size = 128
-
- // The size of the output raw buffer.
- // It should be possible to encode the whole output buffer.
- output_raw_buffer_size = (output_buffer_size*2 + 2)
-
- // The size of other stacks and queues.
- initial_stack_size = 16
- initial_queue_size = 16
- initial_string_size = 16
-)
-
-// Check if the character at the specified position is an alphabetical
-// character, a digit, '_', or '-'.
-func is_alpha(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-'
-}
-
-// Check if the character at the specified position is a digit.
-func is_digit(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9'
-}
-
-// Get the value of a digit.
-func as_digit(b []byte, i int) int {
- return int(b[i]) - '0'
-}
-
-// Check if the character at the specified position is a hex-digit.
-func is_hex(b []byte, i int) bool {
- return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || b[i] >= 'a' && b[i] <= 'f'
-}
-
-// Get the value of a hex-digit.
-func as_hex(b []byte, i int) int {
- bi := b[i]
- if bi >= 'A' && bi <= 'F' {
- return int(bi) - 'A' + 10
- }
- if bi >= 'a' && bi <= 'f' {
- return int(bi) - 'a' + 10
- }
- return int(bi) - '0'
-}
-
-// Check if the character is ASCII.
-func is_ascii(b []byte, i int) bool {
- return b[i] <= 0x7F
-}
-
-// Check if the character at the start of the buffer can be printed unescaped.
-func is_printable(b []byte, i int) bool {
- return ((b[i] == 0x0A) || // . == #x0A
- (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E
- (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF
- (b[i] > 0xC2 && b[i] < 0xED) ||
- (b[i] == 0xED && b[i+1] < 0xA0) ||
- (b[i] == 0xEE) ||
- (b[i] == 0xEF && // #xE000 <= . <= #xFFFD
- !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF
- !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF))))
-}
-
-// Check if the character at the specified position is NUL.
-func is_z(b []byte, i int) bool {
- return b[i] == 0x00
-}
-
-// Check if the beginning of the buffer is a BOM.
-func is_bom(b []byte, i int) bool {
- return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF
-}
-
-// Check if the character at the specified position is space.
-func is_space(b []byte, i int) bool {
- return b[i] == ' '
-}
-
-// Check if the character at the specified position is tab.
-func is_tab(b []byte, i int) bool {
- return b[i] == '\t'
-}
-
-// Check if the character at the specified position is blank (space or tab).
-func is_blank(b []byte, i int) bool {
- //return is_space(b, i) || is_tab(b, i)
- return b[i] == ' ' || b[i] == '\t'
-}
-
-// Check if the character at the specified position is a line break.
-func is_break(b []byte, i int) bool {
- return (b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029)
-}
-
-func is_crlf(b []byte, i int) bool {
- return b[i] == '\r' && b[i+1] == '\n'
-}
-
-// Check if the character is a line break or NUL.
-func is_breakz(b []byte, i int) bool {
- //return is_break(b, i) || is_z(b, i)
- return ( // is_break:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- // is_z:
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, or NUL.
-func is_spacez(b []byte, i int) bool {
- //return is_space(b, i) || is_breakz(b, i)
- return ( // is_space:
- b[i] == ' ' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Check if the character is a line break, space, tab, or NUL.
-func is_blankz(b []byte, i int) bool {
- //return is_blank(b, i) || is_breakz(b, i)
- return ( // is_blank:
- b[i] == ' ' || b[i] == '\t' ||
- // is_breakz:
- b[i] == '\r' || // CR (#xD)
- b[i] == '\n' || // LF (#xA)
- b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028)
- b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029)
- b[i] == 0)
-}
-
-// Determine the width of the character.
-func width(b byte) int {
- // Don't replace these by a switch without first
- // confirming that it is being inlined.
- if b&0x80 == 0x00 {
- return 1
- }
- if b&0xE0 == 0xC0 {
- return 2
- }
- if b&0xF0 == 0xE0 {
- return 3
- }
- if b&0xF8 == 0xF0 {
- return 4
- }
- return 0
-
-}