diff --git a/kadai3-2/hioki-daichi/.gitignore b/kadai3-2/hioki-daichi/.gitignore
new file mode 100644
index 0000000..711ebad
--- /dev/null
+++ b/kadai3-2/hioki-daichi/.gitignore
@@ -0,0 +1,3 @@
+/parallel-download
+/coverage/
+/vendor/
diff --git a/kadai3-2/hioki-daichi/Gopkg.lock b/kadai3-2/hioki-daichi/Gopkg.lock
new file mode 100644
index 0000000..ba074e2
--- /dev/null
+++ b/kadai3-2/hioki-daichi/Gopkg.lock
@@ -0,0 +1,25 @@
+# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
+
+
+[[projects]]
+ branch = "master"
+ digest = "1:76ee51c3f468493aff39dbacc401e8831fbb765104cbf613b89bef01cf4bad70"
+ name = "golang.org/x/net"
+ packages = ["context"]
+ pruneopts = "UT"
+ revision = "4dfa2610cdf3b287375bbba5b8f2a14d3b01d8de"
+
+[[projects]]
+ branch = "master"
+ digest = "1:39ebcc2b11457b703ae9ee2e8cca0f68df21969c6102cb3b705f76cca0ea0239"
+ name = "golang.org/x/sync"
+ packages = ["errgroup"]
+ pruneopts = "UT"
+ revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca"
+
+[solve-meta]
+ analyzer-name = "dep"
+ analyzer-version = 1
+ input-imports = ["golang.org/x/sync/errgroup"]
+ solver-name = "gps-cdcl"
+ solver-version = 1
diff --git a/kadai3-2/hioki-daichi/Gopkg.toml b/kadai3-2/hioki-daichi/Gopkg.toml
new file mode 100644
index 0000000..d7072c2
--- /dev/null
+++ b/kadai3-2/hioki-daichi/Gopkg.toml
@@ -0,0 +1,30 @@
+# Gopkg.toml example
+#
+# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html
+# for detailed Gopkg.toml documentation.
+#
+# required = ["github.com/user/thing/cmd/thing"]
+# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
+#
+# [[constraint]]
+# name = "github.com/user/project"
+# version = "1.0.0"
+#
+# [[constraint]]
+# name = "github.com/user/project2"
+# branch = "dev"
+# source = "github.com/myfork/project2"
+#
+# [[override]]
+# name = "github.com/x/y"
+# version = "2.4.0"
+#
+# [prune]
+# non-go = false
+# go-tests = true
+# unused-packages = true
+
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/kadai3-2/hioki-daichi/LICENSE b/kadai3-2/hioki-daichi/LICENSE
new file mode 100644
index 0000000..218ee44
--- /dev/null
+++ b/kadai3-2/hioki-daichi/LICENSE
@@ -0,0 +1,2 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
diff --git a/kadai3-2/hioki-daichi/Makefile b/kadai3-2/hioki-daichi/Makefile
new file mode 100644
index 0000000..e4365c4
--- /dev/null
+++ b/kadai3-2/hioki-daichi/Makefile
@@ -0,0 +1,26 @@
+GOCMD=go
+GOBUILD=$(GOCMD) build
+GOCLEAN=$(GOCMD) clean
+GOTEST=$(GOCMD) test
+GOTOOL=$(GOCMD) tool
+GODOCCMD=godoc
+GODOCPORT=6060
+BINARY_NAME=parallel-download
+
+.PHONY: all build test cov clean doc
+all: test build
+build:
+ dep ensure
+ $(GOBUILD) -o $(BINARY_NAME) -v
+test:
+ $(GOTEST) ./...
+cov:
+ $(GOTEST) ./... -race -coverprofile=coverage/c.out -covermode=atomic
+ $(GOTOOL) cover -html=coverage/c.out -o coverage/index.html
+ open coverage/index.html
+clean:
+ $(GOCLEAN)
+ rm -f $(BINARY_NAME)
+doc:
+ (sleep 1; open http://localhost:$(GODOCPORT)/pkg/github.com/gopherdojo/dojo3) &
+ $(GODOCCMD) -http ":$(GODOCPORT)"
diff --git a/kadai3-2/hioki-daichi/README.md b/kadai3-2/hioki-daichi/README.md
new file mode 100644
index 0000000..131937a
--- /dev/null
+++ b/kadai3-2/hioki-daichi/README.md
@@ -0,0 +1,86 @@
+# parallel-download
+
+## Overview
+
+`parallel-download` is a command-line tool that downloads a web resource in parallel by splitting it into chunks with HTTP Range requests.
+
+The available options are listed below.
+
+| Option | Description |
+| --- | --- |
+| `-p` | Download the file using the specified number of parallel requests. (default 8) |
+| `-o` | Save the downloaded file to the specified path. (Overwrites the file if it already exists.) |
+| `-t` | Terminate the download when the specified duration has elapsed since it started. (default 30s) |
+
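+For example, to download with four parallel requests and a 10-second timeout (with the dummy server described below running):
+
+```shell
+$ go run main.go -p=4 -t=10s -o=gopher.png http://localhost:8080/foo.png
+```
+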
+## How to develop
+
+### 1. Install packages
+
+Execute `$ dep ensure` to install the dependencies.
+
+### 2. Start a dummy server
+
+Execute `$ ./bin/dummy_server.go` to start a dummy server that returns a Gopher image.
+
+```
+$ ./bin/dummy_server.go
+--------------------------------------------------------------------------------
+# Endpoint
+
+ GET /foo.png // Get a gopher image
+
+# Command-line options
+
+ -failure-rate int
+    	Percentage probability of returning an Internal Server Error.
+ -max-delay duration
+ Maximum time delay randomly applied from receiving a request until returning a response. (default 1s)
+ -port int
+ Port on which the dummy server listens. (default 8080)
+--------------------------------------------------------------------------------
+2018/09/30 00:25:05 Server starting on http://localhost:8080
+```
+
+### 3. Execute
+
+Execute the command, specifying the Gopher image endpoint of the dummy server (and any options you need).
+
+```
+$ go run main.go -p=3 -t=3s -o=bar.png http://localhost:8080/foo.png
+start HEAD request to get Content-Length
+got: Accept-Ranges: bytes
+got: Content-Length: 169406
+start GET request with header: "Range: bytes=112936-169405"
+start GET request with header: "Range: bytes=56468-112935"
+start GET request with header: "Range: bytes=0-56467"
+downloaded: "/var/folders/f8/1n0bk4tj4ll6clyj868k_nqh0000gn/T/parallel-download301219462/f4ec179a35"
+downloaded: "/var/folders/f8/1n0bk4tj4ll6clyj868k_nqh0000gn/T/parallel-download301219462/f8d59617fb"
+downloaded: "/var/folders/f8/1n0bk4tj4ll6clyj868k_nqh0000gn/T/parallel-download301219462/9e9b203414"
+concatenate downloaded files to tempfile: "/var/folders/f8/1n0bk4tj4ll6clyj868k_nqh0000gn/T/parallel-download301219462/814ff17dbf"
+rename "/var/folders/f8/1n0bk4tj4ll6clyj868k_nqh0000gn/T/parallel-download301219462/814ff17dbf" to "bar.png"
+completed: "bar.png"
+```
+
+## How to run the test
+
+```shell
+$ make test
+```
+
+## How to read GoDoc
+
+```shell
+$ make doc
+```
+
+## How to see code coverage
+
+```shell
+$ make cov
+```
diff --git a/kadai3-2/hioki-daichi/bin/dummy_server.go b/kadai3-2/hioki-daichi/bin/dummy_server.go
new file mode 100755
index 0000000..fa1350f
--- /dev/null
+++ b/kadai3-2/hioki-daichi/bin/dummy_server.go
@@ -0,0 +1,96 @@
+//usr/bin/env go run $0 $@ ; exit
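+// The line above makes this file directly executable from a shell: the shell
+// runs `/usr/bin/env go run` on the file and then exits, while the Go
+// toolchain simply treats the line as a comment.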
+
+package main
+
+import (
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "os"
+ "strconv"
+ "strings"
+ "time"
+)
+
+func main() {
+ rand.Seed(time.Now().UnixNano())
+
+	flg := flag.NewFlagSet("dummy_server", flag.ExitOnError)
+
+ port := flg.Int("port", 8080, "Port on which the dummy server listens.")
+	failureRate := flg.Int("failure-rate", 0, "Percentage probability of returning an Internal Server Error.")
+ maxDelay := flg.Duration("max-delay", time.Second, "Maximum time delay randomly applied from receiving a request until returning a response.")
+
+ flg.Parse(os.Args[1:])
+
+ fmt.Print(`--------------------------------------------------------------------------------
+# Endpoint
+
+ GET /foo.png // Get a gopher image
+
+# Command-line options
+
+`)
+ flg.PrintDefaults()
+ fmt.Println("--------------------------------------------------------------------------------")
+
+ contents := func() string {
+ b, err := ioutil.ReadFile("./downloading/testdata/foo.png")
+ if err != nil {
+ panic(err)
+ }
+ return string(b)
+ }()
+
+ handler := func(w http.ResponseWriter, req *http.Request) {
+		if req.Method != "HEAD" && *maxDelay > 0 {
+			time.Sleep(time.Duration(rand.Intn(int(*maxDelay))))
+		}
+
+ w.Header().Set("Accept-Ranges", "bytes")
+
+ var body string
+ var statusCode int
+ if req.Method == "GET" && rand.Intn(100) < *failureRate {
+ body = "Internal Server Error"
+ statusCode = http.StatusInternalServerError
+ } else {
+ body = func(req *http.Request) string {
+ rangeHeader := req.Header.Get("Range") // e.g. "bytes=0-99"
+ if rangeHeader == "" {
+ return contents
+ }
+ c := strings.Split(strings.Split(rangeHeader, "=")[1], "-")
+ min, _ := strconv.Atoi(c[0])
+ max, _ := strconv.Atoi(c[1])
+ return contents[min : max+1]
+ }(req)
+ statusCode = http.StatusPartialContent
+ }
+
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", len(body)))
+ w.WriteHeader(statusCode)
+ fmt.Fprint(w, body)
+
+ log.Printf("%s %s %d %s\n", req.Method, req.RequestURI, statusCode, req.Header.Get("Range"))
+ }
+
+ http.HandleFunc("/foo.png", handler)
+
+ if *failureRate > 0 {
+ log.Printf("Server starting with a failure rate of %d%% on http://localhost:%d\n", *failureRate, *port)
+ } else {
+ log.Printf("Server starting on http://localhost:%d\n", *port)
+ }
+
+ err := http.ListenAndServe(fmt.Sprintf(":%d", *port), nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
diff --git a/kadai3-2/hioki-daichi/downloading/downloading.go b/kadai3-2/hioki-daichi/downloading/downloading.go
new file mode 100644
index 0000000..bc82356
--- /dev/null
+++ b/kadai3-2/hioki-daichi/downloading/downloading.go
@@ -0,0 +1,313 @@
+/*
+Package downloading provides the parallel download functionality.
+*/
+package downloading
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+ "sync"
+ "time"
+
+ "github.com/gopherdojo/dojo3/kadai3-2/hioki-daichi/opt"
+ "github.com/gopherdojo/dojo3/kadai3-2/hioki-daichi/termination"
+ "golang.org/x/sync/errgroup"
+)
+
+var (
+ errResponseDoesNotIncludeAcceptRangesHeader = errors.New("response does not include Accept-Ranges header")
+ errValueOfAcceptRangesHeaderIsNotBytes = errors.New("the value of Accept-Ranges header is not bytes")
+ errNoContent = errors.New("no content")
+)
+
+// Downloader has the information for the download.
+type Downloader struct {
+ outStream io.Writer
+ url *url.URL
+ parallelism int
+ output string
+ timeout time.Duration
+}
+
+// NewDownloader generates Downloader based on Options.
+func NewDownloader(w io.Writer, opts *opt.Options) *Downloader {
+ return &Downloader{
+ outStream: w,
+ url: opts.URL,
+ parallelism: opts.Parallelism,
+ output: opts.Output,
+ timeout: opts.Timeout,
+ }
+}
+
+// Download downloads the resource in parallel and saves it to the output path.
+func (d *Downloader) Download(ctx context.Context) error {
+ ctx, cancel := context.WithTimeout(ctx, d.timeout)
+ defer cancel()
+
+ contentLength, err := d.getContentLength(ctx)
+ if err != nil {
+ return err
+ }
+
+ rangeHeaders := d.toRangeHeaders(contentLength)
+
+ tempDir, err := ioutil.TempDir("", "parallel-download")
+ if err != nil {
+ return err
+ }
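+	// Remove the temp dir on normal return (defer) and also on Ctrl+C (termination.CleanFunc).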
+ clean := func() { os.RemoveAll(tempDir) }
+ defer clean()
+ termination.CleanFunc(clean)
+
+ filenames, err := d.parallelDownload(ctx, rangeHeaders, tempDir)
+ if err != nil {
+ return err
+ }
+
+ filename, err := d.concat(filenames, tempDir)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(d.outStream, "rename %q to %q\n", filename, d.output)
+
+ err = os.Rename(filename, d.output)
+ if err != nil {
+ return err
+ }
+
+ fmt.Fprintf(d.outStream, "completed: %q\n", d.output)
+
+ return nil
+}
+
+// getContentLength returns the value of Content-Length received by making a HEAD request.
+func (d *Downloader) getContentLength(ctx context.Context) (int, error) {
+ fmt.Fprintf(d.outStream, "start HEAD request to get Content-Length\n")
+
+ req, err := http.NewRequest("HEAD", d.url.String(), nil)
+ if err != nil {
+ return 0, err
+ }
+ req = req.WithContext(ctx)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return 0, err
+	}
+	defer resp.Body.Close()
+
+ err = d.validateAcceptRangesHeader(resp)
+ if err != nil {
+ return 0, err
+ }
+
+ contentLength := int(resp.ContentLength)
+
+ fmt.Fprintf(d.outStream, "got: Content-Length: %d\n", contentLength)
+
+ if contentLength < 1 {
+ return 0, errNoContent
+ }
+
+ return contentLength, nil
+}
+
+// validateAcceptRangesHeader validates the following.
+// - The presence of an Accept-Ranges header
+// - The value of the Accept-Ranges header is "bytes"
+func (d *Downloader) validateAcceptRangesHeader(resp *http.Response) error {
+ acceptRangesHeader := resp.Header.Get("Accept-Ranges")
+
+ fmt.Fprintf(d.outStream, "got: Accept-Ranges: %s\n", acceptRangesHeader)
+
+ if acceptRangesHeader == "" {
+ return errResponseDoesNotIncludeAcceptRangesHeader
+ }
+
+ if acceptRangesHeader != "bytes" {
+ return errValueOfAcceptRangesHeaderIsNotBytes
+ }
+
+ return nil
+}
+
+// toRangeHeaders splits contentLength into d.parallelism contiguous Range header values.
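+// e.g. contentLength=10, parallelism=3 => []string{"bytes=0-2", "bytes=3-5", "bytes=6-9"}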
+func (d *Downloader) toRangeHeaders(contentLength int) []string {
+ parallelism := d.parallelism
+
+ // 1 <= parallelism <= Content-Length
+ if parallelism < 1 {
+ parallelism = 1
+ }
+ if contentLength < parallelism {
+ parallelism = contentLength
+ }
+
+ unitLength := contentLength / parallelism
+ remainingLength := contentLength % parallelism
+
+ rangeHeaders := make([]string, 0)
+
+ cntr := 0
+ for n := parallelism; n > 0; n-- {
+ min := cntr
+ max := cntr + unitLength - 1
+
+ // Add the remaining length to the last chunk
+ if n == 1 && remainingLength != 0 {
+ max += remainingLength
+ }
+
+ rangeHeaders = append(rangeHeaders, fmt.Sprintf("bytes=%d-%d", min, max))
+
+ cntr += unitLength
+ }
+
+ return rangeHeaders
+}
+
+// parallelDownload downloads the given ranges in parallel, saves each chunk under the specified dir, and returns a map from chunk index to filename.
+func (d *Downloader) parallelDownload(ctx context.Context, rangeHeaders []string, dir string) (map[int]string, error) {
+ filenames := map[int]string{}
+
+	// Buffered so that sender goroutines never block, even if receiving stops after an error.
+	filenameCh := make(chan map[int]string, len(rangeHeaders))
+	errCh := make(chan error, len(rangeHeaders))
+
+ for i, rangeHeader := range rangeHeaders {
+ go d.partialDownloadAndSendToChannel(ctx, i, rangeHeader, filenameCh, errCh, dir)
+ }
+
+ eg, ctx := errgroup.WithContext(ctx)
+ var mu sync.Mutex
+ for i := 0; i < len(rangeHeaders); i++ {
+ eg.Go(func() error {
+ select {
+ case <-ctx.Done():
+ return ctx.Err()
+ case m := <-filenameCh:
+ for k, v := range m {
+ mu.Lock()
+ filenames[k] = v
+ mu.Unlock()
+ }
+ return nil
+ case err := <-errCh:
+ return err
+ }
+ })
+ }
+
+ if err := eg.Wait(); err != nil {
+ return nil, err
+ }
+
+ return filenames, nil
+}
+
+// partialDownloadAndSendToChannel runs partialDownload and sends the resulting filename to filenameCh, or the error to errCh.
+func (d *Downloader) partialDownloadAndSendToChannel(ctx context.Context, i int, rangeHeader string, filenameCh chan<- map[int]string, errCh chan<- error, dir string) {
+ filename, err := d.partialDownload(ctx, rangeHeader, dir)
+ if err != nil {
+ errCh <- err
+ return
+ }
+
+	filenameCh <- map[int]string{i: filename}
+}
+
+// partialDownload sends a partial request with the specified rangeHeader,
+// saves the response body to a file under the specified dir,
+// and returns the filename.
+func (d *Downloader) partialDownload(ctx context.Context, rangeHeader string, dir string) (string, error) {
+ req, err := http.NewRequest("GET", d.url.String(), nil)
+ if err != nil {
+ return "", err
+ }
+ req = req.WithContext(ctx)
+
+ req.Header.Set("Range", rangeHeader)
+
+ fmt.Fprintf(d.outStream, "start GET request with header: \"Range: %s\"\n", rangeHeader)
+
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ if resp.StatusCode != http.StatusPartialContent {
+ return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode)
+ }
+
+	fp, err := os.Create(filepath.Join(dir, randomHexStr()))
+ if err != nil {
+ return "", err
+	}
+	defer fp.Close()
+
+ _, err = io.Copy(fp, resp.Body)
+ if err != nil {
+ return "", err
+ }
+
+ filename := fp.Name()
+
+ fmt.Fprintf(d.outStream, "downloaded: %q\n", filename)
+
+ return filename, nil
+}
+
+// concat concatenates the files in order based on the mapping of the specified filenames,
+// and creates the concatenated file under the specified dir,
+// and returns the filename.
+func (d *Downloader) concat(filenames map[int]string, dir string) (string, error) {
+ fp, err := os.Create(filepath.Join(dir, randomHexStr()))
+ if err != nil {
+ return "", err
+ }
+ defer fp.Close()
+
+ filename := fp.Name()
+
+ fmt.Fprintf(d.outStream, "concatenate downloaded files to tempfile: %q\n", filename)
+
+ for i := 0; i < len(filenames); i++ {
+ src, err := os.Open(filenames[i])
+ if err != nil {
+ return "", err
+ }
+
+		_, err = io.Copy(fp, src)
+		src.Close()
+		if err != nil {
+			return "", err
+		}
+ }
+
+ return filename, nil
+}
+
+// randomHexStr returns a random hex string of length 10,
+// which is long enough to make accidental collisions unlikely.
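+// e.g. "f4ec179a35"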
+func randomHexStr() string {
+ b := make([]byte, 5)
+ _, err := rand.Read(b)
+ if err != nil {
+ panic(err)
+ }
+ return fmt.Sprintf("%x", b)
+}
diff --git a/kadai3-2/hioki-daichi/downloading/downloading_test.go b/kadai3-2/hioki-daichi/downloading/downloading_test.go
new file mode 100644
index 0000000..8882343
--- /dev/null
+++ b/kadai3-2/hioki-daichi/downloading/downloading_test.go
@@ -0,0 +1,308 @@
+package downloading
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "regexp"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/gopherdojo/dojo3/kadai3-2/hioki-daichi/opt"
+)
+
+var registeredTestdatum = map[string]string{
+ "foo.png": readTestdata("foo.png"),
+ "a.txt": readTestdata("a.txt"),
+ "empty.txt": readTestdata("empty.txt"),
+}
+
+var currentTestdataName string
+
+func TestDownloading_Download_Success(t *testing.T) {
+ cases := map[string]struct {
+ parallelism int
+ currentTestdataName string
+ }{
+ "normal": {parallelism: 3, currentTestdataName: "foo.png"},
+ "parallelism < 1": {parallelism: 0, currentTestdataName: "a.txt"},
+ "contentLength < parallelism": {parallelism: 4, currentTestdataName: "a.txt"},
+ }
+
+ for n, c := range cases {
+ c := c
+ t.Run(n, func(t *testing.T) {
+ parallelism := c.parallelism
+ currentTestdataName = c.currentTestdataName
+
+ output, clean := createTempOutput(t)
+ defer clean()
+
+ ts, clean := newTestServer(t, normalHandler)
+ defer clean()
+
+ err := newDownloader(t, output, ts, parallelism).Download(context.Background())
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+ })
+ }
+
+}
+
+func TestDownloading_Download_NoContent(t *testing.T) {
+ expected := errNoContent
+
+ currentTestdataName = "empty.txt"
+
+ output, clean := createTempOutput(t)
+ defer clean()
+
+ ts, clean := newTestServer(t, normalHandler)
+ defer clean()
+
+ actual := newDownloader(t, output, ts, 1).Download(context.Background())
+ if actual != expected {
+ t.Errorf(`unexpected error: expected: "%s" actual: "%s"`, expected, actual)
+ }
+}
+
+func TestDownloading_Download_AcceptRangesHeaderNotFound(t *testing.T) {
+ expected := errResponseDoesNotIncludeAcceptRangesHeader
+
+ output, clean := createTempOutput(t)
+ defer clean()
+
+ ts, clean := newTestServer(t, func(t *testing.T, w http.ResponseWriter, r *http.Request) { fmt.Fprint(w, "") })
+ defer clean()
+
+ actual := newDownloader(t, output, ts, 8).Download(context.Background())
+ if actual != expected {
+ t.Errorf(`unexpected error: expected: "%s" actual: "%s"`, expected, actual)
+ }
+}
+
+func TestDownloading_Download_AcceptRangesHeaderSupportsBytesOnly(t *testing.T) {
+ expected := errValueOfAcceptRangesHeaderIsNotBytes
+
+ output, clean := createTempOutput(t)
+ defer clean()
+
+ ts, clean := newTestServer(t, func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Accept-Ranges", "none")
+ fmt.Fprint(w, "")
+ })
+ defer clean()
+
+ actual := newDownloader(t, output, ts, 8).Download(context.Background())
+ if actual != expected {
+ t.Errorf(`unexpected error: expected: "%s" actual: "%s"`, expected, actual)
+ }
+}
+
+func TestDownloading_Download_BadRequest(t *testing.T) {
+ expected := "unexpected status code: 400"
+
+ output, clean := createTempOutput(t)
+ defer clean()
+
+ ts, clean := newTestServer(t, func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Accept-Ranges", "bytes")
+ w.WriteHeader(http.StatusBadRequest)
+ fmt.Fprint(w, "bad request")
+ })
+ defer clean()
+
+ err := newDownloader(t, output, ts, 8).Download(context.Background())
+ if err == nil {
+ t.Fatalf("unexpectedly err is nil")
+ }
+ actual := err.Error()
+ if actual != expected {
+ t.Errorf(`unexpected error: expected: "%s" actual: "%s"`, expected, actual)
+ }
+}
+
+func TestDownloading_Download_RenameError(t *testing.T) {
+ currentTestdataName = "foo.png"
+
+ ts, clean := newTestServer(t, normalHandler)
+ defer clean()
+
+ err := newDownloader(t, "/non/existent/path", ts, 1).Download(context.Background())
+ if err == nil {
+ t.Fatal("unexpectedly err is nil")
+ }
+
+ if !regexp.MustCompile("/non/existent/path: no such file or directory").MatchString(err.Error()) {
+ t.Errorf("unexpectedly not matched: %s", err.Error())
+ }
+}
+
+func TestDownloading_getContentLength_DoError(t *testing.T) {
+ expected := `Head A: unsupported protocol scheme ""`
+
+ ts, clean := newTestServer(t, noopHandler)
+ defer clean()
+
+ d := newDownloader(t, "path/to/output", ts, 2)
+
+ u, err := url.Parse("A")
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+
+ d.url = u
+
+ _, err = d.getContentLength(context.Background())
+
+ actual := err.Error()
+ if actual != expected {
+ t.Errorf(`unexpected error: expected: "%s" actual: "%s"`, expected, actual)
+ }
+}
+
+func TestDownloading_partialDownload_osCreateError(t *testing.T) {
+ ts, clean := newTestServer(t, func(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(http.StatusPartialContent)
+ })
+ defer clean()
+
+ _, err := newDownloader(t, "", ts, 2).partialDownload(context.Background(), "bytes=0-1", "non/existent/path")
+ if !regexp.MustCompile("no such file or directory").MatchString(err.Error()) {
+ t.Errorf("unexpectedly not matched: %s", err.Error())
+ }
+}
+
+func TestDownloading_concat_osCreateError(t *testing.T) {
+ ts, clean := newTestServer(t, noopHandler)
+ defer clean()
+
+ d := newDownloader(t, "", ts, 2)
+ _, err := d.concat(map[int]string{}, "non/existent/path")
+
+ if !regexp.MustCompile("no such file or directory").MatchString(err.Error()) {
+ t.Errorf("unexpectedly not matched: %s", err.Error())
+ }
+}
+
+func TestDownloading_concat_osOpenError(t *testing.T) {
+ ts, clean := newTestServer(t, noopHandler)
+ defer clean()
+
+ dir, err := ioutil.TempDir("", "parallel-download")
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+ defer os.RemoveAll(dir)
+
+ _, err = newDownloader(t, "", ts, 2).concat(map[int]string{0: "non/existent/path"}, dir)
+
+ if !regexp.MustCompile("no such file or directory").MatchString(err.Error()) {
+ t.Errorf("unexpectedly not matched: %s", err.Error())
+ }
+}
+
+func newTestServer(t *testing.T, handler func(t *testing.T, w http.ResponseWriter, r *http.Request)) (*httptest.Server, func()) {
+ t.Helper()
+
+ ts := httptest.NewServer(http.HandlerFunc(
+ func(w http.ResponseWriter, r *http.Request) {
+ handler(t, w, r)
+ },
+ ))
+
+ return ts, func() { ts.Close() }
+}
+
+func normalHandler(t *testing.T, w http.ResponseWriter, r *http.Request) {
+ t.Helper()
+
+ w.Header().Set("Accept-Ranges", "bytes")
+
+ rangeHdr := r.Header.Get("Range")
+
+ body := func() string {
+ if rangeHdr == "" {
+ return registeredTestdatum[currentTestdataName]
+ }
+
+ eqlSplitVals := strings.Split(rangeHdr, "=")
+ if eqlSplitVals[0] != "bytes" {
+ t.Fatalf("err %s", eqlSplitVals[0])
+ }
+
+ c := strings.Split(eqlSplitVals[1], "-")
+
+ min, err := strconv.Atoi(c[0])
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+
+ max, err := strconv.Atoi(c[1])
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+
+ return registeredTestdatum[currentTestdataName][min : max+1]
+ }()
+
+ w.Header().Set("Content-Length", fmt.Sprintf("%d", len(body)))
+
+ w.WriteHeader(http.StatusPartialContent)
+
+ fmt.Fprint(w, body)
+}
+
+func noopHandler(t *testing.T, w http.ResponseWriter, r *http.Request) {}
+
+func newDownloader(t *testing.T, output string, ts *httptest.Server, parallelism int) *Downloader {
+ t.Helper()
+
+ opts := &opt.Options{
+ Parallelism: parallelism,
+ Output: output,
+ URL: mustParseRequestURI(t, ts.URL),
+ Timeout: 60 * time.Second,
+ }
+
+ return NewDownloader(ioutil.Discard, opts)
+}
+
+func mustParseRequestURI(t *testing.T, s string) *url.URL {
+ t.Helper()
+
+ u, err := url.ParseRequestURI(s)
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+
+ return u
+}
+
+func readTestdata(filename string) string {
+ b, err := ioutil.ReadFile(path.Join("testdata", filename))
+ if err != nil {
+ panic(err)
+ }
+ return string(b)
+}
+
+func createTempOutput(t *testing.T) (string, func()) {
+ t.Helper()
+
+ dir, err := ioutil.TempDir("", "parallel-download")
+ if err != nil {
+ panic(err)
+ }
+
+ return filepath.Join(dir, "output.txt"), func() { os.RemoveAll(dir) }
+}
diff --git a/kadai3-2/hioki-daichi/downloading/testdata/a.txt b/kadai3-2/hioki-daichi/downloading/testdata/a.txt
new file mode 100644
index 0000000..7898192
--- /dev/null
+++ b/kadai3-2/hioki-daichi/downloading/testdata/a.txt
@@ -0,0 +1 @@
+a
diff --git a/kadai3-2/hioki-daichi/downloading/testdata/b.txt b/kadai3-2/hioki-daichi/downloading/testdata/b.txt
new file mode 100644
index 0000000..6178079
--- /dev/null
+++ b/kadai3-2/hioki-daichi/downloading/testdata/b.txt
@@ -0,0 +1 @@
+b
diff --git a/kadai3-2/hioki-daichi/downloading/testdata/empty.txt b/kadai3-2/hioki-daichi/downloading/testdata/empty.txt
new file mode 100644
index 0000000..e69de29
diff --git a/kadai3-2/hioki-daichi/downloading/testdata/foo.png b/kadai3-2/hioki-daichi/downloading/testdata/foo.png
new file mode 100644
index 0000000..b5f8d01
Binary files /dev/null and b/kadai3-2/hioki-daichi/downloading/testdata/foo.png differ
diff --git a/kadai3-2/hioki-daichi/main.go b/kadai3-2/hioki-daichi/main.go
new file mode 100644
index 0000000..25e13a5
--- /dev/null
+++ b/kadai3-2/hioki-daichi/main.go
@@ -0,0 +1,33 @@
+package main
+
+import (
+ "context"
+ "io"
+ "log"
+ "os"
+
+ "github.com/gopherdojo/dojo3/kadai3-2/hioki-daichi/downloading"
+ "github.com/gopherdojo/dojo3/kadai3-2/hioki-daichi/opt"
+ "github.com/gopherdojo/dojo3/kadai3-2/hioki-daichi/termination"
+)
+
+func main() {
+ err := execute(os.Stdout, os.Args[1:])
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func execute(w io.Writer, args []string) error {
+ ctx := context.Background()
+
+ ctx, clean := termination.Listen(ctx, w)
+ defer clean()
+
+ opts, err := opt.Parse(args...)
+ if err != nil {
+ return err
+ }
+
+ return downloading.NewDownloader(w, opts).Download(ctx)
+}
diff --git a/kadai3-2/hioki-daichi/opt/opt.go b/kadai3-2/hioki-daichi/opt/opt.go
new file mode 100644
index 0000000..64382fe
--- /dev/null
+++ b/kadai3-2/hioki-daichi/opt/opt.go
@@ -0,0 +1,54 @@
+/*
+Package opt deals with CLI options.
+*/
+package opt
+
+import (
+ "flag"
+ "net/url"
+ "path"
+ "time"
+)
+
+// Options has the options required for parallel-download.
+type Options struct {
+ Parallelism int
+ Output string
+ URL *url.URL
+ Timeout time.Duration
+}
+
+// Parse parses command-line args and returns the resulting Options.
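+// e.g. Parse("-p=3", "-t=3s", "-o=bar.png", "http://localhost:8080/foo.png")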
+func Parse(args ...string) (*Options, error) {
+ flg := flag.NewFlagSet("parallel-download", flag.ExitOnError)
+
+	parallelism := flg.Int("p", 8, "Download the file using the specified number of parallel requests.")
+	output := flg.String("o", "", "Save the downloaded file to the specified path. (Overwrites the file if it already exists.)")
+	timeout := flg.Duration("t", 30*time.Second, "Terminate the download when the specified duration has elapsed since it started.")
+
+ flg.Parse(args)
+
+ u, err := url.ParseRequestURI(flg.Arg(0))
+ if err != nil {
+ return nil, err
+ }
+
+ if *output == "" {
+ _, filename := path.Split(u.Path)
+
+ // Inspired by the --default-page option of wget
+ if filename == "" {
+ filename = "index.html"
+ }
+
+ *output = filename
+ }
+
+ return &Options{
+ Parallelism: *parallelism,
+ Output: *output,
+ URL: u,
+ Timeout: *timeout,
+ }, nil
+}
diff --git a/kadai3-2/hioki-daichi/opt/opt_test.go b/kadai3-2/hioki-daichi/opt/opt_test.go
new file mode 100644
index 0000000..0733a79
--- /dev/null
+++ b/kadai3-2/hioki-daichi/opt/opt_test.go
@@ -0,0 +1,71 @@
+package opt
+
+import (
+ "net/url"
+ "reflect"
+ "testing"
+)
+
+func TestMain_parse(t *testing.T) {
+ cases := map[string]struct {
+ args []string
+ expectedParallelism int
+ expectedOutput string
+ expectedURLString string
+ }{
+ "no options": {args: []string{"http://example.com/foo.png"}, expectedParallelism: 8, expectedOutput: "foo.png", expectedURLString: "http://example.com/foo.png"},
+ "-p=2": {args: []string{"-p=2", "http://example.com/foo.png"}, expectedParallelism: 2, expectedOutput: "foo.png", expectedURLString: "http://example.com/foo.png"},
+ "-o=bar.png": {args: []string{"-o=bar.png", "http://example.com/foo.png"}, expectedParallelism: 8, expectedOutput: "bar.png", expectedURLString: "http://example.com/foo.png"},
+ "index.html": {args: []string{"http://example.com/"}, expectedParallelism: 8, expectedOutput: "index.html", expectedURLString: "http://example.com/"},
+ }
+
+ for n, c := range cases {
+ c := c
+ t.Run(n, func(t *testing.T) {
+ args := c.args
+ expectedParallelism := c.expectedParallelism
+ expectedOutput := c.expectedOutput
+ expectedURL, err := url.ParseRequestURI(c.expectedURLString)
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+
+ opts, err := Parse(args...)
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+
+ actualParallelism := opts.Parallelism
+ actualOutput := opts.Output
+ actualURL := opts.URL
+
+ if actualParallelism != expectedParallelism {
+ t.Errorf(`unexpected parallelism: expected: %d actual: %d`, expectedParallelism, actualParallelism)
+ }
+
+ if actualOutput != expectedOutput {
+ t.Errorf(`unexpected output: expected: "%s" actual: "%s"`, expectedOutput, actualOutput)
+ }
+
+ if !reflect.DeepEqual(actualURL, expectedURL) {
+ t.Errorf(`unexpected URL: expected: "%s" actual: "%s"`, expectedURL, actualURL)
+ }
+ })
+ }
+}
+
+func TestMain_parse_InvalidURL(t *testing.T) {
+ t.Parallel()
+
+ expected := "parse %: invalid URI for request"
+
+ _, err := Parse([]string{"%"}...)
+ if err == nil {
+ t.Fatal("Unexpectedly err was nil")
+ }
+
+ actual := err.Error()
+ if actual != expected {
+ t.Errorf(`unexpected error: expected: "%s" actual: "%s"`, expected, actual)
+ }
+}
diff --git a/kadai3-2/hioki-daichi/termination/termination.go b/kadai3-2/hioki-daichi/termination/termination.go
new file mode 100644
index 0000000..b2b2b6c
--- /dev/null
+++ b/kadai3-2/hioki-daichi/termination/termination.go
@@ -0,0 +1,43 @@
+/*
+Package termination deals with Ctrl+C termination.
+*/
+package termination
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "os"
+ "os/signal"
+ "syscall"
+)
+
+var cleanFns []func()
+
+// for testing
+var osExit = os.Exit
+
+// Listen listens for interrupt and termination signals; when one arrives it cancels the returned context, runs the registered clean functions, and exits.
+func Listen(ctx context.Context, w io.Writer) (context.Context, func()) {
+ ctx, cancel := context.WithCancel(ctx)
+
+ ch := make(chan os.Signal, 2)
+ signal.Notify(ch, os.Interrupt, syscall.SIGTERM)
+ go func() {
+ <-ch
+ fmt.Fprintln(w, "\rCtrl+C pressed in Terminal")
+ cancel()
+ for _, f := range cleanFns {
+ f()
+ }
+ osExit(0)
+ }()
+
+ return ctx, cancel
+}
+
+// CleanFunc registers clean function.
+func CleanFunc(f func()) {
+ cleanFns = append(cleanFns, f)
+}
diff --git a/kadai3-2/hioki-daichi/termination/termination_test.go b/kadai3-2/hioki-daichi/termination/termination_test.go
new file mode 100644
index 0000000..d78d5f1
--- /dev/null
+++ b/kadai3-2/hioki-daichi/termination/termination_test.go
@@ -0,0 +1,36 @@
+package termination
+
+import (
+ "context"
+ "io/ioutil"
+ "os"
+ "testing"
+ "time"
+)
+
+func TestTermination_Listen(t *testing.T) {
+ CleanFunc(func() {})
+
+ doneCh := make(chan struct{})
+ osExit = func(code int) { doneCh <- struct{}{} }
+
+ _, clean := Listen(context.Background(), ioutil.Discard)
+ defer clean()
+
+ proc, err := os.FindProcess(os.Getpid())
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+
+ err = proc.Signal(os.Interrupt)
+ if err != nil {
+ t.Fatalf("err %s", err)
+ }
+
+ select {
+ case <-doneCh:
+ return
+ case <-time.After(100 * time.Millisecond):
+ t.Fatal("timeout")
+ }
+}