// Copyright (c) 2017-2022 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package containers

import (
	"bufio"
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"os"
	"os/exec"
	"regexp"
	"sort"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/onsi/ginkgo"

	//nolint:staticcheck // Ignore ST1001: should not use dot imports
	. "github.com/onsi/gomega"
	log "github.com/sirupsen/logrus"

	"github.com/projectcalico/calico/felix/fv/connectivity"
	"github.com/projectcalico/calico/felix/fv/tcpdump"
	"github.com/projectcalico/calico/felix/fv/utils"
	"github.com/projectcalico/calico/libcalico-go/lib/set"
	"github.com/projectcalico/calico/libcalico-go/lib/testutils/stacktrace"
)

// Container wraps a single docker container used by the FV tests.  It tracks
// the container's addresses, the in-flight 'docker run' process, and the
// goroutines that stream the container's logs into the test output.
type Container struct {
	Name           string
	IP             string
	ExtraSourceIPs []string
	IPPrefix       string
	IPv6           string
	IPv6Prefix     string
	Hostname       string
	runCmd         *exec.Cmd // The 'docker run'/'docker start' process; nil once it has exited.
	Stdin          io.WriteCloser

	// mutex guards the mutable state below (and runCmd): watches, data
	// races and the dropAllLogs flag are touched from the log-copy
	// goroutines as well as the test goroutine.
	mutex                 sync.Mutex
	binaries              set.Set[string]
	stdoutWatches         []*watch // Regex watches triggered by stdout lines.
	stderrWatches         []*watch // Regex watches triggered by stderr lines.
	dataRaces             []string // Captured race-detector warning texts.
	raceDetectionDisabled bool     // If true, DataRaces() reports nothing.

	logFinished      sync.WaitGroup // Done when both log-copy goroutines have finished.
	dropAllLogs      bool           // When true, forwarded log lines are discarded.
	ignoreEmptyLines bool           // Skip whitespace-only log lines.
	logLimitBytes    int            // Truncate each stream's logs after this many bytes (0 = unlimited).
}

// watch pairs a regular expression with a channel that is closed the first
// time a log line matches the expression.
type watch struct {
	regexp *regexp.Regexp
	c      chan struct{} // Closed (and then set to nil) on first match.
}

// containerIdx is incremented by UniqueName so each container started by this
// test process gets a unique name.
var containerIdx = 0

// StopLogs tells the log-copy goroutines to discard any further output from
// the container's stdout/stderr.  Safe to call on a nil container.
func (c *Container) StopLogs() {
	if c == nil {
		log.Info("StopLogs no-op because nil container")
		return
	}

	c.mutex.Lock()
	defer c.mutex.Unlock()
	c.dropAllLogs = true
}

// Stop stops the container and blocks until it is no longer listed in
// 'docker ps' and both log streams have been drained.  It escalates:
// 'docker stop', then SIGINT to the 'docker run' process, then 'docker kill'
// and SIGKILL if the container hasn't gone away within a couple of seconds.
// Safe to call on a nil container or one that isn't running.
func (c *Container) Stop() {
	if c == nil {
		log.Info("Stop no-op because nil container")
		return
	}

	logCxt := log.WithField("container", c.Name)
	c.mutex.Lock()
	if c.runCmd == nil {
		logCxt.Info("Stop no-op because container is not running")
		c.mutex.Unlock()
		return
	}
	c.mutex.Unlock()

	logCxt.Info("Stopping...")

	// Ask docker to stop the container.
	withTimeoutPanic(logCxt, 30*time.Second, c.execDockerStop)
	// Shut down the docker run process (if needed).
	withTimeoutPanic(logCxt, 5*time.Second, func() { c.signalDockerRun(os.Interrupt) })

	// Wait for the container to exit, then escalate to killing it.
	startTime := time.Now()
	for {
		if !c.ListedInDockerPS() {
			// Container has stopped.  Make sure the docker CLI command is dead (it should be already)
			// and wait for its log.
			logCxt.Info("Container stopped (no longer listed in 'docker ps')")
			withTimeoutPanic(logCxt, 5*time.Second, func() { c.signalDockerRun(os.Kill) })
			withTimeoutPanic(logCxt, 10*time.Second, func() { c.logFinished.Wait() })
			return
		}
		if time.Since(startTime) > 2*time.Second {
			logCxt.Info("Container didn't stop, asking docker to kill it")
			// `docker kill` asks the docker daemon to kill the container but, on a
			// resource constrained system, we've seen that fail because the CLI command
			// was blocked so we kill the CLI command too.
			err := exec.Command("docker", "kill", c.Name).Run()
			logCxt.WithError(err).Info("Ran 'docker kill'")
			withTimeoutPanic(logCxt, 5*time.Second, func() { c.signalDockerRun(os.Kill) })
			break
		}
		time.Sleep(200 * time.Millisecond)
	}
	// We escalated to 'docker kill'; wait for the container to disappear and
	// for the log streams to finish.
	c.WaitNotRunning(60 * time.Second)
	withTimeoutPanic(logCxt, 5*time.Second, func() { c.signalDockerRun(os.Kill) })
	withTimeoutPanic(logCxt, 10*time.Second, func() { c.logFinished.Wait() })

	logCxt.Info("Container stopped")
}

// withTimeoutPanic runs f and panics (via the logger, so the failure is
// attributed to the test) if f does not complete within t.
func withTimeoutPanic(logCxt *log.Entry, t time.Duration, f func()) {
	finished := make(chan struct{})
	go func() {
		defer close(finished)
		f()
	}()

	select {
	case <-time.After(t):
		logCxt.Panic("Timeout!")
	case <-finished:
	}
}

// execDockerStop runs 'docker stop -t0' against this container, logging the
// outcome but not failing the test if the command errors.
func (c *Container) execDockerStop() {
	logCxt := log.WithField("container", c.Name)
	logCxt.Info("Executing 'docker stop'")
	stopCmd := exec.Command("docker", "stop", "-t0", c.Name)
	if err := stopCmd.Run(); err != nil {
		logCxt.WithError(err).WithField("cmd", stopCmd).Error("docker stop command failed")
		return
	}
	logCxt.Info("'docker stop' returned success")
}

// signalDockerRun sends sig to the 'docker run' process, if it is still
// running; a no-op otherwise.
func (c *Container) signalDockerRun(sig os.Signal) {
	logCxt := log.WithFields(log.Fields{
		"container": c.Name,
		"signal":    sig,
	})
	logCxt.Info("Sending signal to 'docker run' process")
	c.mutex.Lock()
	defer c.mutex.Unlock()
	if c.runCmd == nil {
		// Process already reaped; nothing to signal.
		return
	}
	if err := c.runCmd.Process.Signal(sig); err != nil {
		logCxt.WithError(err).Error("failed to signal 'docker run' process")
		return
	}
	logCxt.Info("Signalled docker run")
}

// Signal forwards sig to the container's 'docker run' process (if any).
func (c *Container) Signal(sig os.Signal) {
	c.signalDockerRun(sig)
}

// RunOpts holds the options accepted by Run/RunWithFixedName.
type RunOpts struct {
	AutoRemove       bool       // Pass --rm to 'docker run'.
	WithStdinPipe    bool       // Attach a pipe to the container's stdin (exposed as Container.Stdin).
	IgnoreEmptyLines bool       // Drop whitespace-only log lines.
	SameNamespace    *Container // If non-nil, share this container's network namespace.
	StopTimeoutSecs  int        // Passed to docker's --stop-timeout.
	StopSignal       string     // Passed to docker's --stop-signal, if non-empty.
	LogLimitBytes    int        // Truncate each stream's logs after this many bytes (0 = unlimited).
}

// NextContainerIndex returns the index that UniqueName will assign to the
// next container, without reserving it.
func NextContainerIndex() int {
	next := containerIdx + 1
	return next
}

// Run starts a new container whose name is derived from namePrefix via
// UniqueName, forwarding opts and args to RunWithFixedName.
func Run(namePrefix string, opts RunOpts, args ...string) (c *Container) {
	return RunWithFixedName(UniqueName(namePrefix), opts, args...)
}

// UniqueName reserves and returns a fresh container name, combining the
// prefix with this process's PID and a monotonically increasing index.
func UniqueName(namePrefix string) string {
	containerIdx++
	return fmt.Sprintf("%v-%d-%d-felixfv", namePrefix, os.Getpid(), containerIdx)
}

// RunWithFixedName starts a container with exactly the given name, wires its
// stdout/stderr into the test log, waits until docker lists it as running and
// then fills in its IP/hostname details.  args are appended verbatim to the
// 'docker run' command line (typically the image plus its arguments).
func RunWithFixedName(name string, opts RunOpts, args ...string) (c *Container) {
	c = &Container{
		Name:             name,
		ignoreEmptyLines: opts.IgnoreEmptyLines,
		logLimitBytes:    opts.LogLimitBytes,
	}

	// Prep command to run the container.
	log.WithField("container", c).Info("About to run container")
	runArgs := []string{"run", "--init", "--cgroupns", "host", "--name", c.Name, "--stop-timeout", fmt.Sprint(opts.StopTimeoutSecs)}

	if opts.StopSignal != "" {
		runArgs = append(runArgs, "--stop-signal", opts.StopSignal)
	}

	if opts.AutoRemove {
		runArgs = append(runArgs, "--rm")
	}

	if opts.SameNamespace != nil {
		runArgs = append(runArgs, "--network=container:"+opts.SameNamespace.Name)
	} else {
		runArgs = append(runArgs, "--hostname", c.Name)
	}

	// Add remaining args
	runArgs = append(runArgs, args...)

	c.runCmd = utils.Command("docker", runArgs...)

	if opts.WithStdinPipe {
		var err error
		c.Stdin, err = c.runCmd.StdinPipe()
		Expect(err).NotTo(HaveOccurred())
	}

	// Get the command's output pipes, so we can merge those into the test's own logging.
	stdout, err := c.runCmd.StdoutPipe()
	Expect(err).NotTo(HaveOccurred())
	stderr, err := c.runCmd.StderrPipe()
	Expect(err).NotTo(HaveOccurred())

	// Start the container running.
	err = c.runCmd.Start()
	Expect(err).NotTo(HaveOccurred())

	// Merge container's output into our own logging.
	c.logFinished.Add(2)

	go c.copyOutputToLog("stdout", stdout, &c.logFinished, &c.stdoutWatches, nil)
	go c.copyOutputToLog("stderr", stderr, &c.logFinished, &c.stderrWatches, nil)

	// Note: it might take a long time for the container to start running, e.g. if the image
	// needs to be downloaded.
	c.WaitUntilRunning()

	// Fill in rest of container struct.
	c.IP = c.GetIP()
	c.IPPrefix = c.GetIPPrefix()
	c.IPv6 = c.GetIPv6()
	c.IPv6Prefix = c.GetIPv6Prefix()
	c.Hostname = c.GetHostname()
	c.binaries = set.New[string]()
	log.WithField("container", c).Info("Container now running")
	return
}

// WatchStderrFor returns a channel that is closed the first time a line of
// the container's stderr matches re.
func (c *Container) WatchStderrFor(re *regexp.Regexp) chan struct{} {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	log.WithFields(log.Fields{
		"container": c.Name,
		"regex":     re,
	}).Info("Start watching stderr")

	triggered := make(chan struct{})
	w := &watch{regexp: re, c: triggered}
	c.stderrWatches = append(c.stderrWatches, w)
	return triggered
}

// WatchStdoutFor returns a channel that is closed the first time a line of
// the container's stdout matches re.
func (c *Container) WatchStdoutFor(re *regexp.Regexp) chan struct{} {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	log.WithFields(log.Fields{
		"container": c.Name,
		"regex":     re,
	}).Info("Start watching stdout")

	triggered := make(chan struct{})
	w := &watch{regexp: re, c: triggered}
	c.stdoutWatches = append(c.stdoutWatches, w)
	return triggered
}

// Start executes "docker start" on a container. Useful when used after Stop()
// to restart a container.
// Start executes "docker start" on a container. Useful when used after Stop()
// to restart a container.  Reattaches the log-copy goroutines so the
// container's output keeps flowing into the test log.
func (c *Container) Start() {
	c.runCmd = utils.Command("docker", "start", "--attach", c.Name)

	stdout, err := c.runCmd.StdoutPipe()
	Expect(err).NotTo(HaveOccurred())
	stderr, err := c.runCmd.StderrPipe()
	Expect(err).NotTo(HaveOccurred())

	// Start the container running.
	err = c.runCmd.Start()
	Expect(err).NotTo(HaveOccurred())

	// Merge container's output into our own logging.
	c.logFinished.Add(2)
	go c.copyOutputToLog("stdout", stdout, &c.logFinished, &c.stdoutWatches, nil)
	// Pass the stderr watches (previously nil here), so WatchStderrFor also
	// fires for restarted containers, matching RunWithFixedName's behaviour.
	go c.copyOutputToLog("stderr", stderr, &c.logFinished, &c.stderrWatches, nil)

	c.WaitUntilRunning()

	log.WithField("container", c).Info("Container now running")
}

// Remove deletes a container. Should be manually called after a non-auto-removed container
// is stopped.
// Remove deletes a container. Should be manually called after a non-auto-removed container
// is stopped.  The deletion runs in the background; a goroutine reaps the
// 'docker rm' process.
func (c *Container) Remove() {
	c.runCmd = utils.Command("docker", "rm", "-f", c.Name)
	log.WithField("container", c).Info("Removing... container.")
	// Do the deletion in the background so we don't hold things up.
	rmCmd := c.runCmd
	err := rmCmd.Start()
	Expect(err).NotTo(HaveOccurred())

	// Make sure we wait on the deletion process.
	go func() {
		if waitErr := rmCmd.Wait(); waitErr != nil {
			log.WithError(waitErr).Infof("Error from docker rm -f %s.", c.Name)
			return
		}
		log.Infof("Container removed: %s.", c.Name)
	}()
}

// copyOutputToLog reads stream line by line until EOF, forwarding each line
// to the ginkgo test log (subject to the drop-all-logs flag and the
// per-stream byte limit), capturing Go race-detector warnings into
// data-races.log and c.dataRaces, and triggering any registered watches.
// watches may be nil (no watch processing); extraWriter, if non-nil, receives
// a copy of every raw line.  Calls done.Done() when the stream ends.
func (c *Container) copyOutputToLog(streamName string, stream io.Reader, done *sync.WaitGroup, watches *[]*watch, extraWriter io.Writer) {
	defer done.Done()
	scanner := bufio.NewScanner(stream)
	scanner.Buffer(nil, 10*1024*1024) // Increase maximum buffer size (but don't pre-alloc).

	// Felix is configured with the race detector enabled. When the race detector fires, we get output like this:
	//
	// ==================
	// WARNING: DATA RACE
	// <stack trace>
	// ==================
	//
	// We capture that output and emit it to a dedicated log file so that the CI job can save it off.
	// foundDataRace is set to true when we see the WARNING line and then it is set back to false when we
	// see the trailing "==================".  We collect the text of the warning in dataRaceText.
	//
	// We do this for all containers because we already have the machinery here.
	foundDataRace := false
	dataRaceText := ""
	dataRaceFile, err := os.OpenFile("data-races.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		log.WithError(err).Error("Failed to open data race log file.")
	}
	defer func() {
		err := dataRaceFile.Close()
		Expect(err).NotTo(HaveOccurred(), "Failed to write to data race log (close).")
	}()

	bytesSeen := 0
	for scanner.Scan() {
		line := scanner.Text()
		if extraWriter != nil {
			_, err := extraWriter.Write([]byte(line + "\n"))
			if err != nil {
				log.WithError(err).Error("Failed to write to extra writer.")
			}
		}

		if c.ignoreEmptyLines && strings.Trim(line, " \r\n\t") == "" {
			continue
		}

		// Check if we're dropping logs (e.g. because we're tearing down the container at the end of the test).
		c.mutex.Lock()
		droppingLogs := c.dropAllLogs
		c.mutex.Unlock()

		// Or because we hit the limit.
		wasDropping := c.logLimitBytes > 0 && bytesSeen > c.logLimitBytes
		bytesSeen += len(line)
		nowDropping := c.logLimitBytes > 0 && bytesSeen > c.logLimitBytes
		if nowDropping {
			if !wasDropping {
				// We just hit the limit.
				fmt.Fprintf(ginkgo.GinkgoWriter, "%v[%v] %v\n", c.Name, streamName, "...truncated...")
			}
			droppingLogs = true
		}

		if !droppingLogs {
			fmt.Fprintf(ginkgo.GinkgoWriter, "%v[%v] %v\n", c.Name, streamName, line)
		}

		// Capture data race warnings and log to file.
		if strings.Contains(line, "WARNING: DATA RACE") {
			_, err := fmt.Fprintf(dataRaceFile, "Detected data race (in %s) while running test: %s\n",
				c.Name, ginkgo.CurrentGinkgoTestDescription().FullTestText)
			Expect(err).NotTo(HaveOccurred(), "Failed to write to data race log.")
			foundDataRace = true
		}
		if foundDataRace {
			var err error
			if strings.Contains(line, "==================") {
				// End of the warning block; stash the collected text.
				foundDataRace = false
				c.mutex.Lock()
				c.dataRaces = append(c.dataRaces, dataRaceText)
				c.mutex.Unlock()
				dataRaceText = ""
				_, err = dataRaceFile.WriteString("\n\n")
			} else {
				dataRaceText += line + "\n"
				_, err = dataRaceFile.WriteString(line + "\n")
			}
			Expect(err).NotTo(HaveOccurred(), "Failed to write to data race log.")
		}

		if watches == nil {
			continue
		}
		c.mutex.Lock()
		for _, w := range *watches {
			if w.c == nil {
				// Already triggered once; watches fire at most once.
				continue
			}
			if !w.regexp.MatchString(line) {
				continue
			}

			log.Info(c.Name, "[", streamName, "] ", "Watch triggered:", w.regexp.String())
			close(w.c)
			w.c = nil
		}
		c.mutex.Unlock()
	}
	logCxt := log.WithFields(log.Fields{
		"name":   c.Name,
		"stream": streamName,
	})
	if scanner.Err() != nil {
		logCxt.WithError(scanner.Err()).Error("Non-EOF error reading container stream")
	}
	logCxt.Info("Stream finished")
}

// DataRaces returns the race-detector warnings captured from this container's
// logs, or nil if race detection has been disabled for the test.
func (c *Container) DataRaces() []string {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	if c.raceDetectionDisabled {
		return nil
	}
	races := c.dataRaces
	return races
}

// DisableRaceDetector disables race detection for this test.  This is useful
// for testing the race detection logic.
// DisableRaceDetector disables race detection for this test.  This is useful
// for testing the race detection logic.
func (c *Container) DisableRaceDetector() {
	c.mutex.Lock()
	c.raceDetectionDisabled = true
	c.mutex.Unlock()
}

// DockerInspect runs 'docker inspect' on this container with the given Go
// template and returns the raw output; fails the test if the command errors.
func (c *Container) DockerInspect(format string) string {
	cmd := utils.Command("docker", "inspect", "--format="+format, c.Name)
	out, err := cmd.CombinedOutput()
	Expect(err).NotTo(HaveOccurred(), fmt.Sprintf("Failed to run %q", cmd))
	return string(out)
}

// GetID returns the container's full docker ID.
func (c *Container) GetID() string {
	return strings.TrimSpace(c.DockerInspect("{{.Id}}"))
}

// GetIP returns the container's IPv4 address as reported by docker.
func (c *Container) GetIP() string {
	return strings.TrimSpace(
		c.DockerInspect("{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}"))
}

// GetIPPrefix returns the container's IPv4 prefix length as reported by docker.
func (c *Container) GetIPPrefix() string {
	return strings.TrimSpace(
		c.DockerInspect("{{range .NetworkSettings.Networks}}{{.IPPrefixLen}}{{end}}"))
}

// GetIPv6 returns the container's global IPv6 address as reported by docker.
func (c *Container) GetIPv6() string {
	return strings.TrimSpace(
		c.DockerInspect("{{range .NetworkSettings.Networks}}{{.GlobalIPv6Address}}{{end}}"))
}

// GetIPv6Prefix returns the container's IPv6 prefix length as reported by docker.
func (c *Container) GetIPv6Prefix() string {
	return strings.TrimSpace(
		c.DockerInspect("{{range .NetworkSettings.Networks}}{{.GlobalIPv6PrefixLen}}{{end}}"))
}

// GetHostname returns the hostname configured inside the container.
func (c *Container) GetHostname() string {
	return strings.TrimSpace(c.DockerInspect("{{.Config.Hostname}}"))
}

// GetPIDs returns the PIDs of processes inside the container whose full
// command line exactly matches processName, or nil if pgrep finds none.
func (c *Container) GetPIDs(processName string) []int {
	out, err := c.ExecOutput("pgrep", "-f", fmt.Sprintf("^%s$", processName))
	if err != nil {
		log.WithError(err).Warn("pgrep failed, assuming no PIDs")
		return nil
	}
	var pids []int
	for _, pidStr := range strings.Split(out, "\n") {
		if pidStr == "" {
			continue
		}
		pid, convErr := strconv.Atoi(pidStr)
		Expect(convErr).NotTo(HaveOccurred())
		pids = append(pids, pid)
	}
	return pids
}

// ProcInfo holds a process ID and its parent's PID, as parsed from 'ps'
// output by GetProcInfo.
type ProcInfo struct {
	PID  int
	PPID int
}

// psRegexp matches one line of 'ps wwxo pid,ppid,comm' output, capturing the
// PID, the PPID and the command name.
var psRegexp = regexp.MustCompile(`^\s*(\d+)\s+(\d+)\s+(\S+)$`)

// GetProcInfo runs 'ps' inside the container and returns the PID/PPID of
// every process whose command name equals processName (nil if ps fails).
func (c *Container) GetProcInfo(processName string) []ProcInfo {
	out, err := c.ExecOutput("ps", "wwxo", "pid,ppid,comm")
	if err != nil {
		log.WithError(err).WithField("out", out).Warn("ps failed, assuming no PIDs")
		return nil
	}
	var infos []ProcInfo
	for _, line := range strings.Split(out, "\n") {
		log.WithField("line", line).Debug("Parsing ps line")
		m := psRegexp.FindStringSubmatch(line)
		if m == nil {
			continue
		}
		if m[3] != processName {
			continue
		}
		pid, pidErr := strconv.Atoi(m[1])
		if pidErr != nil {
			log.WithError(pidErr).WithField("line", line).Panic("Failed to parse ps output")
		}
		ppid, ppidErr := strconv.Atoi(m[2])
		if ppidErr != nil {
			log.WithError(ppidErr).WithField("line", line).Panic("Failed to parse ps output")
		}
		infos = append(infos, ProcInfo{PID: pid, PPID: ppid})
	}
	return infos
}

// GetSinglePID returns the PID of the single instance of processName running
// in the container.  Forked children (whose PPID is another matching process)
// are ignored; the call retries for up to 5s to ride out restarts, then fails
// the test.
func (c *Container) GetSinglePID(processName string) int {
	start := time.Now()
	for {
		// Get the PID and parent PID of all processes with the right name.
		procs := c.GetProcInfo(processName)
		log.WithField("procs", procs).Debug("Got ProcInfos")
		// Collect all the pids so we can detect forked child processes by their PPID.
		allPIDs := set.New[int]()
		for _, proc := range procs {
			allPIDs.Add(proc.PID)
		}
		// Keep only processes whose parent is NOT another matching process.
		var parents []ProcInfo
		for _, proc := range procs {
			if !allPIDs.Contains(proc.PPID) {
				parents = append(parents, proc)
			}
		}
		if len(parents) == 1 {
			// Success, there's one process.
			return parents[0].PID
		}
		ExpectWithOffset(1, time.Since(start)).To(BeNumerically("<", 5*time.Second),
			fmt.Sprintf("Timed out waiting for there to be a single PID for %s", processName))
		time.Sleep(50 * time.Millisecond)
	}
}

// WaitUntilRunning polls 'docker ps' until this container's name appears,
// failing the test if the 'docker run' process exits first.  It also starts
// the goroutine that reaps the 'docker run' process and clears runCmd (under
// the mutex) once it exits.
func (c *Container) WaitUntilRunning() {
	log.Info("Wait for container to be listed in docker ps")

	// Set up so we detect if container startup fails.
	stoppedChan := make(chan struct{})
	go func() {
		defer close(stoppedChan)
		err := c.runCmd.Wait()
		log.WithError(err).WithField("name", c.Name).Info("Container stopped ('docker run' exited)")
		c.mutex.Lock()
		defer c.mutex.Unlock()
		c.runCmd = nil
	}()

	for {
		// If the 'docker run' process has already exited, the container can
		// never become listed; fail fast.
		Expect(stoppedChan).NotTo(BeClosed(), fmt.Sprintf("Container %s failed before being listed in 'docker ps'", c.Name))

		cmd := utils.Command("docker", "ps")
		out, err := cmd.CombinedOutput()
		Expect(err).NotTo(HaveOccurred(), "Failed to run 'docker ps'")
		if strings.Contains(string(out), c.Name) {
			break
		}
		time.Sleep(1000 * time.Millisecond)
	}
}

// Stopped reports whether the container's 'docker run' process has exited.
func (c *Container) Stopped() bool {
	c.mutex.Lock()
	stopped := c.runCmd == nil
	c.mutex.Unlock()
	return stopped
}

// ListedInDockerPS reports whether this container's name appears in the
// output of 'docker ps' (i.e. whether docker considers it running).
func (c *Container) ListedInDockerPS() bool {
	psCmd := utils.Command("docker", "ps")
	out, err := psCmd.CombinedOutput()
	Expect(err).NotTo(HaveOccurred(), "Failed to run 'docker ps'")
	return strings.Contains(string(out), c.Name)
}

// WaitNotRunning polls 'docker ps' until this container disappears from the
// listing, panicking if it is still listed after the given timeout.
func (c *Container) WaitNotRunning(timeout time.Duration) {
	log.Info("Wait for container not to be listed in docker ps")
	deadline := time.Now().Add(timeout)
	for c.ListedInDockerPS() {
		if time.Now().After(deadline) {
			log.Panic("Timed out waiting for container not to be listed.")
		}
		time.Sleep(time.Second)
	}
}

// CopyFileIntoContainer copies a file from the host into the container via
// 'docker cp', returning any error from the command.
func (c *Container) CopyFileIntoContainer(hostPath, containerPath string) error {
	cpCmd := utils.Command("docker", "cp", hostPath, c.Name+":"+containerPath)
	return cpCmd.Run()
}

// FileExists reports whether the given path exists inside the container
// (checked with 'test -e').
func (c *Container) FileExists(path string) bool {
	return c.ExecMayFail("test", "-e", path) == nil
}

// Exec runs the given command inside the container, failing the test if it
// errors.
func (c *Container) Exec(cmd ...string) {
	log.WithField("container", c.Name).WithFields(log.Fields{"command": cmd, "stack": miniStackTrace()}).Info("Exec: Running command")
	dockerArgs := append([]string{"exec", c.Name}, cmd...)
	utils.Run("docker", dockerArgs...)
}

// ExecWithInput runs the given command inside the container with input fed to
// its stdin, failing the test if it errors.
func (c *Container) ExecWithInput(input []byte, cmd ...string) {
	log.WithField("container", c.Name).WithFields(log.Fields{"command": cmd, "stack": miniStackTrace()}).Info("ExecWithInput: Running command")
	dockerArgs := append([]string{"exec", "-i", c.Name}, cmd...)
	utils.RunWithInput(input, "docker", dockerArgs...)
}

// ExecMayFail runs the given command inside the container and returns its
// error (if any) rather than failing the test.
func (c *Container) ExecMayFail(cmd ...string) error {
	log.WithField("container", c.Name).WithFields(log.Fields{"command": cmd, "stack": miniStackTrace()}).Info("ExecMayFail: Running command")
	dockerArgs := append([]string{"exec", c.Name}, cmd...)
	return utils.RunMayFail("docker", dockerArgs...)
}

// ExecBestEffort runs the given command inside the container; failures are
// logged and otherwise ignored.
func (c *Container) ExecBestEffort(cmd ...string) {
	if err := c.ExecMayFail(cmd...); err != nil {
		log.WithError(err).Errorf("Command (%s) failed, ignoring.", strings.Join(cmd, " "))
	}
}

// miniStackTrace returns a compact stack trace of the caller, trimmed to
// frames below this package, for inclusion in exec log lines.
func miniStackTrace() string {
	return stacktrace.MiniStackStrace("/containers/")
}

// ExecOutput runs the given command inside the container and returns its
// stdout.  Stderr is streamed to the test log in the background and, if the
// command fails, its captured text is included in the returned error.
func (c *Container) ExecOutput(args ...string) (string, error) {
	arg := []string{"exec", c.Name}
	arg = append(arg, args...)
	cmd := utils.Command("docker", arg...)
	stderr, err := cmd.StderrPipe()
	if err != nil {
		return "", fmt.Errorf("failed to make a pipe for stderr %q: %w", cmd, err)
	}
	var wg sync.WaitGroup
	wg.Add(1)
	// Mirror stderr into the log while also buffering it for error reporting.
	var errBuf bytes.Buffer
	go c.copyOutputToLog("exec-err", stderr, &wg, nil, &errBuf)
	out, err := cmd.Output()
	stdoutStr := string(out)
	wg.Wait() // Wait for the stderr copy to finish so errBuf is safe to read.
	if err != nil {
		stderrStr := errBuf.String()
		return stdoutStr, fmt.Errorf("command failed %q with stdout=%q stderr=%q: %w", cmd, stdoutStr, stderrStr, err)
	}
	return stdoutStr, nil
}

// ExecOutputFn returns a closure that runs ExecOutput with the given args;
// useful with gomega's Eventually().
func (c *Container) ExecOutputFn(args ...string) func() (string, error) {
	return func() (string, error) { return c.ExecOutput(args...) }
}

// ExecCombinedOutput runs the given command inside the container and returns
// its combined stdout+stderr; on failure the output is embedded in the error.
func (c *Container) ExecCombinedOutput(args ...string) (string, error) {
	dockerArgs := append([]string{"exec", c.Name}, args...)
	cmd := utils.Command("docker", dockerArgs...)
	out, err := cmd.CombinedOutput()
	if err == nil {
		return string(out), nil
	}
	if out == nil {
		return "", fmt.Errorf("command failed with no output %q: %w", cmd, err)
	}
	outStr := string(out)
	return outStr, fmt.Errorf("command failed %q: %w output=%q", cmd, err, outStr)
}

// SourceName returns the container's name; part of the connectivity-checker
// source interface.
func (c *Container) SourceName() string {
	return c.Name
}

// SourceIPs returns the container's IPv4 address, its IPv6 address (if set)
// and any extra source IPs configured on the struct.
func (c *Container) SourceIPs() []string {
	ips := make([]string, 0, 2+len(c.ExtraSourceIPs))
	ips = append(ips, c.IP)
	if c.IPv6 != "" {
		ips = append(ips, c.IPv6)
	}
	return append(ips, c.ExtraSourceIPs...)
}

// PreRetryCleanup is a no-op for plain containers; it exists to satisfy the
// connectivity checker's source interface.
func (c *Container) PreRetryCleanup(ip, port, protocol string, opts ...connectivity.CheckOption) {
}

// CanConnectTo runs a connectivity check from this container to the given
// ip/port/protocol and returns the result.
func (c *Container) CanConnectTo(ip, port, protocol string, opts ...connectivity.CheckOption) *connectivity.Result {
	return connectivity.Check(c.Name, "Connection test", ip, port, protocol, opts...)
}

// AttachTCPDump returns tcpdump attached to the container
// AttachTCPDump returns tcpdump attached to the container
func (c *Container) AttachTCPDump(iface string) *tcpdump.TCPDump {
	containerID := c.GetID()
	return tcpdump.AttachUnavailable(containerID, iface)
}

// IPSetSize returns the size of the given (netfilter) IP set (or 0 is it is not present).
// IPSetSize returns the size of the given (netfilter) IP set (or 0 is it is not present).
func (c *Container) IPSetSize(ipSetName string) int {
	// If we later optimize this to use 'ipset list <name>' note that the
	// <name> variant fails with non-zero RC if the ipset doesn't exist.
	sizes := c.IPSetSizes()
	return sizes[ipSetName]
}

// IPSetSizeFn returns a closure that reports the current size of the given
// IP set; useful with gomega's Eventually().
func (c *Container) IPSetSizeFn(ipSetName string) func() int {
	return func() int { return c.IPSetSize(ipSetName) }
}

// IPSetSizes runs 'ipset list' inside the container and returns a map from
// IP set name to the number of members in that set.
func (c *Container) IPSetSizes() map[string]int {
	sizes := map[string]int{}
	ipsetsOutput, err := c.ExecOutput("ipset", "list")
	Expect(err).NotTo(HaveOccurred())
	setName := ""
	inMembers := false
	log.WithField("ipsets", ipsetsOutput).Info("IP sets state")
	for _, line := range strings.Split(ipsetsOutput, "\n") {
		log.WithField("line", line).Debug("Parsing line")
		switch {
		case strings.HasPrefix(line, "Name:"):
			// Header of a new set; reset the member-section flag.
			setName = strings.Split(line, " ")[1]
			inMembers = false
		case strings.HasPrefix(line, "Members:"):
			inMembers = true
		case inMembers && strings.TrimSpace(line) != "":
			log.Debugf("IP set %s has member %s", setName, line)
			sizes[setName]++
		}
	}
	return sizes
}

// NFTSetSizes returns a map from nftables set name to member count.  Sets
// whose names start with "cali60" are looked up in the ip6 table, everything
// else in the ip table.
func (c *Container) NFTSetSizes() map[string]int {
	sizes := map[string]int{}
	for _, name := range c.IPSetNames() {
		ipVersion := 4
		if strings.HasPrefix(name, "cali60") {
			ipVersion = 6
		}
		sizes[name] = c.NumNFTSetMembers(ipVersion, name)
	}
	return sizes
}

// NFTSetSize returns the member count of the named nftables set (0 if absent).
func (c *Container) NFTSetSize(name string) int {
	sizes := c.NFTSetSizes()
	return sizes[name]
}

// NFTSetSizeFn returns a closure that reports the current size of the named
// nftables set; useful with gomega's Eventually().  Delegates to NFTSetSize
// for consistency with IPSetSizeFn/IPSetSize.
func (c *Container) NFTSetSizeFn(name string) func() int {
	return func() int {
		return c.NFTSetSize(name)
	}
}

// NumNFTSetMembers returns the number of elements in the named nftables set
// in the "calico" table for the given IP version (4 or 6).  Returns -1 if the
// 'nft' command fails (e.g. the set doesn't exist).
func (c *Container) NumNFTSetMembers(ipVersion int, setName string) int {
	ip := "ip"
	if ipVersion == 6 {
		ip = "ip6"
	}
	out, err := c.ExecOutput("nft", "--json", "list", "set", ip, "calico", setName)
	if err != nil {
		log.WithError(err).Warn("Failed to list nft IP set.")
		return -1
	}

	type nftResp struct {
		Nftables []map[string]interface{} `json:"nftables"`
	}
	var resp nftResp
	Expect(json.Unmarshal([]byte(out), &resp)).NotTo(HaveOccurred(), fmt.Sprintf("Failed to unmarshal JSON: %s", out))
	for _, obj := range resp.Nftables {
		if obj["set"] != nil {
			setObj, ok := obj["set"].(map[string]interface{})
			Expect(ok).To(BeTrue(), fmt.Sprintf("Failed to parse set: %v", obj))
			if _, ok := setObj["elem"]; !ok {
				// No elements.
				return 0
			}
			elems, ok := setObj["elem"].([]interface{})
			Expect(ok).To(BeTrue(), fmt.Sprintf("Failed to parse elem: %v", setObj))
			return len(elems)
		}
	}
	// NOTE(review): falls through to the length of the raw output when the
	// JSON contained no "set" object — that value looks arbitrary; confirm
	// whether this should be 0 or -1 instead.
	return len(out)
}

// IPSetNames lists the names of all IP sets in the container, using nftables
// when FELIX_FV_NFTABLES is enabled and 'ipset list -name' otherwise.
func (c *Container) IPSetNames() []string {
	if os.Getenv("FELIX_FV_NFTABLES") == "Enabled" {
		return c.nftablesSetNames()
	}

	out, err := c.ExecOutput("ipset", "list", "-name")
	Expect(err).NotTo(HaveOccurred())
	trimmed := strings.Trim(out, "\n")
	if trimmed == "" {
		return nil
	}
	return strings.Split(trimmed, "\n")
}

// nftablesSetNames returns the nftables set names for both the ip and ip6
// families, concatenated.
func (c *Container) nftablesSetNames() []string {
	names := c.nftablesSetNamesForVersion("ip")
	names = append(names, c.nftablesSetNamesForVersion("ip6")...)
	return names
}

// nftablesSetNamesForVersion parses 'nft list sets <family>' output and
// returns the declared set names.
func (c *Container) nftablesSetNamesForVersion(ver string) []string {
	out, err := c.ExecOutput("nft", "list", "sets", ver)
	Expect(err).NotTo(HaveOccurred(), out)
	var names []string
	for _, rawLine := range strings.Split(out, "\n") {
		trimmed := strings.TrimSpace(rawLine)
		if !strings.HasPrefix(trimmed, "set ") {
			continue
		}
		// Line looks like: set <name> {
		names = append(names, strings.Fields(trimmed)[1])
	}
	return names
}

// NumIPSets returns the number of IP sets present in the container.
func (c *Container) NumIPSets() int {
	names := c.IPSetNames()
	return len(names)
}

// NumTCBPFProgs Returns the number of TC BPF programs attached to the given interface.  Only direct-action
// programs are listed (i.e. the type that we use).
// NumTCBPFProgs Returns the number of TC BPF programs attached to the given interface.  Only direct-action
// programs are listed (i.e. the type that we use).
func (c *Container) NumTCBPFProgs(ifaceName string) int {
	if strings.ToLower(os.Getenv("FELIX_FV_BPFATTACHTYPE")) != "tc" {
		// Non-TC attach mode: count mentions of the interface in bpftool's output.
		out, err := c.ExecOutput("bpftool", "net", "show")
		Expect(err).NotTo(HaveOccurred())
		return strings.Count(out, ifaceName)
	}

	total := 0
	for _, dir := range []string{"ingress", "egress"} {
		out, err := c.ExecOutput("tc", "filter", "show", "dev", ifaceName, dir)
		Expect(err).NotTo(HaveOccurred())
		count := strings.Count(out, "direct-action")
		log.Debugf("Output from tc filter show for %s, dir=%s: %q (count=%d)", c.Name, dir, out, count)
		total += count
	}
	return total
}

// NumTCBPFProgsFn returns a closure that reports the current number of TC BPF
// programs on the given interface; useful with gomega's Eventually().
func (c *Container) NumTCBPFProgsFn(ifaceName string) func() int {
	return func() int { return c.NumTCBPFProgs(ifaceName) }
}

// NumTCBPFProgsEth0 returns the number of TC BPF programs attached to eth0.  Only direct-action
// programs are listed (i.e. the type that we use).
func (c *Container) NumTCBPFProgsEth0() int {
	return c.NumTCBPFProgs("eth0")
}

// BPFRoutes returns the output of calico-bpf routes dump, trimmed of whitespace and sorted.
// BPFRoutes returns the output of calico-bpf routes dump, trimmed of whitespace and sorted.
func (c *Container) BPFRoutes() string {
	out, err := c.ExecOutput("calico-bpf", "routes", "dump")
	if err != nil {
		log.WithError(err).Error("Failed to run calico-bpf")
	}

	var kept []string
	for _, line := range strings.Split(out, "\n") {
		line = strings.TrimLeft(line, " ")
		if line == "" {
			continue
		}
		kept = append(kept, line)
	}
	sort.Strings(kept)
	return strings.Join(kept, "\n")
}

// BPFNATDump returns parsed out NAT maps keyed by "<ip> port <port> proto <proto>". Each
// value is list of "<ip>:<port>".
// BPFNATDump returns parsed out NAT maps keyed by "<ip> port <port> proto <proto>". Each
// value is list of "<ip>:<port>".
//
// The parser is a small state machine over calico-bpf's output: a frontend
// line carries an id; subsequent backend lines are matched by a regex built
// from that id until a non-matching line ends the group.
func (c *Container) BPFNATDump(ipv6 bool) map[string][]string {
	var (
		err error
		out string
	)

	if ipv6 {
		out, err = c.ExecOutput("calico-bpf", "-6", "nat", "dump")
	} else {
		out, err = c.ExecOutput("calico-bpf", "nat", "dump")
	}
	if err != nil {
		log.WithError(err).Error("Failed to run calico-bpf")
	}

	// Matches a frontend line, capturing the key and the id used to match
	// its backend lines.
	feMatch := regexp.MustCompile(`(.* port \d+ proto \d+) id (\d+) count.*`)

	lines := strings.Split(out, "\n")
	front := ""
	id := ""
	back := []string(nil)
	nat := make(map[string][]string)

	var beMatch *regexp.Regexp

	for _, l := range lines {
		if front != "" {
			// Collecting backends for the current frontend.
			if be := beMatch.FindStringSubmatch(l); be != nil {
				back = append(back, be[1])
			} else {
				// Non-backend line closes the current group.
				nat[front] = back
				back = []string(nil)
				front = ""
			}
		}

		if front == "" {
			if fe := feMatch.FindStringSubmatch(l); fe == nil {
				continue
			} else {
				front = fe[1]
				id = fe[2]
				beMatch = regexp.MustCompile("\\s+" + id + ":\\d+\\s+(\\d+\\.\\d+\\.\\d+\\.\\d+:\\d+)")
			}
		}

	}

	// Flush the final group if the output ended mid-frontend.
	if front != "" {
		nat[front] = back
	}

	return nat
}

// BPFNATHasBackendForService returns true is the given service has the given backend programmed in NAT tables
func (c *Container) BPFNATHasBackendForService(svcIP string, svcPort, proto int, ip string, port int) bool {
	front := fmt.Sprintf("%s port %d proto %d", svcIP, svcPort, proto)
	back := net.JoinHostPort(ip, fmt.Sprint(port))

	ipv6 := net.ParseIP(ip).To4() == nil
	nat := c.BPFNATDump(ipv6)
	if natBack, ok := nat[front]; ok {
		found := false
		for _, b := range natBack {
			if b == back {
				found = true
				break
			}
		}
		if !found {
			return false
		}
	} else {
		return false
	}

	return true
}
