Redo docker build system with buildkit+multi-stage and cache pio packages (#2338)

Otto Winter 2021-09-20 09:07:38 +02:00 committed by GitHub
parent a990898256
commit 272ceadbb0
29 changed files with 295 additions and 293 deletions
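In short: the per-type Dockerfiles and the standalone push step are replaced by a single multi-stage docker/Dockerfile built with BuildKit (docker buildx), and PlatformIO packages are cached both in CI (actions/cache) and at container runtime (a /cache mount handled by the new entrypoint). As a rough sketch, the buildx invocation that the reworked docker/build.py assembles looks like the following; tag, platform and registry values are illustrative:

    docker buildx build \
      --build-arg BASEIMGTYPE=docker \
      --build-arg BUILD_VERSION=dev \
      --cache-from ghcr.io/esphome/esphome-amd64:dev \
      --file docker/Dockerfile \
      --platform linux/amd64 \
      --target docker \
      --tag esphome/esphome-amd64:dev \
      --push \
      .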


@@ -1,7 +1,6 @@
 {
     "name": "ESPHome Dev",
-    "context": "..",
-    "dockerFile": "../docker/Dockerfile.dev",
+    "image": "esphome/esphome-lint:dev",
     "postCreateCommand": [
         "script/devcontainer-post-create"
     ],


@@ -27,6 +27,11 @@ jobs:
         uses: actions/setup-python@v2
         with:
           python-version: '3.9'
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
       - name: Set TAG
         run: |
           echo "TAG=check" >> $GITHUB_ENV


@@ -9,61 +9,7 @@ on:
   pull_request:

 jobs:
-  ci-with-container:
-    name: ${{ matrix.name }}
-    runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - id: clang-format
-            name: Run script/clang-format
-          - id: clang-tidy
-            name: Run script/clang-tidy for ESP8266
-            options: --environment esp8266-tidy --grep ARDUINO_ARCH_ESP8266
-          - id: clang-tidy
-            name: Run script/clang-tidy for ESP32 1/4
-            options: --environment esp32-tidy --split-num 4 --split-at 1
-          - id: clang-tidy
-            name: Run script/clang-tidy for ESP32 2/4
-            options: --environment esp32-tidy --split-num 4 --split-at 2
-          - id: clang-tidy
-            name: Run script/clang-tidy for ESP32 3/4
-            options: --environment esp32-tidy --split-num 4 --split-at 3
-          - id: clang-tidy
-            name: Run script/clang-tidy for ESP32 4/4
-            options: --environment esp32-tidy --split-num 4 --split-at 4
-    # cpp lint job runs with esphome-lint docker image so that clang-format-*
-    # doesn't have to be installed
-    container: ghcr.io/esphome/esphome-lint:1.2
-    steps:
-      - uses: actions/checkout@v2
-      - name: Register problem matchers
-        run: |
-          echo "::add-matcher::.github/workflows/matchers/clang-tidy.json"
-          echo "::add-matcher::.github/workflows/matchers/gcc.json"
-      # Also run git-diff-index so that the step is marked as failed on formatting errors,
-      # since clang-format doesn't do anything but change files if -i is passed.
-      - name: Run clang-format
-        run: |
-          script/clang-format -i
-          git diff-index --quiet HEAD --
-        if: ${{ matrix.id == 'clang-format' }}
-      - name: Run clang-tidy
-        run: script/clang-tidy --all-headers --fix ${{ matrix.options }}
-        if: ${{ matrix.id == 'clang-tidy' }}
-      - name: Suggested changes
-        run: script/ci-suggest-changes
-        if: always()
   ci:
-    # Don't use the esphome-lint docker image because it may contain outdated requirements.
-    # This way, all dependencies are cached via the cache action.
     name: ${{ matrix.name }}
     runs-on: ubuntu-latest
     strategy:
@@ -77,48 +23,85 @@ jobs:
           - id: test
             file: tests/test1.yaml
             name: Test tests/test1.yaml
+            pio_cache_key: test1
           - id: test
             file: tests/test2.yaml
             name: Test tests/test2.yaml
+            pio_cache_key: test2
           - id: test
             file: tests/test3.yaml
             name: Test tests/test3.yaml
+            pio_cache_key: test1
           - id: test
             file: tests/test4.yaml
             name: Test tests/test4.yaml
+            pio_cache_key: test4
           - id: test
             file: tests/test5.yaml
             name: Test tests/test5.yaml
+            pio_cache_key: test5
           - id: pytest
             name: Run pytest
+          - id: clang-format
+            name: Run script/clang-format
+          - id: clang-tidy
+            name: Run script/clang-tidy for ESP8266
+            options: --environment esp8266-tidy --grep ARDUINO_ARCH_ESP8266
+            pio_cache_key: tidyesp8266
+          - id: clang-tidy
+            name: Run script/clang-tidy for ESP32 1/4
+            options: --environment esp32-tidy --split-num 4 --split-at 1
+            pio_cache_key: tidyesp32
+          - id: clang-tidy
+            name: Run script/clang-tidy for ESP32 2/4
+            options: --environment esp32-tidy --split-num 4 --split-at 2
+            pio_cache_key: tidyesp32
+          - id: clang-tidy
+            name: Run script/clang-tidy for ESP32 3/4
+            options: --environment esp32-tidy --split-num 4 --split-at 3
+            pio_cache_key: tidyesp32
+          - id: clang-tidy
+            name: Run script/clang-tidy for ESP32 4/4
+            options: --environment esp32-tidy --split-num 4 --split-at 4
+            pio_cache_key: tidyesp32
     steps:
       - uses: actions/checkout@v2
       - name: Set up Python
         uses: actions/setup-python@v2
+        id: python
         with:
           python-version: '3.7'
       - name: Cache pip modules
-        uses: actions/cache@v1
+        uses: actions/cache@v2
         with:
           path: ~/.cache/pip
-          key: esphome-pip-3.7-${{ hashFiles('setup.py') }}
+          key: pip-${{ steps.python.outputs.python-version }}-${{ hashFiles('requirements*.txt') }}
           restore-keys: |
-            esphome-pip-3.7-
+            pip-${{ steps.python.outputs.python-version }}-
-      # Use per test platformio cache because tests have different platform versions
-      - name: Cache ~/.platformio
-        uses: actions/cache@v1
-        with:
-          path: ~/.platformio
-          key: test-home-platformio-${{ matrix.file }}-${{ hashFiles('esphome/core/config.py') }}
-          restore-keys: |
-            test-home-platformio-${{ matrix.file }}-
-        if: ${{ matrix.id == 'test' }}
       - name: Set up python environment
-        run: script/setup
+        run: |
+          pip3 install -r requirements.txt -r requirements_optional.txt -r requirements_test.txt
+          pip3 install -e .
+      # Use per check platformio cache because checks use different parts
+      - name: Cache platformio
+        uses: actions/cache@v2
+        with:
+          path: ~/.platformio
+          key: platformio-${{ matrix.pio_cache_key }}-${{ hashFiles('platformio.ini') }}
+          restore-keys: |
+            platformio-${{ matrix.pio_cache_key }}-
+        if: matrix.id == 'test' || matrix.id == 'clang-tidy'
+      - name: Install clang tools
+        run: |
+          sudo apt-get install \
+            clang-format-11 \
+            clang-tidy-11
+        if: matrix.id == 'clang-tidy' || matrix.id == 'clang-format'
       - name: Register problem matchers
         run: |
@@ -127,20 +110,45 @@ jobs:
           echo "::add-matcher::.github/workflows/matchers/python.json"
           echo "::add-matcher::.github/workflows/matchers/pytest.json"
           echo "::add-matcher::.github/workflows/matchers/gcc.json"
+          echo "::add-matcher::.github/workflows/matchers/clang-tidy.json"
       - name: Lint Custom
         run: |
           script/ci-custom.py
           script/build_codeowners.py --check
-        if: ${{ matrix.id == 'ci-custom' }}
+        if: matrix.id == 'ci-custom'
       - name: Lint Python
         run: script/lint-python
-        if: ${{ matrix.id == 'lint-python' }}
+        if: matrix.id == 'lint-python'
       - run: esphome compile ${{ matrix.file }}
-        if: ${{ matrix.id == 'test' }}
+        if: matrix.id == 'test'
+        env:
+          # Also cache libdeps, store them in a ~/.platformio subfolder
+          PLATFORMIO_LIBDEPS_DIR: ~/.platformio/libdeps
       - name: Run pytest
         run: |
           pytest -vv --tb=native tests
-        if: ${{ matrix.id == 'pytest' }}
+        if: matrix.id == 'pytest'
+      # Also run git-diff-index so that the step is marked as failed on formatting errors,
+      # since clang-format doesn't do anything but change files if -i is passed.
+      - name: Run clang-format
+        run: |
+          script/clang-format -i
+          git diff-index --quiet HEAD --
+        if: matrix.id == 'clang-format'
+      - name: Run clang-tidy
+        run: |
+          script/clang-tidy --all-headers --fix ${{ matrix.options }}
+        if: matrix.id == 'clang-tidy'
+        env:
+          # Also cache libdeps, store them in a ~/.platformio subfolder
+          PLATFORMIO_LIBDEPS_DIR: ~/.platformio/libdeps
+      - name: Suggested changes
+        run: script/ci-suggest-changes
+        if: always() && (matrix.id == 'clang-tidy' || matrix.id == 'clang-format')
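In the reworked ci job the PlatformIO cache is keyed per check (pio_cache_key), and library dependencies are redirected into the cached ~/.platformio tree via PLATFORMIO_LIBDEPS_DIR, which esphome/platformio_api.py now only sets as a default. A rough local equivalent of the CI compile step, assuming the same env override (the test file is simply the first one in the matrix):

    PLATFORMIO_LIBDEPS_DIR=~/.platformio/libdeps esphome compile tests/test1.yaml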


@@ -1,100 +0,0 @@
-name: Build and publish lint docker image
-
-# Only run when docker paths change
-on:
-  push:
-    branches: [dev]
-    paths:
-      - 'docker/Dockerfile.lint'
-      - 'requirements.txt'
-      - 'requirements_optional.txt'
-      - 'requirements_test.txt'
-      - 'platformio.ini'
-      - '.github/workflows/docker-lint-build.yml'
-
-jobs:
-  deploy-docker:
-    name: Build and publish docker containers
-    if: github.repository == 'esphome/esphome'
-    runs-on: ubuntu-latest
-    strategy:
-      matrix:
-        arch: [amd64, armv7, aarch64]
-        build_type: ["lint"]
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.9'
-      - name: Set TAG
-        run: |
-          echo "TAG=1.2" >> $GITHUB_ENV
-      - name: Run build
-        run: |
-          docker/build.py \
-            --tag "${TAG}" \
-            --arch "${{ matrix.arch }}" \
-            --build-type "${{ matrix.build_type }}" \
-            build
-      - name: Log in to docker hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USER }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      - name: Log in to the GitHub container registry
-        uses: docker/login-action@v1
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Run push
-        run: |
-          docker/build.py \
-            --tag "${TAG}" \
-            --arch "${{ matrix.arch }}" \
-            --build-type "${{ matrix.build_type }}" \
-            push
-
-  deploy-docker-manifest:
-    if: github.repository == 'esphome/esphome'
-    runs-on: ubuntu-latest
-    needs: [deploy-docker]
-    strategy:
-      matrix:
-        build_type: ["lint"]
-    steps:
-      - uses: actions/checkout@v2
-      - name: Set up Python
-        uses: actions/setup-python@v2
-        with:
-          python-version: '3.9'
-      - name: Set TAG
-        run: |
-          echo "TAG=1.2" >> $GITHUB_ENV
-      - name: Enable experimental manifest support
-        run: |
-          mkdir -p ~/.docker
-          echo "{\"experimental\": \"enabled\"}" > ~/.docker/config.json
-      - name: Log in to docker hub
-        uses: docker/login-action@v1
-        with:
-          username: ${{ secrets.DOCKER_USER }}
-          password: ${{ secrets.DOCKER_PASSWORD }}
-      - name: Log in to the GitHub container registry
-        uses: docker/login-action@v1
-        with:
-          registry: ghcr.io
-          username: ${{ github.actor }}
-          password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Run manifest
-        run: |
-          docker/build.py \
-            --tag "${TAG}" \
-            --build-type "${{ matrix.build_type }}" \
-            manifest


@@ -57,7 +57,7 @@
     strategy:
       matrix:
         arch: [amd64, armv7, aarch64]
-        build_type: ["ha-addon", "docker"]
+        build_type: ["ha-addon", "docker", "lint"]
     steps:
       - uses: actions/checkout@v2
       - name: Set up Python
@@ -65,13 +65,10 @@
         with:
           python-version: '3.9'
-      - name: Run build
-        run: |
-          docker/build.py \
-            --tag "${{ needs.init.outputs.tag }}" \
-            --arch "${{ matrix.arch }}" \
-            --build-type "${{ matrix.build_type }}" \
-            build
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+      - name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
       - name: Log in to docker hub
         uses: docker/login-action@v1
@@ -85,13 +82,14 @@
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
-      - name: Run push
+      - name: Build and push
         run: |
           docker/build.py \
             --tag "${{ needs.init.outputs.tag }}" \
             --arch "${{ matrix.arch }}" \
             --build-type "${{ matrix.build_type }}" \
-            push
+            build \
+            --push

   deploy-docker-manifest:
     if: github.repository == 'esphome/esphome'
@@ -99,7 +97,7 @@
     needs: [init, deploy-docker]
     strategy:
       matrix:
-        build_type: ["ha-addon", "docker"]
+        build_type: ["ha-addon", "docker", "lint"]
     steps:
       - uses: actions/checkout@v2
       - name: Set up Python


@@ -1,5 +1,55 @@
-ARG BUILD_FROM=esphome/esphome-base:latest
-FROM ${BUILD_FROM}
+# Build these with the build.py script
+# Example:
+# python3 docker/build.py --tag dev --arch amd64 --build-type docker build
+
+# One of "docker", "hassio"
+ARG BASEIMGTYPE=docker
+
+FROM ghcr.io/hassio-addons/debian-base/amd64:5.0.0 AS base-hassio-amd64
+FROM ghcr.io/hassio-addons/debian-base/aarch64:5.0.0 AS base-hassio-arm64
+FROM ghcr.io/hassio-addons/debian-base/armv7:5.0.0 AS base-hassio-armv7
+FROM debian:bullseye-20210816-slim AS base-docker-amd64
+FROM debian:bullseye-20210816-slim AS base-docker-arm64
+FROM debian:bullseye-20210816-slim AS base-docker-armv7
+
+# Use TARGETARCH/TARGETVARIANT defined by docker
+# https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope
+FROM base-${BASEIMGTYPE}-${TARGETARCH}${TARGETVARIANT} AS base
+
+RUN \
+    apt-get update \
+    # Use pinned versions so that we get updates with build caching
+    && apt-get install -y --no-install-recommends \
+        python3=3.9.2-3 \
+        python3-pip=20.3.4-4 \
+        python3-setuptools=52.0.0-4 \
+        python3-pil=8.1.2+dfsg-0.3 \
+        python3-cryptography=3.3.2-1 \
+        iputils-ping=3:20210202-1 \
+        git=1:2.30.2-1 \
+        curl=7.74.0-1.3+b1 \
+    && rm -rf \
+        /tmp/* \
+        /var/{cache,log}/* \
+        /var/lib/apt/lists/*
+
+ENV \
+    # Fix click python3 lang warning https://click.palletsprojects.com/en/7.x/python3/
+    LANG=C.UTF-8 LC_ALL=C.UTF-8 \
+    # Store globally installed pio libs in /piolibs
+    PLATFORMIO_GLOBALLIB_DIR=/piolibs
+
+RUN \
+    # Ubuntu python3-pip is missing wheel
+    pip3 install --no-cache-dir \
+        wheel==0.36.2 \
+        platformio==5.2.0 \
+    # Change some platformio settings
+    && platformio settings set enable_telemetry No \
+    && platformio settings set check_libraries_interval 1000000 \
+    && platformio settings set check_platformio_interval 1000000 \
+    && platformio settings set check_platforms_interval 1000000 \
+    && mkdir -p /piolibs
+
 # First install requirements to leverage caching when requirements don't change
 COPY requirements.txt requirements_optional.txt docker/platformio_install_deps.py platformio.ini /
@@ -7,9 +57,14 @@ RUN \
     pip3 install --no-cache-dir -r /requirements.txt -r /requirements_optional.txt \
     && /platformio_install_deps.py /platformio.ini

-# Then copy esphome and install
-COPY . .
-RUN pip3 install --no-cache-dir -e .
+# ======================= docker-type image =======================
+FROM base AS docker
+
+# Copy esphome and install
+COPY . /esphome
+RUN pip3 install --no-cache-dir -e /esphome

 # Settings for dashboard
 ENV USERNAME="" PASSWORD=""
@@ -17,14 +72,74 @@ ENV USERNAME="" PASSWORD=""
 # Expose the dashboard to Docker
 EXPOSE 6052

-# Run healthcheck (heartbeat)
-HEALTHCHECK --interval=30s --timeout=30s \
-    CMD curl --fail http://localhost:6052 || exit 1
+COPY docker/docker_entrypoint.sh /entrypoint.sh

 # The directory the user should mount their configuration files to
-VOLUME /config
 WORKDIR /config

-# Set entrypoint to esphome so that the user doesn't have to type 'esphome'
+# Set entrypoint to esphome (via a script) so that the user doesn't have to type 'esphome'
 # in every docker command twice
-ENTRYPOINT ["esphome"]
+ENTRYPOINT ["/entrypoint.sh"]
 # When no arguments given, start the dashboard in the workdir
 CMD ["dashboard", "/config"]

+
+# ======================= hassio-type image =======================
+FROM base AS hassio
+
+RUN \
+    apt-get update \
+    # Use pinned versions so that we get updates with build caching
+    && apt-get install -y --no-install-recommends \
+        nginx=1.18.0-6.1 \
+    && rm -rf \
+        /tmp/* \
+        /var/{cache,log}/* \
+        /var/lib/apt/lists/*
+
+ARG BUILD_VERSION=dev
+
+# Copy root filesystem
+COPY docker/hassio-rootfs/ /
+
+# Copy esphome and install
+COPY . /esphome
+RUN pip3 install --no-cache-dir -e /esphome
+
+# Labels
+LABEL \
+    io.hass.name="ESPHome" \
+    io.hass.description="Manage and program ESP8266/ESP32 microcontrollers through YAML configuration files" \
+    io.hass.type="addon" \
+    io.hass.version="${BUILD_VERSION}"
+# io.hass.arch is inherited from addon-debian-base
+
+
+# ======================= lint-type image =======================
+FROM base AS lint
+
+ENV \
+    PLATFORMIO_CORE_DIR=/esphome/.temp/platformio
+
+RUN \
+    apt-get update \
+    # Use pinned versions so that we get updates with build caching
+    && apt-get install -y --no-install-recommends \
+        clang-format-11=1:11.0.1-2 \
+        clang-tidy-11=1:11.0.1-2 \
+        patch=2.7.6-7 \
+        software-properties-common=0.96.20.2-2.1 \
+        nano=5.4-2 \
+        build-essential=12.9 \
+        python3-dev=3.9.2-3 \
+    && rm -rf \
+        /tmp/* \
+        /var/{cache,log}/* \
+        /var/lib/apt/lists/*
+
+VOLUME ["/esphome"]
+WORKDIR /esphome
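Every image variant is now a stage (--target) of this one Dockerfile, selected together with the BASEIMGTYPE base-image argument. A minimal sketch of building a single stage locally with BuildKit; the local tag name is illustrative:

    docker buildx build \
      --target lint \
      --build-arg BASEIMGTYPE=docker \
      --file docker/Dockerfile \
      --tag esphome-lint-local \
      .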


@@ -1 +0,0 @@
-FROM esphome/esphome-lint:1.2


@@ -1,25 +0,0 @@
-ARG BUILD_FROM=esphome/esphome-hassio-base:latest
-FROM ${BUILD_FROM}
-
-# First install requirements to leverage caching when requirements don't change
-COPY requirements.txt requirements_optional.txt docker/platformio_install_deps.py platformio.ini /
-RUN \
-    pip3 install --no-cache-dir -r /requirements.txt -r /requirements_optional.txt \
-    && /platformio_install_deps.py /platformio.ini
-
-# Copy root filesystem
-COPY docker/rootfs/ /
-
-# Then copy esphome and install
-COPY . /opt/esphome/
-RUN pip3 install --no-cache-dir -e /opt/esphome
-
-# Build arguments
-ARG BUILD_VERSION=dev
-
-# Labels
-LABEL \
-    io.hass.name="ESPHome" \
-    io.hass.description="Manage and program ESP8266/ESP32 microcontrollers through YAML configuration files" \
-    io.hass.type="addon" \
-    io.hass.version=${BUILD_VERSION}


@@ -1,10 +0,0 @@
-ARG BUILD_FROM=esphome/esphome-lint-base:latest
-FROM ${BUILD_FROM}
-
-COPY requirements.txt requirements_optional.txt requirements_test.txt docker/platformio_install_deps.py platformio.ini /
-RUN \
-    pip3 install --no-cache-dir -r /requirements.txt -r /requirements_optional.txt -r /requirements_test.txt \
-    && /platformio_install_deps.py /platformio.ini
-
-VOLUME ["/esphome"]
-WORKDIR /esphome


@@ -2,7 +2,7 @@
 from dataclasses import dataclass
 import subprocess
 import argparse
-import platform
+from platform import machine
 import shlex
 import re
 import sys
@@ -24,9 +24,6 @@ TYPE_LINT = 'lint'
 TYPES = [TYPE_DOCKER, TYPE_HA_ADDON, TYPE_LINT]

-BASE_VERSION = "4.2.0"
-
 parser = argparse.ArgumentParser()
 parser.add_argument("--tag", type=str, required=True, help="The main docker tag to push to. If a version number also adds latest and/or beta tag")
 parser.add_argument("--arch", choices=ARCHS, required=False, help="The architecture to build for")
@@ -34,27 +31,17 @@ parser.add_argument("--build-type", choices=TYPES, required=True, help="The type
 parser.add_argument("--dry-run", action="store_true", help="Don't run any commands, just print them")
 subparsers = parser.add_subparsers(help="Action to perform", dest="command", required=True)
 build_parser = subparsers.add_parser("build", help="Build the image")
-push_parser = subparsers.add_parser("push", help="Tag the already built image and push it to docker hub")
+build_parser.add_argument("--push", help="Also push the images")
 manifest_parser = subparsers.add_parser("manifest", help="Create a manifest from already pushed images")

-# only lists some possibilities, doesn't have to be perfect
-# https://stackoverflow.com/a/45125525
-UNAME_TO_ARCH = {
-    "x86_64": ARCH_AMD64,
-    "aarch64": ARCH_AARCH64,
-    "aarch64_be": ARCH_AARCH64,
-    "arm": ARCH_ARMV7,
-}
-

 @dataclass(frozen=True)
 class DockerParams:
-    build_from: str
     build_to: str
     manifest_to: str
-    dockerfile: str
+    baseimgtype: str
+    platform: str
+    target: str

     @classmethod
     def for_type_arch(cls, build_type, arch):
@@ -63,18 +50,28 @@ class DockerParams:
             TYPE_HA_ADDON: "esphome/esphome-hassio",
             TYPE_LINT: "esphome/esphome-lint"
         }[build_type]
-        build_from = f"ghcr.io/{prefix}-base-{arch}:{BASE_VERSION}"
         build_to = f"{prefix}-{arch}"
-        dockerfile = {
-            TYPE_DOCKER: "docker/Dockerfile",
-            TYPE_HA_ADDON: "docker/Dockerfile.hassio",
-            TYPE_LINT: "docker/Dockerfile.lint",
+        baseimgtype = {
+            TYPE_DOCKER: "docker",
+            TYPE_HA_ADDON: "hassio",
+            TYPE_LINT: "docker",
+        }[build_type]
+        platform = {
+            ARCH_AMD64: "linux/amd64",
+            ARCH_ARMV7: "linux/arm/v7",
+            ARCH_AARCH64: "linux/arm64",
+        }[arch]
+        target = {
+            TYPE_DOCKER: "docker",
+            TYPE_HA_ADDON: "hassio",
+            TYPE_LINT: "lint",
         }[build_type]
         return cls(
-            build_from=build_from,
             build_to=build_to,
             manifest_to=prefix,
-            dockerfile=dockerfile
+            baseimgtype=baseimgtype,
+            platform=platform,
+            target=target,
         )
@@ -117,41 +114,26 @@ def main():
             CHANNEL_RELEASE: "latest",
         }[channel]
         cache_img = f"ghcr.io/{params.build_to}:{cache_tag}"
-        run_command("docker", "pull", cache_img, ignore_error=True)
-
-        # 2. register QEMU binfmt (if not host arch)
-        is_native = UNAME_TO_ARCH.get(platform.machine()) == args.arch
-        if not is_native:
-            run_command(
-                "docker", "run", "--rm", "--privileged", "multiarch/qemu-user-static:5.2.0-2",
-                "--reset", "-p", "yes"
-            )
-
-        # 3. build
-        run_command(
-            "docker", "build",
-            "--build-arg", f"BUILD_FROM={params.build_from}",
-            "--build-arg", f"BUILD_VERSION={args.tag}",
-            "--tag", f"{params.build_to}:{args.tag}",
-            "--cache-from", cache_img,
-            "--file", params.dockerfile,
-            "."
-        )
-    elif args.command == "push":
-        params = DockerParams.for_type_arch(args.build_type, args.arch)
         imgs = [f"{params.build_to}:{tag}" for tag in tags_to_push]
         imgs += [f"ghcr.io/{params.build_to}:{tag}" for tag in tags_to_push]
-        src = imgs[0]
-        # 1. tag images
-        for img in imgs[1:]:
-            run_command(
-                "docker", "tag", src, img
-            )
-        # 2. push images
+
+        # 3. build
+        cmd = [
+            "docker", "buildx", "build",
+            "--build-arg", f"BASEIMGTYPE={params.baseimgtype}",
+            "--build-arg", f"BUILD_VERSION={args.tag}",
+            "--cache-from", cache_img,
+            "--file", "docker/Dockerfile",
+            "--platform", params.platform,
+            "--target", params.target,
+        ]
         for img in imgs:
-            run_command(
-                "docker", "push", img
-            )
+            cmd += ["--tag", img]
+        if args.push:
+            cmd.append("--push")
+        run_command(*cmd, ".")

     elif args.command == "manifest":
         manifest = DockerParams.for_type_arch(args.build_type, ARCH_AMD64).manifest_to
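With the standalone push subcommand gone, pushing is now a flag on build that buildx handles directly. For example, mirroring the usage comment at the top of docker/Dockerfile with the new flag added (tag value illustrative):

    python3 docker/build.py --tag dev --arch amd64 --build-type docker build --push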

docker/docker_entrypoint.sh (new executable file)

@@ -0,0 +1,18 @@
+#!/bin/bash
+
+# If /cache is mounted, use that as PIO's coredir
+# otherwise use path in /config (so that PIO packages aren't downloaded on each compile)
+
+if [[ -d /cache ]]; then
+    export PLATFORMIO_CORE_DIR=/cache/platformio
+else
+    export PLATFORMIO_CORE_DIR=/config/.esphome/platformio
+fi
+
+if [[ ! -d "${PLATFORMIO_CORE_DIR}" ]]; then
+    echo "Creating cache directory ${PLATFORMIO_CORE_DIR}"
+    echo "You can change this behavior by mounting a directory to the container's /cache directory."
+    mkdir -p "${PLATFORMIO_CORE_DIR}"
+fi
+
+exec esphome "$@"
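A hedged usage sketch for the new entrypoint: mounting something at /cache keeps downloaded PlatformIO packages out of /config and reuses them across runs (volume name, image tag and YAML file are illustrative):

    docker run --rm \
      -v "${PWD}":/config \
      -v esphome-pio-cache:/cache \
      esphome/esphome:dev compile example.yaml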


@@ -0,0 +1,9 @@
+#!/usr/bin/with-contenv bashio
+# ==============================================================================
+# Community Hass.io Add-ons: ESPHome
+# This files creates all directories used by esphome
+# ==============================================================================
+
+PLATFORMIO_CORE_DIR=/data/cache/platformio
+mkdir -p "${PLATFORMIO_CORE_DIR}"


@@ -22,5 +22,8 @@ if bashio::config.has_value 'relative_url'; then
     export ESPHOME_DASHBOARD_RELATIVE_URL=$(bashio::config 'relative_url')
 fi

+export PLATFORMIO_CORE_DIR=/data/cache/platformio
+export PLATFORMIO_GLOBALLIB_DIR=/piolibs
+
 bashio::log.info "Starting ESPHome dashboard..."
 exec esphome dashboard /config/esphome --socket /var/run/esphome.sock --hassio


@@ -67,8 +67,8 @@ FILTER_PLATFORMIO_LINES = [
 def run_platformio_cli(*args, **kwargs) -> Union[str, int]:
     os.environ["PLATFORMIO_FORCE_COLOR"] = "true"
     os.environ["PLATFORMIO_BUILD_DIR"] = os.path.abspath(CORE.relative_pioenvs_path())
-    os.environ["PLATFORMIO_LIBDEPS_DIR"] = os.path.abspath(
-        CORE.relative_piolibdeps_path()
+    os.environ.setdefault(
+        "PLATFORMIO_LIBDEPS_DIR", os.path.abspath(CORE.relative_piolibdeps_path())
     )
     cmd = ["platformio"] + list(args)


@@ -217,7 +217,9 @@ def lint_ext_check(fname):
     )

-@lint_file_check(exclude=["docker/rootfs/*", "docker/*.py", "script/*", "setup.py"])
+@lint_file_check(
+    exclude=["**.sh", "docker/hassio-rootfs/**", "docker/*.py", "script/*", "setup.py"]
+)
 def lint_executable_bit(fname):
     ex = EXECUTABLE_BIT[fname]
     if ex != 100644:


@@ -12,7 +12,6 @@ if [ ! -f $cpp_json ]; then
     pio init --ide vscode --silent
     sed -i "/\\/workspaces\/esphome\/include/d" $cpp_json
 else
-    echo "Cpp environment already configured. To reconfigure it you could run one the following commands:"
-    echo " pio init --ide vscode -e livingroom8266"
-    echo " pio init --ide vscode -e livingroom32"
+    echo "Cpp environment already configured. To reconfigure it you can run one the following commands:"
+    echo " pio init --ide vscode"
 fi