ostree-native-containers/.forgejo/workflows/build-image.yaml
ver4a dc28e9f75e
Some checks failed: build-kde (push) failing after 2m9s · build-gnome (push) failing after 2m4s
Use flock instead of checking for running pulls using pgrep
The previous solution was potentially racy: it only prevented starting a
pull if one was already running, but there was still a tiny window in
which both pulls could pass the check and then run at the same time. The
new solution closes that gap, since acquiring the lock is atomic.
2024-11-18 11:05:48 +01:00
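
For context, a minimal sketch of the two approaches (the pgrep variant is reconstructed from the commit message, not taken from the old workflow):

    # Check-then-act is racy: two jobs can both pass the check in the window
    # between pgrep exiting and "podman pull" actually starting.
    if ! pgrep -f 'podman pull' > /dev/null; then
        podman pull quay.io/fedora-ostree-desktops/kinoite:41
    fi

    # flock(1) takes an exclusive lock on the lock file before running the
    # command, so a second invocation blocks until the first one releases it.
    flock -x /tmp/CI-podman-pull-lock -c 'podman pull quay.io/fedora-ostree-desktops/kinoite:41'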

47 lines · 3.6 KiB · YAML

on:
  push:
  schedule:
    - cron: '0 5 * * *'
jobs:
  build-kde:
    runs-on: shell
    steps:
      - uses: actions/checkout@v4
      - run: cd ${{ env.GITHUB_WORKSPACE }}
      - run: podman login -u ${{ vars.REGISTRY_USERNAME }} -p ${{ secrets.REGISTRY_PASSWORD }} ${{ vars.REGISTRY_DOMAIN }}
      # base
      - run: mkdir cache
      # Waits for all "podman pull"s to exit before starting a pull
      - run: flock -x /tmp/CI-podman-pull-lock -c 'podman pull quay.io/fedora-ostree-desktops/kinoite:41'
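      # --pull=never: the builds below reuse the base image pulled under the lock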
      - run: podman build . -f Dockerfile.kde --no-cache --pull=never -v ${PWD}/cache:/var/cache/libdnf5:Z --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main
      - run: podman push --compression-format=zstd --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main
      # base + nvidia
      - run: podman build . -f Dockerfile.kde-nvidia --no-cache --pull=never -v ${PWD}/cache:/var/cache/libdnf5:Z --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia
      - run: podman push --compression-format=zstd --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia
      # base + nvidia + ver4a's configuration
      - run: podman build . -f Dockerfile.kde-nvidia-ver4a --no-cache --pull=never -v ${PWD}/cache:/var/cache/libdnf5:Z --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia-ver4a
      - run: podman push --compression-format=zstd --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia-ver4a ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia-ver4a
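      # Remove the images that were just pushed and prune dangling layers;
      # runs even if an earlier step failed, but not if the run was cancelled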
      - if: '!cancelled()'
        run: >
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main && podman image prune -f &&
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia && podman image prune -f &&
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia-ver4a && podman image prune -f
  build-gnome:
    runs-on: shell
    steps:
      - uses: actions/checkout@v4
      - run: cd ${{ env.GITHUB_WORKSPACE }}
      - run: podman login -u ${{ vars.REGISTRY_USERNAME }} -p ${{ secrets.REGISTRY_PASSWORD }} ${{ vars.REGISTRY_DOMAIN }}
      # base
      - run: mkdir cache
      # Waits for all "podman pull"s to exit before starting a pull
      - run: flock -x /tmp/CI-podman-pull-lock -c 'podman pull quay.io/fedora-ostree-desktops/silverblue:41'
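      # --pull=never: the builds below reuse the base image pulled under the lock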
      - run: podman build . -f Dockerfile.gnome --no-cache --pull=never -v ${PWD}/cache:/var/cache/libdnf5:Z --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main
      - run: podman push --compression-format=zstd --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main
      # base + nvidia
      - run: podman build . -f Dockerfile.gnome-nvidia --no-cache --pull=never -v ${PWD}/cache:/var/cache/libdnf5:Z --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main-nvidia
      - run: podman push --compression-format=zstd --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main-nvidia ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main-nvidia
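      # Remove the images that were just pushed and prune dangling layers;
      # runs even if an earlier step failed, but not if the run was cancelled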
      - if: '!cancelled()'
        run: >
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main && podman image prune -f &&
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main-nvidia && podman image prune -f
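
The serialization is easy to check locally; assuming two shells on the same machine:

    # shell 1: hold the lock for 30 seconds
    flock -x /tmp/CI-podman-pull-lock -c 'sleep 30'

    # shell 2: started while shell 1 is running; blocks until the lock is
    # released, then runs immediately
    flock -x /tmp/CI-podman-pull-lock -c 'echo acquired'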