The setup with a separate cache container doesn't make sense; sharing the cache directly from the base image build simplifies the entire caching setup.
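
For the shared cache to pay off, the Dockerfiles have to tell dnf5 to keep downloaded packages, since keepcache is off by default and RPMs are deleted after a successful install. A minimal sketch of the idea (the base image and package name here are placeholders, not the contents of the real Dockerfile.kde):

# Hypothetical Dockerfile excerpt; base image and package are illustrative.
FROM quay.io/fedora/fedora-kinoite:latest
# keepcache=True leaves the downloaded RPMs in /var/cache/libdnf5, the
# directory the workflow below bind-mounts from the host, so every build
# after the first one reuses the same downloads.
RUN dnf5 install -y --setopt=keepcache=True some-package

The full workflow: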
on:
  push:
  schedule:
    - cron: '0 5 * * *'

jobs:
  build-kde:
    runs-on: shell
    steps:
      - uses: actions/checkout@v4

      - run: cd ${{ env.GITHUB_WORKSPACE }}

      - run: podman login -u ${{ vars.REGISTRY_USERNAME }} -p ${{ secrets.REGISTRY_PASSWORD }} ${{ vars.REGISTRY_DOMAIN }}
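
      # The base build pulls its parent image fresh and mounts the host cache
      # read-write (':z' relabels it for SELinux), letting dnf5 populate it.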
      # base
      - run: podman build . -f Dockerfile.kde --no-cache --pull=always -v ${PWD}/cache:/var/cache/libdnf5:z --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main

      - run: podman push --compression-format=zstd:chunked --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main
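
      # The derived builds use the local image just built ('--pull=never')
      # and take the same cache as an overlay mount (':O'): reads come from
      # the shared cache, writes land in a throwaway upper layer.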
      # base + nvidia
      - run: podman build . -f Dockerfile.kde-nvidia --no-cache --pull=never -v ${PWD}/cache:/var/cache/libdnf5:O --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia

      - run: podman push --compression-format=zstd:chunked --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia

      # base + nvidia + ver4a's configuration
      - run: podman build . -f Dockerfile.kde-nvidia-ver4a --no-cache --pull=never -v ${PWD}/cache:/var/cache/libdnf5:O --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia-ver4a

      - run: podman push --compression-format=zstd:chunked --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia-ver4a ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia-ver4a
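
      # Remove the local images even if an earlier step failed, so they
      # don't pile up on the shell runner across the nightly runs.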
      - if: '!cancelled()'
        run: >
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main && podman image prune -f &&
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia && podman image prune -f &&
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-kde:main-nvidia-ver4a && podman image prune -f

  build-gnome:
    runs-on: shell
    steps:
      - uses: actions/checkout@v4

      - run: cd ${{ env.GITHUB_WORKSPACE }}

      - run: podman login -u ${{ vars.REGISTRY_USERNAME }} -p ${{ secrets.REGISTRY_PASSWORD }} ${{ vars.REGISTRY_DOMAIN }}

      # base
      - run: podman build . -f Dockerfile.gnome --no-cache --pull=always -v ${PWD}/cache:/var/cache/libdnf5:z --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main

      - run: podman push --compression-format=zstd:chunked --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main

      # base + nvidia
      - run: podman build . -f Dockerfile.gnome-nvidia --no-cache --pull=never -v ${PWD}/cache:/var/cache/libdnf5:O --squash -t ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main-nvidia

      - run: podman push --compression-format=zstd:chunked --compression-level=${{ vars.COMPRESSION_LEVEL }} ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main-nvidia ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main-nvidia

      - if: '!cancelled()'
        run: >
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main && podman image prune -f &&
          podman image rm -f ${{ vars.REGISTRY_DOMAIN }}/ver4a/onc-gnome:main-nvidia && podman image prune -f