diff --git a/.direnv/bin/nix-direnv-reload b/.direnv/bin/nix-direnv-reload
deleted file mode 100755
index 683725d..0000000
--- a/.direnv/bin/nix-direnv-reload
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env bash
-set -e
-if [[ ! -d "/home/mya/thesis" ]]; then
-  echo "Cannot find source directory; Did you move it?"
-  echo "(Looking for "/home/mya/thesis")"
-  echo 'Cannot force reload with this script - use "direnv reload" manually and then try again'
-  exit 1
-fi
-
-# rebuild the cache forcefully
-_nix_direnv_force_reload=1 direnv exec "/home/mya/thesis" true
-
-# Update the mtime for .envrc.
-# This will cause direnv to reload again - but without re-building.
-touch "/home/mya/thesis/.envrc"
-
-# Also update the timestamp of whatever profile_rc we have.
-# This makes sure that we know we are up to date.
-touch -r "/home/mya/thesis/.envrc" "/home/mya/thesis/.direnv"/*.rc
diff --git a/.direnv/flake-inputs/72x8cqlqwfpj59mhy3mk2rh12zacddmr-source b/.direnv/flake-inputs/72x8cqlqwfpj59mhy3mk2rh12zacddmr-source
deleted file mode 120000
index d5a1ddc..0000000
--- a/.direnv/flake-inputs/72x8cqlqwfpj59mhy3mk2rh12zacddmr-source
+++ /dev/null
@@ -1 +0,0 @@
-/nix/store/72x8cqlqwfpj59mhy3mk2rh12zacddmr-source
\ No newline at end of file
diff --git a/.direnv/flake-inputs/8mf17vg4416wkyd7kc1b4yp4pgvsi0qy-source b/.direnv/flake-inputs/8mf17vg4416wkyd7kc1b4yp4pgvsi0qy-source
deleted file mode 120000
index ef7441f..0000000
--- a/.direnv/flake-inputs/8mf17vg4416wkyd7kc1b4yp4pgvsi0qy-source
+++ /dev/null
@@ -1 +0,0 @@
-/nix/store/8mf17vg4416wkyd7kc1b4yp4pgvsi0qy-source
\ No newline at end of file
diff --git a/.direnv/flake-inputs/i9syjxw0famqkabw2d01n723m3sc7vdn-source b/.direnv/flake-inputs/i9syjxw0famqkabw2d01n723m3sc7vdn-source
deleted file mode 120000
index 4be6498..0000000
--- a/.direnv/flake-inputs/i9syjxw0famqkabw2d01n723m3sc7vdn-source
+++ /dev/null
@@ -1 +0,0 @@
-/nix/store/i9syjxw0famqkabw2d01n723m3sc7vdn-source
\ No newline at end of file
diff --git a/.direnv/flake-inputs/na7sykizsgkzh9i3wc8m8pz5xfqib2rv-source b/.direnv/flake-inputs/na7sykizsgkzh9i3wc8m8pz5xfqib2rv-source
deleted file mode 120000
index 9cfec12..0000000
--- a/.direnv/flake-inputs/na7sykizsgkzh9i3wc8m8pz5xfqib2rv-source
+++ /dev/null
@@ -1 +0,0 @@
-/nix/store/na7sykizsgkzh9i3wc8m8pz5xfqib2rv-source
\ No newline at end of file
diff --git a/.direnv/flake-inputs/rylyv28gslxqq5pxzr4llgrp2asq898n-source b/.direnv/flake-inputs/rylyv28gslxqq5pxzr4llgrp2asq898n-source
deleted file mode 120000
index a3e4ef9..0000000
--- a/.direnv/flake-inputs/rylyv28gslxqq5pxzr4llgrp2asq898n-source
+++ /dev/null
@@ -1 +0,0 @@
-/nix/store/rylyv28gslxqq5pxzr4llgrp2asq898n-source
\ No newline at end of file
diff --git a/.direnv/flake-inputs/yj1wxm9hh8610iyzqnz75kvs6xl8j3my-source b/.direnv/flake-inputs/yj1wxm9hh8610iyzqnz75kvs6xl8j3my-source
deleted file mode 120000
index f17959f..0000000
--- a/.direnv/flake-inputs/yj1wxm9hh8610iyzqnz75kvs6xl8j3my-source
+++ /dev/null
@@ -1 +0,0 @@
-/nix/store/yj1wxm9hh8610iyzqnz75kvs6xl8j3my-source
\ No newline at end of file
diff --git a/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa b/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa
deleted file mode 120000
index bf24490..0000000
--- a/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa
+++ /dev/null
@@ -1 +0,0 @@
-/nix/store/zmfxzr0ln2qgqg72g8a66v5jr1lqmwd1-nix-shell-env
\ No newline at end of file
diff --git a/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa.rc b/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa.rc
deleted file mode 100644
index 57db8f0..0000000
--- a/.direnv/flake-profile-a5d5b61aa8a61b7d9d765e1daf971a9a578f1cfa.rc
+++ /dev/null
@@ -1,1881 +0,0 @@
[1,881 deleted lines omitted: nix-direnv's cached shell profile for the mkShell environment, i.e. the exported toolchain variables (PATH, NIX_CC, NIX_LDFLAGS, ...) and the stock nixpkgs setup.sh phase and hook functions. All machine-generated.]
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
new file mode 100644
index 0000000..8e2a69e
--- /dev/null
+++ b/.github/workflows/main.yml
@@ -0,0 +1,16 @@
+name: "Build"
+on:
+  pull_request:
+  push:
+jobs:
+  build:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v4
+      - uses: cachix/install-nix-action@v27
+      - run: nix build
+      - run: cp result HDA-thesis.pdf
+      - uses: actions/upload-artifact@v4
+        with:
+          name: HDA-thesis
+          path: HDA-thesis.pdf
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..428fbed
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+.direnv
+main.pdf
+Session.vim
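A note on the contract between this workflow and the repository's flake.nix (which is not part of this diff): `nix build` builds the flake's default package and drops a `result` symlink into the checkout, so the workflow assumes that default package evaluates to the compiled thesis PDF itself. The `cp result HDA-thesis.pdf` step dereferences the store symlink into a regular file, which `actions/upload-artifact` handles more predictably than a symlink; every push and pull request then yields a downloadable `HDA-thesis` artifact without a local Typst toolchain.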
tabonly -cd ~/thesis -if expand('%') == '' && !&modified && line('$') <= 1 && getline(1) == '' - let s:wipebuf = bufnr('%') -endif -let s:shortmess_save = &shortmess -if &shortmess =~ 'A' - set shortmess=aoOA -else - set shortmess=aoO -endif -badd +183 content/BACH.typ -badd +1 glossary.typ -badd +1 bibliography.bib -badd +1 \$ -badd +14 pseudocode/bach_1.typ -badd +265 content/SMHD.typ -badd +7 pseudocode/bach_find_best_appr.typ -badd +60 content/introduction.typ -argglobal -%argdel -set stal=2 -tabnew +setlocal\ bufhidden=wipe -tabnew +setlocal\ bufhidden=wipe -tabnew +setlocal\ bufhidden=wipe -tabnew +setlocal\ bufhidden=wipe -tabrewind -edit content/BACH.typ -argglobal -balt content/SMHD.typ -setlocal fdm=manual -setlocal fde=0 -setlocal fmr={{{,}}} -setlocal fdi=# -setlocal fdl=0 -setlocal fml=1 -setlocal fdn=20 -setlocal fen -silent! normal! zE -let &fdl = &fdl -let s:l = 88 - ((17 * winheight(0) + 25) / 50) -if s:l < 1 | let s:l = 1 | endif -keepjumps exe s:l -normal! zt -keepjumps 88 -normal! 0102| -tabnext -edit content/introduction.typ -argglobal -balt content/BACH.typ -setlocal fdm=manual -setlocal fde=0 -setlocal fmr={{{,}}} -setlocal fdi=# -setlocal fdl=0 -setlocal fml=1 -setlocal fdn=20 -setlocal fen -silent! normal! zE -let &fdl = &fdl -let s:l = 56 - ((41 * winheight(0) + 25) / 50) -if s:l < 1 | let s:l = 1 | endif -keepjumps exe s:l -normal! zt -keepjumps 56 -normal! 0 -tabnext -edit glossary.typ -argglobal -balt content/BACH.typ -setlocal fdm=manual -setlocal fde=0 -setlocal fmr={{{,}}} -setlocal fdi=# -setlocal fdl=0 -setlocal fml=1 -setlocal fdn=20 -setlocal fen -silent! normal! zE -let &fdl = &fdl -let s:l = 1 - ((0 * winheight(0) + 25) / 50) -if s:l < 1 | let s:l = 1 | endif -keepjumps exe s:l -normal! zt -keepjumps 1 -normal! 0 -tabnext -edit bibliography.bib -argglobal -balt glossary.typ -setlocal fdm=manual -setlocal fde=0 -setlocal fmr={{{,}}} -setlocal fdi=# -setlocal fdl=0 -setlocal fml=1 -setlocal fdn=20 -setlocal fen -silent! normal! zE -let &fdl = &fdl -let s:l = 1 - ((0 * winheight(0) + 25) / 50) -if s:l < 1 | let s:l = 1 | endif -keepjumps exe s:l -normal! zt -keepjumps 1 -normal! 015| -tabnext -edit pseudocode/bach_1.typ -let s:save_splitbelow = &splitbelow -let s:save_splitright = &splitright -set splitbelow splitright -wincmd _ | wincmd | -split -1wincmd k -wincmd w -let &splitbelow = s:save_splitbelow -let &splitright = s:save_splitright -wincmd t -let s:save_winminheight = &winminheight -let s:save_winminwidth = &winminwidth -set winminheight=0 -set winheight=1 -set winminwidth=0 -set winwidth=1 -exe '1resize ' . ((&lines * 25 + 26) / 53) -exe '2resize ' . ((&lines * 24 + 26) / 53) -argglobal -balt bibliography.bib -setlocal fdm=manual -setlocal fde=0 -setlocal fmr={{{,}}} -setlocal fdi=# -setlocal fdl=0 -setlocal fml=1 -setlocal fdn=20 -setlocal fen -silent! normal! zE -let &fdl = &fdl -let s:l = 12 - ((11 * winheight(0) + 12) / 25) -if s:l < 1 | let s:l = 1 | endif -keepjumps exe s:l -normal! zt -keepjumps 12 -normal! 068| -wincmd w -argglobal -if bufexists(fnamemodify("pseudocode/bach_find_best_appr.typ", ":p")) | buffer pseudocode/bach_find_best_appr.typ | else | edit pseudocode/bach_find_best_appr.typ | endif -if &buftype ==# 'terminal' - silent file pseudocode/bach_find_best_appr.typ -endif -balt pseudocode/bach_1.typ -setlocal fdm=manual -setlocal fde=0 -setlocal fmr={{{,}}} -setlocal fdi=# -setlocal fdl=0 -setlocal fml=1 -setlocal fdn=20 -setlocal fen -silent! normal! 
zE -let &fdl = &fdl -let s:l = 7 - ((6 * winheight(0) + 12) / 24) -if s:l < 1 | let s:l = 1 | endif -keepjumps exe s:l -normal! zt -keepjumps 7 -normal! 031| -wincmd w -exe '1resize ' . ((&lines * 25 + 26) / 53) -exe '2resize ' . ((&lines * 24 + 26) / 53) -tabnext 5 -set stal=1 -if exists('s:wipebuf') && len(win_findbuf(s:wipebuf)) == 0 && getbufvar(s:wipebuf, '&buftype') isnot# 'terminal' - silent exe 'bwipe ' . s:wipebuf -endif -unlet! s:wipebuf -set winheight=1 winwidth=20 -let &shortmess = s:shortmess_save -let &winminheight = s:save_winminheight -let &winminwidth = s:save_winminwidth -let s:sx = expand(":p:r")."x.vim" -if filereadable(s:sx) - exe "source " . fnameescape(s:sx) -endif -let &g:so = s:so_save | let &g:siso = s:siso_save -set hlsearch -nohlsearch -doautoall SessionLoadPost -unlet SessionLoad -" vim: set ft=vim : diff --git a/bibliography.bib b/bibliography.bib index 5be53bc..dcba2bd 100644 --- a/bibliography.bib +++ b/bibliography.bib @@ -54,4 +54,57 @@ year={2020}, organization={IEEE} } - + +@article{PUFChartRef, + title={Variable-length bit mapping and error-correcting codes for higher-order alphabet pufs—extended version}, + author={Immler, Vincent and Hiller, Matthias and Liu, Qinzhi and Lenz, Andreas and Wachter-Zeh, Antonia}, + journal={Journal of Hardware and Systems Security}, + volume={3}, + pages={78--93}, + year={2019}, + publisher={Springer} +} + + +@article{PUFIntro2, + title={Physically Unclonable Functions: Constructions, Properties and Applications (Fysisch onkloonbare functies: constructies, eigenschappen en toepassingen)}, + author={Maes, Roel}, + year={2012} +} + +@inproceedings{fuzzycommitmentpaper, + title={Achieving secure fuzzy commitment scheme for optical pufs}, + author={Ignatenko, Tanya and Willems, Frans}, + booktitle={2009 Fifth International Conference on Intelligent Information Hiding and Multimedia Signal Processing}, + pages={1185--1188}, + year={2009}, + organization={IEEE} +} + +@article{ruchti2021decoder, + title={When the Decoder Has to Look Twice: Glitching a PUF Error Correction}, + author={Ruchti, Jonas and Gruber, Michael and Pehl, Michael}, + journal={Cryptology ePrint Archive}, + year={2021} +} + +@article{delvaux2014helper, + title={Helper data algorithms for PUF-based key generation: Overview and analysis}, + author={Delvaux, Jeroen and Gu, Dawu and Schellekens, Dries and Verbauwhede, Ingrid}, + journal={IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems}, + volume={34}, + number={6}, + pages={889--902}, + year={2014}, + publisher={IEEE} +} + +@inproceedings{maes2009soft, + title={A soft decision helper data algorithm for SRAM PUFs}, + author={Maes, Roel and Tuyls, Pim and Verbauwhede, Ingrid}, + booktitle={2009 IEEE international symposium on information theory}, + pages={2101--2105}, + year={2009}, + organization={IEEE} +} + diff --git a/charts/PUF.typ b/charts/PUF.typ new file mode 100644 index 0000000..dc6a216 --- /dev/null +++ b/charts/PUF.typ @@ -0,0 +1,34 @@ +#import "@preview/fletcher:0.5.1" as fletcher: diagram, node, edge +#import fletcher.shapes: diamond + +#diagram( + node-stroke: 1pt, + edge-stroke: 1pt, + //node-inset: 2pt, + node((0,0), [PUF], corner-radius: 2pt, name: ), + edge(, , "->", $nu$), + node((1,0), [Initial quantization], name: , width: 10em), + edge(, , "->", $k$), + node((2,0), [Encoding], name: , width: 8em), + node((1,1), [Helper data\ generation], name: , width: 10em), + edge(, , "->"), + node((2.25, -0.5), [Enrollment], name: , stroke: none), + node(enclose: (, , ), 
stroke: (dash: "dashed"), inset: 10pt),
+ node((0, 2), [PUF], corner-radius: 2pt, name: ),
+ node((1, 2), [Repeated quantization], name: ),
+ node((2, 2), [Error correction], name: ),
+ node((3, 1), [$kappa = kappa^*$?],name: ),
+ node((2, 1), [Error correction helper data], name: , width: 8em),
+ node((2.25, 2.5), [Reconstruction], stroke: none, name: ),
+ node(enclose: (, , ), stroke: (dash: "dashed"), inset: 10pt),
+
+ edge(, , "->", $h$),
+ edge(, , "->", $nu^*$),
+ edge(, , "->", $k^*$),
+ edge(, , "->"),
+ edge(, "r,d", "->", $kappa$, label-pos: 0.3),
+ edge(, "r,u", "->", $kappa^*$, label-pos: 0.4),
+ edge(, , "->")
+
+
+) diff --git a/content/BACH.typ b/content/BACH.typ index 4a27422..df147e3 100644 --- a/content/BACH.typ +++ b/content/BACH.typ @@ -1,12 +1,17 @@ #import "@preview/glossarium:0.4.1": *
+#import "@preview/tablex:0.0.8": tablex, rowspanx, colspanx
-= Boundary Adaptive Clustering with Helper Data
+= Boundary Adaptive Clustering with Helper Data (BACH)
-Instead of generating helper-data to improve the quantization process itself, like in #gls("smhdt"), or using some kind of error correcting code after the quantization process, we can also try to find helper-data before performing the quantization that will optimize our input values before quantizing them to minimize the risk of bit and symbol errors during the reconstruction phase.
+//Instead of generating helper-data to improve the quantization process itself, like in #gls("smhdt"), or using some kind of error correcting code after the quantization process, we can also try to find helper-data before performing the quantization that will optimize our input values before quantizing them to minimize the risk of bit and symbol errors during the reconstruction phase.
+
+We can explore the option of finding helper data before performing the quantization process.
+This approach aims to optimize our input values prior to quantization, which may help minimize the risk of bit and symbol errors during the reconstruction phase.
+This differs from methods like @smhdt, which generate helper data to improve the quantization process itself, or those that apply error-correcting codes afterward.
 Since this #gls("hda") modifies the input values before the quantization takes place, we will consider the input values as zero-mean Gaussian distributed and not use a CDF to transform these values into the tilde-domain.
-== Optimizing a 1-bit sign-based quantization
+== Optimizing single-bit sign-based quantization
 Before we take a look at the higher order quantization cases, we will start with a very basic method of quantization: a quantizer that only returns a symbol with a width of $1$ bit and uses the sign of the input value to determine the resulting bit symbol.
@@ -17,14 +22,15 @@ Before we take a look at the higher order quantization cases, we will start with
 If we overlay the PDF of a zero-mean Gaussian distributed variable $X$ with a sign-based quantizer function as shown in @fig:1-bit_normal, we can see that the expected value of the Gaussian distribution overlaps with the decision threshold of the sign-based quantizer.
 Considering that the margin of error of the value $x$ is comparable with the one shown in @fig:tmhd_example_enroll, we can conclude that values of $X$ that reside near $0$ are to be considered more unreliable than values that are further away from the x-value 0.
-This means that the quantizer used here is very unreliable without generated helper-data.
+This means that the quantizer used here is very unreliable as is.
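To make this unreliability around $x = 0$ concrete, a minimal simulation sketch can help (not part of the thesis; the re-measurement noise level $sigma = 0.1$ is an assumed placeholder):

```python
import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal(100_000)                # enrollment readouts, X ~ N(0, 1)
x_star = x + 0.1 * rng.standard_normal(x.size)  # noisy re-measurement (assumed sigma = 0.1)

# Sign-based 1-bit quantization during enrollment and reconstruction
errors = (x >= 0) != (x_star >= 0)
print(f"overall BER: {errors.mean():.4f}")
# Bit errors cluster at the decision threshold x = 0:
print(f"BER for |x| < 0.05: {errors[np.abs(x) < 0.05].mean():.4f}")
print(f"BER for |x| > 0.5:  {errors[np.abs(x) > 0.5].mean():.6f}")
```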
Now, to increase the reliability of this quantizer, we can try to move our input values further away from the value $x = 0$.
-To do so, we can define a new input value $z$ as a linear combination of two realizations of $X$, $x_1$ and $x_2$ with a set of weights $h_1$ and $h_2$:
+To do so, we can define a new input value $z$ as a linear combination of two realizations of $X$, $x_1$ and $x_2$, with a set of weights $h_1$ and $h_2$ that we will use as helper data:
$
-z = h_1 dot x_1 + h_2 dot x_2 .
+z = h_1 dot x_1 + h_2 dot x_2 ,
$
+with $h_i in {plus.minus 1}$.
 Building only the sum of two input values $x_1 + x_2$ is not sufficient here, since the resulting distribution would be a normal distribution with $mu = 0$ as well.
 === Derivation of the resulting distribution
 To find a description for the random distribution $Z$ of $z$, we can interpret this process mathematically as a maximisation of a sum.
@@ -32,9 +38,9 @@ This can be realized by replacing the values of $x_i$ with their absolute values
$
 z = abs(x_1) + abs(x_2)
$
-Taking into account, that $x_i$ are realizations of a normal distribution -- that we can assume without loss of generality to have its expected value at $x=0$ and a standard deviation of $sigma = 1$ -- we can define the overall resulting random distribution $Z$ to be:
+Taking into account that $x_i$ are realizations of a normal distribution, we can assume without loss of generality that the $X_i$ are i.i.d. standard normal, /*to have its expected value at $x=0$ and a standard deviation of $sigma = 1$ --*/ defining the overall resulting random distribution $Z$ as:
$
-Z = abs(X) + abs(X).
+Z = abs(X_1) + abs(X_2).
$
 We will redefine $abs(X)$ as a half-normal distribution $Y$ whose PDF is
$
@@ -43,7 +49,7 @@ f_Y(y, sigma) &= frac(sqrt(2), sigma sqrt(pi)) lr(exp(-frac(y^2, 2 sigma^2)) mid
$
 Now, $Z$ simplifies to
$
-Z = Y + Y.
+Z = Y_1 + Y_2.
$
 We can assume for now that the realizations of $Y$ are independent of each other.
 The PDF of the addition of these two distributions can be described through the convolution of their respective PDFs:
@@ -75,17 +81,23 @@ $f_Z^* (z)$ now describes the final random distribution after the application of
 === Generating helper-data
-To find the optimal set of helper-data that will result in the distribution shown in @fig:z_pdf, we can define the vector of all possible linear combinations $bold(z)$ as the vector-matrix multiplication of the two input values $x_i$ and the matrix $bold(H)$ of all weight combinations:
+To find the optimal set of helper-data that will result in the distribution shown in @fig:z_pdf, we can define the vector of all possible linear combinations $bold(z)$ as the vector-matrix multiplication of the input values $x_i$ and the matrix $bold(H)$ of all weight combinations with $h_i in {plus.minus 1}$:
$
 bold(z) &= bold(x) dot bold(H)\
-&= vec(x_1, x_2) dot mat(delim: "[", h_1, -h_1, h_1, -h_1; h_2, h_2, -h_2, -h_2)\
-&= vec(x_1, x_2) dot mat(delim: "[", +1, -1, +1, -1; +1, +1, -1, -1)
$
 We will choose the optimal weights based on the highest absolute value of $bold(z)$, as that value will be the furthest away from $0$.
-We may encounter two entries in $bold(z)$ that both have the same highest absolute value.
-In that case, we will choose the combination of weights randomly out of our possible options.
+//We may encounter two entries in $bold(z)$ that both have the same highest absolute value.
+//In that case, we will choose the combination of weights randomly out of our possible options.
+To avoid encountering two entries in $bold(z)$ that both have the same highest absolute value, we can set the first helper-data bit to always be $h_1 = 1$.
 
-If we take a look at the dimensionality of the matrix of all weight combinations, we notice that we will need to store $log_2(2) = 1$ helper-data bit.
+Considering our single-bit quantization case, @eq:z_combinations can be written as:
+
+$
+bold(z) = vec(x_1, x_2) dot mat(delim: "[", +1, -1, +1, -1; +1, +1, -1, -1)
+$
+
+The vector of optimal weights $bold(h_"opt")$ can now be found through $op("argmax")_h abs(bold(z))$.
+If we take a look at the dimensionality of the matrix of all weight combinations, we notice that we will need to store only $1$ helper-data bit per quantized symbol because $h_1$ is set to $1$.
 In fact, we will show later that the amount of helper-data bits used by this HDA is directly linked to the number of input values used instead of the number of bits we want to extract during quantization.
 
 == Generalization to higher-order bit quantization
 
@@ -94,23 +106,28 @@ We can generalize the idea of @sect:1-bit-opt and apply it for a higher-order bi
 Contrary to @smhdt, we will always use the same step function as quantizer and optimize the input values $x$ to be the furthest away from any decision threshold.
 In this higher-order case, this means that we want to optimise our input values as far away as possible from the nearest decision threshold of the quantizer instead of just maximising the absolute value of the linear combination.
-For a complete generalization of this method, we will also parametrize the amount of addends in the linear combination of $z$.
-//Later we will be able to show that a higher number of summands for $z$ can provide better approximations for the ideal values of $z$ at the expense of the number of available input values for the quantizer.
-
+For a complete generalization of this method, we will also parametrize the number of addends $N$ in the linear combination of $z$.
 That means we can define $z$ from now on as:
$
-z = sum_(i=1)^(n>=2) x_i dot h_i
+z = sum_(i=1)^(N) x_i dot h_i
$
 
 We can define the condition to test whether a given linear combination is optimal as follows:\
 The optimal linear combination $z_"opt"$ is found when the distance to the nearest quantizer decision bound is maximised.
+Finding the weights $bold(h)_"opt"$ of the optimal linear combination $z_"opt"$ can be formalized as:
+
+$
+bold(h)_"opt" = op("argmax")_h op("min")_j abs(bold(h)^T bold(x) - b_j) "s.t." h_i in {plus.minus 1}
+$
 
 ==== Example with 2-bit quantizer
 //Let's consider the following example using a 2-bit quantizer:\
 We can define the bounds of the two bit quantizer $bold(b)$ as $[-alpha, 0, alpha]$ omitting the bounds $plus.minus infinity$.
 The values of $bold(b)$ are already placed in the real domain to directly quantize normal distributed input values.
+A simple way to solve @eq:optimization is to use a brute-force method and calculate all distances to every quantization bound $b_j$, because the number of possible combinations is finite.
+Furthermore, finding a solution for @eq:optimization analytically proves to be significantly more complex.
 
 The linear combination $z$ for the number of addends $N = 2$ is defined as
$
@@ -142,47 +159,55 @@ The optimal linear combination $z_"opt"$ can now be found as the entry $z_j$ of $bold(z)$ where its corresponding distance $nu_j$ is maximised.
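Because the weight space is finite, the brute-force approach mentioned above is easy to write down. A minimal sketch (not from the thesis; the bound value $alpha = Phi^(-1)(3\/4) approx 0.674$ is the equi-probable placement for a standard normal and is an assumption here):

```python
import itertools
import numpy as np
from scipy.stats import norm

def find_best_weights(x, bounds):
    """Maximize the distance of z = h . x to the nearest quantizer bound
    over all weight vectors h with h_i in {+1, -1}; h_1 is fixed to +1
    to rule out the tie between h and -h."""
    best_h, best_z, best_dist = None, None, -np.inf
    for tail in itertools.product((1, -1), repeat=len(x) - 1):
        h = np.array((1,) + tail)
        z = float(h @ x)
        dist = np.min(np.abs(z - bounds))  # distance to the nearest bound
        if dist > best_dist:
            best_h, best_z, best_dist = h, z, dist
    return best_h, best_z

alpha = norm.ppf(0.75)                   # assumed 2-bit bound placement
bounds = np.array([-alpha, 0.0, alpha])  # +-infinity omitted, as above
h_opt, z_opt = find_best_weights(np.array([0.3, -1.2]), bounds)
```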
-Two important points were anticipated in this example: +=== Simulation of the bound distance maximisation strategy + +Two important points were anticipated in the preceding example: 1. We cannot define the resulting random distribution $Z$ after performing this operation analytically and thus also not the quantizer bounds $bold(b)$. A way to account for that is to guess the resulting random distribution and $bold(b)$ initially and repeating the optimization using quantizer bounds found through the @ecdf of the resulting linear combination values. 2. If the optimization described above is repeated multiple times using an @ecdf, the resulting random distribution $Z$ must converge to a stable random distribution. Otherwise we will not be able to carry out a reliable quantization in which the symbols are uniformly distributed. -=== Simulation of the bound distance maximisation strategy +To check that the strategy for optimizing the linear combination provided in the example above results in a converging random distribution, we will perform a simulation of the optimization as described in the example using $100 space.nobreak 000$ simulated normal distributed values as realizations of the standard normal distribution with the parameters $mu = 0$ and $sigma = 1$. -To check that the strategy for optimizing the linear combination provided in the example above results in a converging random distribution, we will perform a simulation of the optimization as described in the example using $100,000$ simulated normal distributed values as realizations of the standard normal distribution with the parameters $mu = 0$ and $sigma = 1$. +@fig:bach_instability shows various histograms of the vector $bold(z)_"opt"$ after different iterations. +Even though the overall shape of the distribution comes close to our goal of moving the input values away from the quantizer bounds $bold(b)$, the distribution itself does not converge to one specific, final shape. +It seems that the resulting distributions for each iteration oscillate in some way, since the distributions for iterations $7$ and $25$ have the same shape. +However the distribution seems to be chaotic and thus does not seem suitable for further quantization. #figure( grid( columns: (1fr, 1fr), rows: (2), - row-gutter: 5pt, - figure( - image("../graphics/plots/bach/instability/frame_1.png"), - ), - figure( - image("../graphics/plots/bach/instability/frame_7.png"), - ), - figure( - image("../graphics/plots/bach/instability/frame_18.png"), - ), - figure( - image("../graphics/plots/bach/instability/frame_25.png"), - ) + [//#figure( + #image("../graphics/plots/bach/instability/frame_1.png") + #v(-2em) + //) + Iteration 1], + [//#figure( + #image("../graphics/plots/bach/instability/frame_7.png") + #v(-2em) + //) + Iteration 7], + [//#figure( + #image("../graphics/plots/bach/instability/frame_18.png") + #v(-2em) + //) + Iteration 18], + [//#figure( + #image("../graphics/plots/bach/instability/frame_25.png") + #v(-2em) + //) + Iteration 25] ), caption: [Probability distributions for various iterations] ) -@fig:bach_instability shows various histograms of the vector $bold(z)_"opt"$ after different iterations. -Even though the overall shape of the distribution comes close to our goal of moving the input values away from the quantizer bounds $bold(b)$, the distribution itself does not converge to one specific, final shape. 
-It seems that the resulting distributions for each iteration oscillate in some way, since the distributions for iterations $7$ and $25$ have the same shape.
-However the distribution does not seem to be predictable on the basis of the index of the iteration.
 
 === Center Point Approximation
 
 For that reason, we will now propose a different strategy to find the weights for the optimal linear combination $z_"opt"$.
-Instead of defining the desired outcome of $z_"opt"$ as the greatest distance to the nearest quantizer decision threshold, we will define a vector $bold(cal(o)) in.rev {cal(o)_1, cal(o)_2 ..., cal(o)_(2^M)}$ containing the optimal values that we want to approximate with $z$.
-Considering a m-bit quantizer with $2^m$ steps, we can define the values of $bold(cal(o))$ as the center points of these quantizer steps.
-Its cardinality is $2^M$, while $M$ defines the number of bits we want to extract through the quantization.
+Instead of defining the desired outcome of $z_"opt"$ as the greatest distance to the nearest quantizer decision threshold, we will define a vector $bold(cal(o)) = [cal(o)_1, cal(o)_2, ..., cal(o)_(2^M)]$ containing the optimal values that we want to approximate with $z$.
+Considering an M-bit quantizer with $2^M$ steps, we can define the values of $bold(cal(o))$ as the center points of these quantizer steps.
+Its cardinality is $2^M$.
 It has to be noted that $bold(cal(o))$ consists of optimal values that we may not be able to exactly approximate using a linear combination based on weights and our given input values.
 We can find the optimal linear combination $z_"opt"$ by finding the minimum of all distances to all optimal points defined in $bold(cal(o))$.
@@ -198,9 +223,9 @@ $
 include("../pseudocode/bach_find_best_appr.typ")
 )
-@alg:best_appr shows a programmatic approach to find the set of weights for the best approximation. The algorithm returns a tuple consisting of the weight combination $bold(h)$ and the resulting value of the linear combination $z_"opt"$
+@alg:best_appr shows a programmatic approach to find the set of weights for the best approximation. The algorithm returns a tuple consisting of the weight combination $bold(h)$ and the resulting value of the linear combination $z_"opt"$.
 
-Because the superposition of different linear combinations of normal distributions corresponds to a Gaussian Mixture Model, wherein finding the ideal set of points $bold(cal(o))$ analytically is impossible.
+Because the superposition of different linear combinations of normal distributions corresponds to a Gaussian Mixture Model, finding the ideal set of points $bold(cal(o))$ analytically is impossible.
 Instead, we will first estimate $bold(cal(o))$ based on the normal distribution parameters after performing multiple convolutions with the input distribution $X$.
 The parameters of a multiply convolved normal distribution are defined as:
@@ -218,7 +243,7 @@ $
 The parameters $mu_Z$ and $sigma_Z$ allow us to apply an inverse CDF on a multi-bit quantizer $cal(Q)(2, tilde(x))$ defined in the tilde-domain.
 Our initial values for $bold(cal(o))_"first"$ can now be defined as the centers of the steps of the transformed quantizer function $cal(Q)(2, x)$.
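A sketch of this initialization might look as follows (hedged: $mu_Z = 0$ and $sigma_Z = sqrt(N)$ follow from the convolution parameters above, and the handling of the two outermost points anticipates one plausible reading of the workaround described in the next paragraph):

```python
import numpy as np
from scipy.stats import norm

def initial_center_points(M, mu_z, sigma_z):
    """First guess for the optimal points o (one per quantizer step).
    Inner step centers are the tilde-domain midpoints (2k+1)/2^(M+1);
    the two unbounded outer steps get the artificial tilde bounds
    1/2^(2M) and (2^(2M)-1)/2^(2M). Everything is mapped to the real
    domain with the inverse CDF of the estimated Z ~ N(mu_z, sigma_z)."""
    n = 2 ** M
    centers = (2 * np.arange(n) + 1) / (2 * n)   # tilde-domain step centers
    centers[0] = 1 / 2 ** (2 * M)                # artificial outermost bounds
    centers[-1] = (2 ** (2 * M) - 1) / 2 ** (2 * M)
    return norm.ppf(centers, loc=mu_z, scale=sigma_z)

# Example: 2-bit quantizer, N = 2 addends => mu_Z = 0, sigma_Z = sqrt(2)
print(initial_center_points(2, 0.0, np.sqrt(2)))
```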
These points can be found easily except for the two outermost center points, whose quantizer steps have a bound of $plus.minus infinity$.\
-However, we can still find these two remaining center points by artificially defining the outermost bounds of the quantizer as $frac(1, 2^M dot 4)$ and $frac((2^M dot 4)-1, 2^M dot 4)$ in the tilde-domain and also apply the inverse CDF to them.
+However, we can still find these two remaining center points by artificially defining the outermost bounds of the quantizer as $frac(1, 2^(2 dot M))$ and $frac((2^(2 dot M))-1, 2^(2 dot M))$ in the tilde-domain and also applying the inverse CDF to them.
 
#scale(x: 90%, y: 90%)[
#figure(
@@ -245,33 +270,46 @@ We can also use a simulation here to check the convergence of the distribution $
#figure(
 grid(
 columns: (2),
-figure(
-image("./../graphics/plots/bach/stability/frame_1.png")
-),
-figure(
+[#figure(
+image("./../graphics/plots/bach/stability/frame_1.png"),
+//caption: [Iteration 1]
+)
+#v(-2em)
+Iteration 1],
+[#figure(
 image("./../graphics/plots/bach/stability/frame_25.png")
-),
+)
+#v(-2em)
+Iteration 25],
),
 caption: [Probability distributions for the first and 25th iteration of the center point approximation method]
)
 
-Comparing the distributions in fig:bach_stability, we can see that besides a closer arrangement the overall shape of the probability distribution $Z$ converges to a Gaussian Mixture representing the original estimated distribution $Z$ through @eq:z_dist_def through smaller normal distributions.
+Comparing the distributions in @fig:bach_stability, we can see that, besides a closer arrangement, the overall shape of the probability distribution $Z$ converges to a stable distribution that represents the originally estimated distribution $Z$ from @eq:z_dist_def through smaller normal distributions.
 
 The output of @alg:bach_1 is the vector of optimal weights $bold(h)_"opt"$.
 $bold(h)_"opt"$ can now be used to complete the enrollment phase and quantize the values $bold(z)_"opt"$.
-To perform reconstruction, we can construct the same linear combination used during enrollment with the found helper-data and the new PUF readout measurements.
+To perform reconstruction, we can calculate the same linear combination used during enrollment with the generated helper-data and the new PUF readout measurements.
+We can lower the computational complexity of this approach by using the assumption that the $X_i$ are i.i.d.
+The end result of $bold(cal(o))$ can be calculated once for a specific device series and saved in the ROM of each device.
+During enrollment, only the vector $bold(h)_"opt"$ has to be calculated.
+
+=== Helper-data size and number of addends
+
+The amount of helper data is directly linked to the symbol bit width $M$ and the number of addends $N$ used in the linear combination.
+Because we can set the first helper-data bit $h_1$ of a linear combination to $1$ to omit the random choice, the resulting extracted-bit to helper-data-bit ratio $cal(r)$ can be defined as $cal(r) = frac(M, N-1)$, which is similar to the one we used in the @smhdt analysis.
 
 == Experiments
 
-To test our implementation of BACH using the prior introduced center point approximation we conducted a similar experiment as in @sect:smhd_experiments.
-However, we have omitted the analysis over different temperatures for the enrollment and reconstruction phase here, as the behaviour of BACH corresponds to that of @smhdt in this matter.
+To test our implementation of @bach using the prior introduced center point approximation we conducted a similar experiment as in @sect:smhd_experiments. +However, we have omitted the analysis over different temperatures for the enrollment and reconstruction phase here, as the behaviour of @bach corresponds to that of @smhdt in this matter. As in the S-Metric analysis, the resulting dataset consists of the bit error rates of various configurations with quantization symbol widths of up to $4$ bits evaluated with up to $10$ addends for the linear combinations. == Results & Discussion -We can now compare the #glspl("ber") of different BACH configurations. +We can now compare the #glspl("ber") of different @bach configurations. -#figure( +/*#figure( table( columns: (9), align: center + horizon, @@ -282,19 +320,120 @@ We can now compare the #glspl("ber") of different BACH configurations. [$M=3$], [$0.07$], [$0.114$], [$0.05$], [$0.15$], [$0.2$], [$0.26$], [$0.26$], [$0.31$], [$M=4$], [$0.13$], [$0.09$], [$0.18$], [$0.22$], [$0.26$], [$0.31$], [$0.32$],[$0.35$] ), - caption: [#glspl("ber") of different BACH configurations] + caption: [#glspl("ber") of different @bach configurations] +)*/ + + +#figure( + kind: table, + tablex( + columns: 9, + align: center + horizon, + inset: 7pt, + // Color code the table like a heat map + + map-cells: cell => { + if cell.x > 0 and cell.y > 0 { + cell.content = { + let value = float(cell.content.text) + let text-color = if value >= 0.3 { + red.lighten(15%) + } else if value >= 0.2 { + red.lighten(30%) + } else if value >= 0.15 { + orange.darken(10%) + } else if value >= 0.1 { + yellow.darken(13%) } else if value >= 0.08 { + yellow + } else if value >= 0.06 { + olive + } else if value >= 0.04 { + green.lighten(10%) + } else if value >= 0.02 { + green + } else { + green.darken(10%) + } + cell.fill = text-color + strong(cell.content) + } + } + cell +}, + + [*BER*],[N=2],[N=3],[N=4],[N=5], [N=6], [$N=7$], [$N=8$], [$N=9$], + [$M=1$], [0.01], [0.01], [0.012], [0.018], [0.044], [0.05], [0.06], [0.07], + [$M=2$], [0.03], [0.05], [0.02], [0.078], [0.107], [0.114], [0.143], [0.138], + [$M=3$], [0.07], [0.114], [0.05], [0.15], [0.2], [0.26], [0.26], [0.31], + [$M=4$], [0.13], [0.09], [0.18], [0.22], [0.26], [0.31], [0.32],[0.35], + [$M=5$], [0.29], [0.21], [0.37], [0.31], [0.23], [0.23], [0.19], [0.15], + [$M=6$], [0.15], [0.33], [0.15], [0.25], [0.21], [0.23], [0.19], [0.14] + + ), + caption: [#glspl("ber") of different @bach configurations] ) +@tab:BACH_performance shows the #glspl("ber") of @bach configurations with $N$ addends and extracting $M$ bits out of one input value $z$. +The first interesting property we can observe, is the caveat @bach produces for the first three bit combinations $M = 1, 2 "and" 3$ at around $N = 3$ and $N = 4$. +At these points, the @ber experiences a drop followed by a steady rise again for higher numbers of $N$. +//This observation could be explained through the fact that the higher $N$ is chosen, the shorter the resulting key, since $N$ divides out values available for quantization by $N$. +If $M$ is generally chosen higher, @bach seems to return unstable results, halving the @ber as $N$ reaches $9$ for $M=5$ but showing no real improvement for various addends if $M=6$. -@tab:BACH_performance shows the #glspl("ber") of BACH configurations with $N$ addends and extracting $M$ bits out of one input value $z$. 
-The first interesting property we can observe, is the caveat BACH produces for the first three bit combinations $M = 1, 2 "and" 3$ at around $N = 3$ and $N = 4$.
-At these points the @ber experiences a drop followed by a steady rise again for higher numbers of $N$.
-This observation could be explained through the fact that the higher $N$ is chosen, the shorter the resulting key, since $N$ divides out values available for quantization by $N$.
 
-=== Impact of helper-data volume and amount of addends
 
-Contrary to @smhdt, the amount of helper data is directly linked to the amount of available input data provided by the @puf readout.
-In our experiments, this means we will always generate $800$ helper data bits, since our input data consists of $800$ ring-oscillator frequency differences.
+ [*M*], [$1$], [$2$], [$3$], [$4$], [$5$], [$6$],
+ [*BER*], [$0.013$], [$0.02$], [$0.04$], [$0.07$], [$0.11$], [$0.16$]
+ ),
+ caption: [#glspl("ber") for higher order bit quantization without helper data]
+)
 
-If we now take into account, that we divide our input set by $N$ addends to receive values available for quantization and are then able to yield $M$ bits per available value due to our higher-bit quantization, we can define the extracted-bit to helper-data bits ratio as
-$cal(r) = lr(frac(frac(n dot M, N), 800)mid(|))_(n=800) = frac(M, N)$, whose equation is similar to the one we used in the @smhdt analysis.
+Unfortunately, comparing the #glspl("ber") of @tab:no_hd[Tables] and @tab:BACH_performance[] shows that our current realization of @bach at best ties the @ber of @tab:no_hd and is otherwise worse.
+Let's find out why this happens.
+
+==== Discussion
+
+If we take a step back and look at the performance of the optimized single-bit sign-based quantization process of @sect:1-bit-opt, we can compare the following #glspl("ber"):
+
+#figure(
+ table(
+ columns: 2,
+ [*No helper data*], [$0.013$],
+ [*With helper data using greatest distance*], [$0.00052$],
+ [*With helper data using center point approximation*], [$0.01$]
+ ),
+ caption: [Comparison of #glspl("ber") for the single-bit quantization process with and without helper data]
+)
+
+As we can see in @tab:comparison_justification, generating the helper data based on the original idea of @eq:optimization improves the @ber of the single-bit quantization by approx. $96%$.
+The probability distributions $Z$ of the two different realizations of @bach -- namely the distance maximization strategy and the center point approximation -- give an indication of this discrepancy:
+
+#figure(
+grid(
+ columns: (2),
+ [#figure(
+ image("../graphics/plots/bach/compare/bad.png")
+ )
+ #v(-2em)
+ Center point approximation],
+ [#figure(
+ image("../graphics/plots/bach/compare/good.png")
+ )
+ #v(-2em)
+ Distance maximization],
+),
+ caption: [Comparison of the histograms of the different strategies to obtain the optimal weights for the single-bit case]
+)
+
+@fig:compar_2_bach shows the two different probability distributions.
+We can observe that using a vector of optimal points $bold(cal(o))$ results in a narrower distribution for $Z$ than just maximizing the linear combination to be as far away from $x=0$ as possible.
+This difference in the shape of both distributions seems to be the main contributor to the fact that the optimization using center point approximation yields no improvement for the quantization process.
+Unfortunately, we were not able to define an algorithm translating this idea to a higher order bit quantization for which the resulting probability distribution $Z$ converges.
+
+Taking a look at the unstable probability distributions issued by the bound distance maximization strategy in @fig:bach_instability, we can get an idea of what kind of distribution a @bach algorithm should achieve.
+While the inner parts of the distributions do not overlap with each other like in the stable iterations shown in @fig:bach_stability, the outermost values of these distributions resemble the shape of what we achieved using the distance maximization for a single-bit optimization.
+These two properties could -- if the distribution converges -- result in far better #glspl("ber") for higher order bit quantization, as the comparison in @tab:comparison_justification indicates.
diff --git a/content/SMHD.typ b/content/SMHD.typ index d8c1a29..5a77436 100644 --- a/content/SMHD.typ +++ b/content/SMHD.typ @@ -21,14 +21,16 @@ Contrary to @tmhd1, @tmhd2 and @smhd, which display relevant areas as equi-proba
 It has to be mentioned that instead of transforming all values of the PUF readout into the Tilde-Domain, we could also use an inverse CDF to transform the bounds of our evenly spaced areas into the real domain with (normal) distributed values, which can be assessed as remarkably less computationally complex.#margin-note[This comes later] */
 === Two-Metric Helper Data Method
-The most simple form of a metric-based @hda is the Two-Metric Helper Data Method, since the quantization only yields symbols of 1-bit width and uses the least amount of metrics possible if we want to use more than one metric.
+The simplest form of a metric-based @hda is the Two-Metric Helper Data Method.
+Its quantization only yields symbols of 1-bit width and it only uses a single bit of helper data to store the choice of metric.
 @fig:tmhd_example_enroll and @fig:tmhd_example_reconstruct illustrate an example enrollment and reconstruction process.
-We would consider the marked point the value of the initial measurement and the marked range our margin of error.
+Consider the marked point the value of the initial measurement and the marked range our margin of error.
 If we now were to use the original quantizer shown in @fig:tmhd_example_enroll during both the enrollment and the reconstruction phases, we would risk a bit error, because the margin of error overlaps with the lower quantization bound $-a$, which we can call a point of uncertainty.
-But since we generated helper data during enrollment as depicted in @fig:tmhd_enroll, we can make use of a different quantizer $cal(R)(1, 2, x)$ whose boundaries do not overlap with the error margin.
+To alleviate this, we can generate helper data during enrollment as depicted in @fig:tmhd_enroll and make use of a different quantizer $cal(R)(1, 2, x)$ whose boundaries do not overlap with the error margin.
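A compact sketch of this enroll/reconstruct mechanic may help; it uses the octile bounds $plus.minus T_1, plus.minus a, plus.minus T_2$ of the standard normal that are derived just below, and it picks the metric geometrically, i.e. the shifted quantizer that keeps the enrollment readout farthest from a decision bound (a geometric reading of the metric conditions, not the thesis's literal case split):

```python
from scipy.stats import norm

# Octile bounds of the standard normal: -T1, -a, -T2, 0, T2, a, T1
T2, a, T1 = norm.ppf([5 / 8, 6 / 8, 7 / 8])

# Two reconstruction quantizers: the enrollment bounds {-a, a}
# moved one octile to the left (metric 1) and to the right (metric 2).
REC_BOUNDS = {1: (-T1, T2), 2: (-T2, T1)}

def quantize(x, lo, hi):
    # 0 inside [lo, hi), 1 outside -- the window shape from the figures
    return 0 if lo <= x < hi else 1

def enroll(x):
    """Return (bit, helper): quantize with +-a and pick the metric whose
    shifted quantizer keeps x farthest from a decision bound."""
    bit = quantize(x, -a, a)
    helper = max((1, 2), key=lambda m: min(abs(x - b) for b in REC_BOUNDS[m]))
    return bit, helper

def reconstruct(x_star, helper):
    return quantize(x_star, *REC_BOUNDS[helper])

bit, helper = enroll(0.5)
assert reconstruct(0.5, helper) == bit  # noiseless round-trip is consistent
```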
#scale(x: 90%, y: 90%)[
-#grid(
+#figure(
+ grid(
 columns: (1fr, 1fr),
 [#figure(
 include("../graphics/quantizers/two-metric/example_enroll.typ"),
 [#figure(
 include("../graphics/quantizers/two-metric/example_reconstruct.typ"),
 caption: [Example reconstruction]) ]
-)]
+),
+ caption: [Example enrollment and reconstruction of @tmhdt. The window function describes the quantizer used to define the resulting bit. The red dot shows a possible @puf readout measurement with its blue marked strip as margin of error.])]
 
 Publications @tmhd1 and @tmhd2 find all the relevant bounds for the enrollment and reconstruction phases under the assumption that the PUF readout (our input value $x$) is zero-mean Gaussian distributed.
 //Because the parameters for symbol width and number of metrics always stays the same, it is easier to calculate #m//argin-note[assume w.l.o.g. here] the bounds for 8 equi-probable areas with a standard deviation of $sigma = 1$ first and then multiplying them with the estimated standard deviation of the PUF readout.
-Because the parameters for symbol width and number of metrics always stays the same, we can -- without loss of generality -- assume the standard deviation as $sigma = 1$ and calculate the bounds for 8 equi-probable areas for this distribution.
+Because the parameters for symbol width and number of metrics always stay the same, we can -- without loss of generality -- assume the standard deviation as $sigma = 1$ and calculate the bounds for 8 equi-probable areas for this distribution.
 This is done by finding two bounds $a$ and $b$ such that
$ integral_a^b f_X(x) dif x = 1/8 $
 This operation yields 9 bounds defining these areas $-infinity$, $-\T1$, $-a$, $-\T2$, $0$, $\T2$, $a$, $\T1$ and $+infinity$.
-During the enrollment phase, we will use $plus.minus a$ as our quantizing bounds, returning $0$ if the //#margin-note[Consultation?] absolute value is smaller than $a$ and $1$ otherwise.
+During the enrollment phase, we will use $plus.minus a$ as our quantizing bounds, returning $0$ if the absolute value of $x$ is smaller than $a$ and $1$ otherwise.
 The corresponding metric is chosen based on the following conditions:
$ M = cases(
 )space.en. $
 @fig:tmhd_enroll shows the curve of a quantizer $cal(Q)$ that would be used during the Two-Metric enrollment phase.
-At this point we will still assume that our input value $x$ is zero-mean Gaussian distributed. //#margin-note[Move this assumption to the front]
 #scale(x: 90%, y: 90%)[
 #grid(
 columns: (1fr, 1fr),
@@ -67,7 +69,7 @@ At this point we will still assume that our input value $x$ is zero-mean Gaussia
 ]
 As previously described, each of these metrics corresponds to a different quantizer.
-Now, we can use the generated helper data in the reconstruction phase and define a reconstructed bit based on the chosen metric as follows:
+In the reconstruction phase, we can use the generated helper data and define a reconstructed bit based on the chosen metric as follows:
$ #grid(
 columns: (1fr, 1fr),
) $
 @fig:tmhd_reconstruct illustrates the basic idea behind the Two-Metric method. Using the helper data, we will move the bounds of the original quantizer (@fig:tmhd_example_enroll) one octile to each side, yielding two new quantizers.
-The advantage of this method comes from moving the point of uncertainty away from our readout position.
+The advantage of this method comes from moving the point of uncertainty away from our enrollment-time readout.
@@ -113,7 +115,8 @@ The generalization consists of two components:
 == Realization
 We will now propose a specific realization of the S-Metric Helper Data Method. \
-//As shown in @sect:dist_independency, we can use a CDF to transform our random distributed variable $X$ into an $tilde(X)$ in the tilde domain.
+Instead of using the @puf readout directly for @smhdt, we can use a @cdf to transform these values into the tilde domain.
+The only requirement we would need to meet here is that the @cdf of the probability distribution used is known.
 This allows us to use equi-distant bounds for the quantizer instead of equi-probable ones.
 
 From now on we will use the following syntax for quantizers that use the S-Metric Helper Data Method:
@@ -142,7 +145,7 @@ Right now, this quantizer wouldn't help us generating any helper data.
 To achieve that, we will need to divide a symbol step -- one that returns the corresponding quantized symbol -- into multiple sub-steps.
 Using $S$, we can define the step size $Delta_S$ as the division of $Delta$ by $S$:
-$ Delta_S = frac(Delta, S) = frac(frac(1, 2^M), S) = frac(1, 2^M dot S) $
+$ Delta_S = frac(Delta, S) = frac(1, 2^M dot S) $
 
 /*After this definition #margin-note[Rewrite this paragraph], we need to make an adjustment to our previously defined quantizer function, because we cannot simply return the quantized value based on a quantizer with step size $Delta_s$.
 That would just increase the amounts of bits we will extract out of one measurement.
@@ -178,22 +181,23 @@ In that sense, increasing the number of metrics will increase the number of sub-
 We can now perform the enrollment of a full PUF readout.
 Each measurement will be quantized with our quantizer $cal(E)$, returning a tuple consisting of the quantized symbol and helper data.
-$ K_i = cal(E)(s, m, tilde(x_i)) = (k, h)_i space.en. $
+$ kappa_i = cal(E)(s, m, tilde(x_i)) = (k, h)_i space.en. $
 
-Performing the operation of @eq:smhd_quant for our whole set of measurements will yield a vector of tuples $bold(K)$.
+Performing the operation of @eq:smhd_quant for our whole set of measurements will yield a vector of tuples $bold(kappa)$.
 
 === Reconstruction
 
 We already demonstrated the basic principle of the reconstruction phase in section @sect:tmhd, which showed the advantage of using more than one quantizer during reconstruction.
 We will call our repeated measurement of $tilde(x)$ that is subject to a certain error $tilde(x^*)$.
-To perform reconstruction with $tilde(x^*)$, we will first need to find all $S$ quantizers for which we generated the helper data in the previous step.
+To perform reconstruction with $tilde(x^*)$, we will first need to find all $S$ quantizers for which we generated the helper data in the previous step and then choose the one corresponding to the saved metric.
 We have to distinguish the two cases that $S$ is either even or odd:\
-If $S$ is even, we need to define $S$ quantizers offset by some distance $phi$.
-We can define the ideal position for the quantizer bounds based on its corresponding metric as centered around the center of the related metric.
+If $S$ is even, we need to define $S$ quantizers offset by multiples of $Phi$.
+We can define the ideal position for the quantizer bounds based on its corresponding metric as centered around the center of the metric.
 We can find these new bounds graphically as depicted in @fig:smhd_find_bound_graph.
We first determine the x-values of the centers of a metric (here M1, as shown with the arrows).
 We can then place the quantizer steps with step size $Delta$ (@eq:delta) evenly spaced around these points.
+If the resulting quantizer bound is smaller than $0$ or bigger than $1$, we will either add or subtract $1$ from its value so it stays in the defined range of the tilde domain.
 
 With these new points for the vertical steps of $cal(Q)$, we can draw the new quantizer for the first metric in @fig:smhd_found_bound_graph.
@@ -236,7 +240,7 @@ Analytically, the offset we are applying to $cal(E)(2, 2, tilde(x))$ can be defi
$ Phi = lr(frac(1, 2^M dot S dot 2)mid(|))_(M=2, S=2) = 1 / 16 space.en. $
 
-$Phi$ is the constant that we will multiply with a certain metric index $i$ to obtain the metric offset $phi$, which is used to define each of the $S$ different quantizers for reconstruction.
+$Phi$ is the constant that we will multiply with a certain metric index $i in [- S/2, ..., S/2]$ to obtain the metric offset $phi$, which is used to define each of the $S$ different quantizers for reconstruction.
 //This is also shown in @fig:smhd_2_2_reconstruction, as our quantizer curve is moved $1/16$ to the left and the right.
 In @fig:smhd_2_2_reconstruction, the two metric indices $i = plus.minus 1$ will be multiplied with $Phi$, yielding two quantizers, one moved $1/16$ to the left and one moved $1/16$ to the right.
@@ -245,7 +249,7 @@ If a odd number of metrics is given, the offset can still be calculated using @e
 
 To find all metric offsets for values of $S > 3$, we can use @alg:find_offsets.
 
-For application, we calculate $phi$ based on $S$ and $M$ using @eq:offset. The resulting list of offsets is correctly ordered and can be mapped to the corresponding metrics in ascending order.// as we will show in @fig:4_2_offsets and @fig:6_2_offsets.
+We can calculate $phi$ based on $S$ and $M$ using @eq:offset. The resulting list of offsets is correctly ordered and can be mapped to the corresponding metrics in ascending order.// as we will show in @fig:4_2_offsets and @fig:6_2_offsets.
 
#figure(
 kind: "algorithm",
@@ -255,7 +259,7 @@
 
 ==== Offset properties
 //#inline-note[This section feels somewhat out of place here, I just don't know exactly where to put it. It is also written in a somewhat disorganized way.]
-Before we go on and experimentally test this realization of the S-Metric method, let's look deeper into the properties of the metric offset value $phi$.\
+Before we go on and experimentally test this realization of the S-Metric method, let's look deeper into the properties of the metric offset value $phi$.
 Comparing @fig:smhd_2_2_reconstruction, @fig:smhd_3_2_reconstruction and their respective values of @eq:offset, we can observe that the offset $Phi$ gets smaller the more metrics we use.
 
#figure(
 caption: [Offset values for 2-bit configurations]
)
 As previously stated, we will need to define $S$ quantizers, $S/2$ times to the left and $S/2$ times to the right.
-For example, setting parameter $S$ to $4$ means we will need to move the enrollment quantizer $lr(S/2 mid(|))_(S=4) = 2$ times to the left and right.
+For example, setting the parameter $S$ to $4$ means we will need to move the enrollment quantizer $2$ times to the left and right.
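A compact sketch of the offset computation that @alg:find_offsets performs (an illustration consistent with @eq:offset and the even/odd case split, not the thesis's algorithm verbatim):

```python
def metric_offsets(M: int, S: int) -> list[float]:
    """Offsets phi = i * Phi for the S reconstruction quantizers.
    For odd S the index 0, i.e. the unshifted enrollment quantizer,
    is kept; for even S it is excluded."""
    Phi = 1 / (2 ** M * S * 2)            # the offset constant from eq. (offset)
    indices = range(-(S // 2), S // 2 + 1)
    return [i * Phi for i in indices if S % 2 == 1 or i != 0]

# M = 2: S = 2 gives [-1/16, 1/16]; S = 4 gives [-1/16, -1/32, 1/32, 1/16];
# S = 3 gives [-1/24, 0, 1/24], matching the worked values above.
print(metric_offsets(2, 2), metric_offsets(2, 4), metric_offsets(2, 3))
```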
As we can see in @fig:4_2_offsets, $phi$ for the maximum metric indices $i = plus.minus 2$ are identical to the offsets of a 2-bit 2-metric configuration. In fact, this property carries on for higher even numbers of metrics, as shown in @fig:6_2_offsets. @@ -304,12 +308,16 @@ In fact, this property carries on for higher even numbers of metrics, as shown i At $s=6$ metrics, the biggest metric offset we encounter is $phi = 1/16$ at $i = plus.minus 3$.\ This biggest (or maximum) offset is of particular interest to us, as it tells us how far we deviate from the original quantizer used during enrollment. -The maximum offset for a 2-bit configuration $phi$ is $1/16$ and we will introduce smaller offsets in between if we use a higher even number of metrics. +The maximum offset for a 2-bit configuration $phi$ is $1/16$ and we only introduce smaller offsets in between if we use a higher even number of metrics. -More formally, we can define the maximum metric offset for an even number of metrics as follows: +More formally, we can define the maximum metric offset as follows: + +$ phi_"max" = frac(floor(frac(S,2)), 2^M dot S dot 2) $ + +/*More formally, we can define the maximum metric offset for an even number of metrics as follows: $ phi_("max,even") = frac(frac(S,2), 2^M dot S dot 2) = frac(1, 2^M dot 4) $ -Here, we multiply @eq:offset by the maximum metric index $i_"max" = S/2$. +Here, we multiply $phi$ from @eq:offset by the maximum metric index $i_"max" = S/2$. Now, if we want to find the maximum offset for a odd number of metrics, we need to modify @eq:max_offset_even, more specifically its numerator. For that reason, we will decrease the parameter $m$ by $1$, that way we will still perform a division without remainder: @@ -318,8 +326,9 @@ $ phi_"max,odd" &= frac(frac(S-1, 2), 2^n dot S dot 2)\ &= lr(frac(S-1, 2^M dot S dot 4)mid(|))_(M=2, S=3) = 1/24 $ - -It is important to note, that $phi_"max,odd"$, unlike $phi_"max,even"$, is dependent on the parameter $S$ as we can see in @tb:odd_offsets. +*/ +//It is important to note, that $phi_"max,odd"$, unlike $phi_"max,even"$, is dependent on the parameter $S$ as we can see in @tb:odd_offsets. +It is important to note, that $phi_"max"$ is dependent on the parameter $S$ if $S$ is an odd number. #figure( table( @@ -332,11 +341,11 @@ It is important to note, that $phi_"max,odd"$, unlike $phi_"max,even"$, is depen caption: [2-bit maximum offsets, odd] ) -The higher $S$ is chosen, the closer we approximate $phi_"max,even"$ as shown in @eq:offset_limes. +The higher $S$ is chosen, the closer we approximate $phi_"max"$ for even choices of $S$, as shown in @eq:offset_limes. This means, while also keeping the original quantizer during the reconstruction phase, the maximum offset for an odd number of metrics will always be smaller than for an even number. $ -lim_(S arrow.r infinity) phi_"max,odd" &= frac(S-1, 2^M dot S dot 4) #\ +lim_(S arrow.r infinity) phi_"max,odd" &= frac(floor(frac(S,2)), 2^M dot S dot 2) = frac(S-1, 2^M dot S dot 4) #\ &= frac(1, 2^M dot 4) = phi_"max,even" $ @@ -344,7 +353,7 @@ Because $phi_"max,odd"$ only approximates $phi_"max,even"$ if $S arrow.r infinit == Improvements -The by @smhd proposed S-Metric Helper Data Method can be improved by using gray coded labels for the quantized symbols instead of naive ones. +The S-Metric Helper Data Method proposed by Fischer in @smhd can be improved by using Gray-coded labels for the quantized symbols instead of naive labelling. 
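The binary-reflected Gray code that realizes this relabelling is a one-liner; a minimal sketch:

```python
def gray(k: int) -> int:
    """Binary-reflected Gray code of the step index k."""
    return k ^ (k >> 1)

# 2-bit quantizer steps, naive vs. Gray-coded labels:
for k in range(4):
    print(f"step {k}: naive {k:02b}  gray {gray(k):02b}")
# Adjacent steps now differ in exactly one bit, so a quantization error
# into a neighbouring step costs one bit error instead of up to two.
```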
#align(center)[ #scale(x: 80%, y: 80%)[ #figure(
 caption: [Gray Coded 2-bit quantizer]
)]]
@fig:2-bit-gray shows a 2-bit quantizer with gray-coded labelling.
-In this example, we have an advantage at $tilde(x) = ~ 0.5$, because a quantization error only returns one wrong bit instead of two.
+In this example, we have an advantage at $tilde(x) approx 0.5$, because a quantization error only returns one wrong bit instead of two.
 
 Furthermore, the transformation into the Tilde-Domain could also be performed using the @ecdf to achieve a more precise uniform distribution because we do not have to estimate a standard deviation of the input values.
 
 == Experiments
 
-We tested the implementation of @sect:smhd_implementation with the temperature dataset of @dataset.
-The dataset contains counts of positives edges of a toggle flip flop at a set evaluation time $D$. Based on the count and the evaluation time, the frequency of a ring oscillator can be calculated using: $f = 2 dot frac(k, D)$.
-Because we want to analyze the performance of the S-Metric method over different temperatures, both during enrollment and reconstruction, we are limited to the second part of the experimental measurements of @dataset.
-We will have measurements of $50$ FPGA boards available with $1600$ and $1696$ ring oscillators each. To obtain the values to be processed, we subtract them in pairs, yielding $800$ and $848$ ring oscillator frequency differences _df_.\
-Since the frequencies _f_ are normal distributed, the difference _df_ can be assumed to be zero-mean Gaussian distributed.
-To apply the values _df_ to our implementation of the S-Metric method, we will first transform them into the Tilde-Domain using an inverse CDF, resulting in uniform distributed values $tilde(italic("df"))$.
+We tested the implementation of @sect:smhd_implementation with the dataset of @dataset.
+The dataset contains counts of positive edges of a ring oscillator at a set evaluation time $D$. Based on the count and the evaluation time, the frequency of a ring oscillator can be calculated using: $f = 2 dot frac(k, D)$.
+Because we want to analyze the performance of the S-Metric method over different temperatures, both during enrollment and reconstruction, we are limited to the experimental measurements of @dataset, which varied the temperature during the FPGA operation.
+We will have measurements of $50$ FPGA boards available with $1600$ and $1696$ ring oscillators each.
+The two measurement sets are obtained from different slices of the FPGA board, where the only difference to note is the number of ring oscillators available.
+To obtain the values to be processed, we subtract them in pairs, yielding $800$ and $848$ ring oscillator frequency differences _df_.\
+Because we can assume that the frequencies _f_ are i.i.d., the difference _df_ can also be assumed to be i.i.d.
+To apply the values _df_ to our implementation of the S-Metric method, we will first transform them into the Tilde-Domain using an inverse CDF, resulting in uniformly distributed values $tilde(x)$.
 
 Our resulting dataset consists of #glspl("ber") for quantization symbol widths of up to $6 "bits"$ evaluated with generated helper-data from up to $100 "metrics"$.
-We chose not to perform simulations for bit widths higher than $6 "bits"$, as we will see later that we have already reached a bit error rate of approx. $10%$ for these configurations. - +In the following section, we will often set the maximum number of metrics to be $S=100$. +This choice refers to the asymptotic behaviour of the @ber and can be equated with the choice $S arrow infinity$. +//We chose not to perform simulations for bit widths higher than $6 "bits"$, as we will see later that we have already reached a bit error rate of approx. $10%$ for these configurations. +#pagebreak() === Results & Discussion The bit error rate of different S-Metric configurations for naive labelling can be seen in @fig:global_errorrates. -For this analysis, enrollment and reconstruction were both performed at room temperature and the quantizer was naively labelled. +For this analysis, enrollment and reconstruction were both performed at room temperature. //and the quantizer was naively labelled. #figure( - image("../graphics/25_25_all_error_rates.svg", width: 95%), - caption: [Bit error rates for same temperature execution. Here we can already observe the asymptotic loss of improvement in #glspl("ber") for higher metric numbers] + image("../graphics/25_25_all_error_rates_fixed.svg", width: 90%), + caption: [Bit error rates for same-temperature execution. Here we can already observe the asymptotic #glspl("ber") for higher metric numbers. The error rate is scaled logarithmically here.] ) We can observe two key properties of the S-Metric method in @fig:global_errorrates. -The error rate in this plot is scaled logarithmically.\ -The exponential growth of the error rate of classic 1-metric configurations can be observed through the linear increase of the error rates. -Also, as we expanded on in @par:offset_props, using more metrics will, at some point, not further improve the bit error rate of the key. -At a symbol width of $m >= 6$ bits, no further improvement through the S-Metric method can be observed. +//The exponential growth of the error rate of classic 1-metric configurations can be observed through the increase of the error rates. +The exponential growth of the @ber can be observed if we set $S=1$ and increase $M$ up to $6$. +Also, as we expanded on in @par:offset_props, at some point using more metrics will no longer improve the bit error rate of the key. +At a symbol width of $M >= 6$ bits, no further improvement through the S-Metric method can be observed. #figure( include("../graphics/plots/errorrates_changerate.typ"), @@ -392,12 +405,12 @@ At a symbol width of $m >= 6$ bits, no further improvement through the S-Metric This tendency can also be shown through @fig:errorrates_changerate. Here, we calculated the quotient of the bit error rate using one metric and 100 metrics. -From $m >= 6$ onwards, $(x_"1" (m)) / (x_"100" (m))$ approaches $~1$, which means, no real improvement is possible anymore through the S-Metric method. +From $M >= 6$ onwards, $(op("BER")(1, 2^M)) / (op("BER")(100, 2^M))$ approaches $~1$, which means, no real improvement is possible anymore through the S-Metric method. -==== Helper Data Volume Impact +==== Impact of helper data size -The amount of helper data bits required by @smhdt is defined as a function of the amount of metrics as $log_2(S)$. 
-The overall extracted-bits to helper-data-bits ratio can be defined here as $cal(r) = lr(frac(n dot M, log_2(S))mid(|))_(n=800) = frac(800 dot M, log_2(S))$
+The amount of helper data bits required by @smhdt is defined as a function of the number of metrics as $log_2(S)$.
+The overall extracted-bits to helper-data-bits ratio can be defined here as $cal(r) = frac(M, log_2(S))$.
#figure(
 table(
@@ -405,7 +418,8 @@ The overall extracted-bits to helper-data-bits ratio can be defined here as $cal
 inset: 7pt,
 align: center + horizon,
 [$bold(M)$], [$1$], [$2$], [$3$], [$4$], [$5$], [$6$],
- [*Errorrate*], [$0.012$], [$0.9 dot 10^(-4)$], [$0.002$], [$0.025$], [$0.857$], [$0.148$],
+ [$bold(S)$], [$2$], [$4$], [$8$], [$16$], [$32$], [$64$],
+ [*@ber*], [$0.012$], [$0.9 dot 10^(-4)$], [$0.002$], [$0.025$], [$0.857$], [$0.148$],
 ),
 caption: [S-Metric performance with same bit-to-metric ratios]
)
@@ -428,8 +442,7 @@ Since we wont always be able to recreate lab-like conditions during the reconstr
)
@fig:smhd_tmp_reconstruction shows the results of this experiment conducted with a 2-bit configuration.\
-As we can see, the further we move away from the temperature of enrollment, the higher the bit error rates turns out to be.\
-
+As we can see, the further we move away from the temperature of enrollment, the higher the #glspl("ber").
We can observe this property well in detail in @fig:global_diffs.
#scale(x: 90%, y: 90%)[
@@ -439,14 +452,13 @@ We can observe this property well in detail in @fig:global_diffs.
)]
Here, we compared the asymptotic performance of @smhdt for different temperatures both during enrollment and reconstruction.
First we can observe that the optimum temperature for the operation of @smhdt in both phases for the dataset @dataset is $35°C$ instead of the expected $25°C$.
-Furthermore, the @ber seems to be almost directly correlated with the absolute temperature difference, especially at higher temperature differences, showing that the further apart the temperatures of the two phases are, the higher the @ber.
+Furthermore, the @ber seems to be almost directly determined by the absolute temperature difference, especially at higher temperature differences: the further apart the temperatures of the two phases are, the higher the @ber.
==== Gray coding
In @sect:smhd_improvements, we discussed how a gray coded labelling for the quantizer could improve the bit error rates of the S-Metric method.
Because we only change the labelling of the quantizing bins and do not make any changes to #gls("smhdt") itself, we can assume that the effects of temperature on the quantization process are directly translated to the gray-coded case.
-Therefore, we will not perform this analysis again here.
@fig:smhd_gray_coding shows the comparison of applying #gls("smhdt") at room temperature for both naive and gray-coded labels.
There we can already observe the improvement of using gray-coded labelling, but the impact of this change of labels can really be seen in @tab:gray_coded_impact.
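The relabelling itself is just the binary-reflected Gray code; the following Python sketch (a generic construction, not the thesis implementation) shows why a quantization error into a neighbouring bin flips only a single key bit.

```python
def gray(b: int) -> int:
    # Binary-reflected Gray code of the naive bin label b.
    return b ^ (b >> 1)

M = 2                                      # symbol width in bits
labels = [gray(b) for b in range(2 ** M)]  # 00, 01, 11, 10 for M = 2

# Adjacent quantizer bins differ in exactly one bit, so an error into
# a neighbouring bin corrupts one key bit instead of up to M bits.
for a, b in zip(labels, labels[1:]):
    assert bin(a ^ b).count("1") == 1
```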
@@ -456,11 +468,11 @@ For $M>3$ the rise of the #gls("ber") predominates the possible improvement by a
#figure(
 table(
- columns: (7),
+ columns: (6),
 align: center + horizon,
 inset: 7pt,
- [*M*],[1],[2],[3],[4], [5], [6],
- [*Improvement*], [$0%$], [$24.75%$], [$47.45%$], [$46.97%$], [$45.91%$], [$37.73%$]
+ [1],[2],[3],[4], [5], [6],
+ [$0%$], [$24.75%$], [$47.45%$], [$46.97%$], [$45.91%$], [$37.73%$]
 ),
 caption: [Improvement of using gray-coded instead of naive labelling, per bit width]
)
@@ -470,4 +482,4 @@ For $M>3$ the rise of the #gls("ber") predominates the possible improvement by a
#figure(
 caption: [Comparison between #glspl("ber") using naive labelling and gray-coded labelling]
)
-Using our dataset, we can estimate the average improvement for using gray-coded labelling to be at around $33%$.
+Using the dataset, we can estimate the average improvement for using gray-coded labelling to be around $33%$.
diff --git a/content/background.typ b/content/background.typ deleted file mode 100644 index 9b1896a..0000000 --- a/content/background.typ +++ /dev/null @@ -1,70 +0,0 @@
-#import "@preview/fletcher:0.5.1": diagram, node, edge
-#import "@preview/gentle-clues:0.9.0": example
-#import "@preview/quill:0.3.0": quantum-circuit, lstick, rstick, ctrl, targ, mqgate, meter
-
-
-= Background
-== Quantum Computation and Quantum Circuits
-A quantum computer is a device that performs calculations by using certain phenomena of quantum mechanics.
-The algorithms that run on this device are specified in quantum circuits.
-
-#example[
- @example_qc shows a simple quantum circuit that implements a specific quantum algorithm.
-
- #figure(
- quantum-circuit(
- lstick($|0〉$), $H$, mqgate($U$, n: 2, width: 5em, inputs: ((qubit: 0, label: $x$), (qubit: 1, label: $y$)), outputs: ((qubit: 0, label: $x$), (qubit: 1, label: $y plus.circle f(x)$))), $H$, meter(), [\ ],
- lstick($|1〉$), $H$, 1, 1, 1
- ),
- caption: [A quantum circuit implementing the Deutsch-Jozsa algorithm]
- )
-]
-
-
-== Decision Diagrams
-Decision diagrams in general are directed acyclical graphs, that may be used to express control flow through a series of conditions.
-It consists of a set of decision nodes and terminal nodes.
-The decision nodes represent an arbitrary decision based on an input value and may thus have any number of outgoing edges.
-The terminal nodes represent output values and may not have outgoing edges.
-
-A @bdd is a specific kind of decision diagram, where there are two terminal nodes (0 and 1) and each decision node has two outgoing edges, depending solely on a single bit of an input value.
-@bdd[s] may be used to represent any boolean function.
-
-#example[
- Example @bdd[s] implementing boolean functions with an arity of $2$ are show in @example_bdd_xor and @example_bdd_and.
-
- #figure(
- diagram(
- node-stroke: .1em,
- node((0, 0), [$x_0$], radius: 1em),
- edge((0, 0), (-1, 1), [0], "->"),
- edge((0, 0), (1, 1), [1], "->"),
- node((-1, 1), [$x_1$], radius: 1em),
- node((1, 1), [$x_1$], radius: 1em),
- edge((-1, 1), (-1, 2), [0], "->"),
- edge((-1, 1), (1, 2), [1], "->"),
- edge((1, 1), (1, 2), [0], "->"),
- edge((1, 1), (-1, 2), [1], "->"),
- node((-1, 2), [$0$]),
- node((1, 2), [$1$]),
- ),
- caption: [A @bdd for an XOR gate.]
- )
-
- #figure(
- diagram(
- node-stroke: .1em,
- node((1, 0), [$x_0$], radius: 1em),
- edge((1, 0), (0, 2), [0], "->"),
- edge((1, 0), (1, 1), [1], "->"),
- node((1, 1), [$x_1$], radius: 1em),
- edge((1, 1), (0, 2), [0], "->"),
- edge((1, 1), (1, 2), [1], "->"),
- node((0, 2), [$0$]),
- node((1, 2), [$1$]),
- ),
- caption: [A @bdd for an AND gate.]
- )
-]
-
-
diff --git a/content/benchmarks.typ b/content/benchmarks.typ deleted file mode 100644 index 0b81579..0000000 --- a/content/benchmarks.typ +++ /dev/null @@ -1,16 +0,0 @@
-#import "@preview/tablex:0.0.8": tablex
-#import "@preview/unify:0.6.0": qty
-
-= Benchmarks
-== Google Benchmark
-
-== MQT QCEC Bench
-To generate test cases for the application schemes, @mqt Bench was used. @quetschlich2023mqtbench
-
-#tablex(
- columns: (1fr, 1fr, 1fr),
- rows: (auto, auto, auto),
- [*Benchmark Name*], [*Diff Run Time*], [*Proportional Run Time*],
- [DJ], [$qty("1.2e-6", "s")$], [$qty("1.5e-6", "s")$],
- [Grover], [$qty("1.3e-3", "s")$], [$qty("1.7e-3", "s")$]
-)
diff --git a/content/conclusion.typ b/content/conclusion.typ deleted file mode 100644 index 0a7f2fd..0000000 --- a/content/conclusion.typ +++ /dev/null @@ -1,17 +0,0 @@
-= Conclusion
-
-During the course of this work, we took a look at two distinct helper-data algorithms: the S-Metric Helper Data Method and the newly presented method of optimization through Boundary Adaptive Clustering with helper-data.
-
-The S-Metric method will always outperform BACH considering the amount of helper data needed for operation.
-This comes from the nature of S-Metric quickly approaching an optimal @ber for a certain bit width and not improving any further for higher amounts of metrics.
-$
-cal(r)_"SMHD" = frac(800 * M, log_2(S))\
-cal(r)_"BACH" = frac(M, N)
-$
-Comparing both formulas for the extracted-bits to helper-data-bits ratio for both methods we can quickly see that S-Metric will always yield more extracted bits per helper-data bit than BACH.
-
-Considering @ber[BERs], S-Metric does outperform BACH for lower bit values.
-But while the error rate for higher order quantization rises exponentially for higher-order bit quantizations, the @ber[BERs] of BACH do seem to rise rather linear than exponentially for higher-order bit quantizations.
-This behaviour might be attributed to the general procedure of shaping the input values for the quantizer in such a way that they are clustered around the center of a quantizer step, which is a property that carries on for higher order bit quantizations.
-
-Note: the simulation of the BERs of BACH at this point supports the claim made here, but the 250-core machine is in fact still busy with the calculation for quite a while. This will definitely still be appended.
diff --git a/content/implementation.typ b/content/implementation.typ deleted file mode 100644 index 4446e58..0000000 --- a/content/implementation.typ +++ /dev/null @@ -1,36 +0,0 @@
-#import "@preview/lovelace:0.3.0": pseudocode-list
-
-= Implementation
-== Visualisation
-Initially, a visualisation of the diff algorithms applied to quantum circuits was created to assess their usefulness in equivalence checking.
-Additionally, this served as exercise to better understand the algorithms to be used for the implementation in @qcec.
-
-
-== QCEC Application Scheme
-The Myers' Algorithm was implemented as an application scheme in @qcec.
-
-#figure(
- block(
- pseudocode-list[
- + do something
- + do something else
- + *while* still something to do
- + do even more
- + *if* not done yet *then*
- + wait a bit
- + resume working
- + *else*
- + go home
- + *end*
- + *end*
- ],
- width: 100%
- ),
- caption: [Myers' algorithm.]
-)
-
-
-
-== QCEC Benchmarking Tool
-As @qcec doesn’t have built-in benchmarks, a benchmarking tool was developed to test different configurations on various circuit pairs.
-
diff --git a/content/introduction.typ b/content/introduction.typ index b022f85..ce683b9 100644 --- a/content/introduction.typ +++ b/content/introduction.typ @@ -2,38 +2,49 @@
#import "@preview/bob-draw:0.1.0": *
= Introduction
-In the field of cryptography, @puf devices are a popular tool for key generation and storage.
-In general, a @puf describes a kind of circuit that issues due to minimal deviations in the manufacturing process slightly different behaviours during operation.
+In the field of cryptography, @puf devices are a popular tool for key generation and storage @PUFIntro @PUFIntro2.
+In general, a @puf refers to a type of circuit that exhibits slightly different behaviors during operation due to minor variations in the manufacturing process.
Since the behaviour of one @puf device is now only reproducible on itself and not on a device of the same type with the same manufacturing process, it can be used for secure key generation and/or storage.\
To improve the reliability of the keys generated and stored using the @puf, various #glspl("hda") have been introduced.
-The general operation of a @puf with a @hda can be divided into two separate stages: _enrollment_ and _reconstruction_.
-During enrollment, a @puf readout $v$ is generated upon which helper data $h$ is generated.
-At reconstruction, a slightly different @puf readout $v^*$ is generated.
-Using the helper data $h$ the new @puf readout $v^*$ can be improved to be less deviated from $v$ as before.
-This process of helper-data generation is generally known as _Fuzzy Commitment_.
+The general operation of a @puf with a @hda can be divided into two separate stages: _enrollment_ and _reconstruction_, as shown in @fig:puf_operation @PUFChartRef.
-Previous works already introduced different #glspl("hda") with various strategies.
+#figure(
+ include("../charts/PUF.typ"),
+ caption: [@puf model description using enrollment and reconstruction @PUFChartRef]
+)
+
+The enrollment stage will usually be performed in near-ideal, lab-like conditions, i.e. at room temperature ($25°C$).
+During this phase, a first @puf readout $nu$ with corresponding helper data $h$ is generated.
+Going on, reconstruction can now be performed under varying conditions, for example at a higher temperature.
+Here, a slightly different @puf readout $nu^*$ is generated.
+Using the helper data $h$, the new @puf readout $nu^*$ can be corrected to deviate less from $nu$ than before.
+One possible implementation of this principle is called _Fuzzy Commitment_ @fuzzycommitmentpaper @ruchti2021decoder.
+
+Previous works already introduced different #glspl("hda") with various strategies @delvaux2014helper @maes2009soft.
The simplest form of helper-data one could generate is reliability information for every @puf bit.
-Here, the @hda marks unreliable @puf bits that are then either discarded during reconstruction or rather corrected using a repetition code after the quantization process.
+Here, the @hda marks unreliable @puf bits that are then either discarded during reconstruction or rather corrected using an error correction code after the quantization process.
-Going on, publications @tmhd1, @tmhd2 and @smhd already introduced a metric-based @hda.
-These #glspl("hda") generate helper data during enrollment to define multiple quantizers for the reconstruction phase to minimize the risk of bit errors.
+Going on, publications @tmhd1 and @tmhd2 introduced a metric-based @hda, @tmhdt.
+The main goal of such a @hda is to improve the reliability of the @puf during the quantization step of the enrollment phase.
+To achieve that, helper data is generated to define multiple quantizers for the reconstruction phase to minimize the risk of bit errors.
+An outline for generalizing @tmhdt to higher-order bit quantization has already been proposed by Fischer in @smhd.
-
-As a newly proposed @hda, we will propose a method to shape the input values of a @puf to better fit inside the bounds of a multi-bit quantizer.
-We will explore the question which of these two #glspl("hda") provides the better performance for higher order bit cases using the least amount of helper-data bits possible.
+In the course of this work, we will first take a closer look at @smhdt as proposed by Fischer @smhd and provide a concrete realization for this method.
+We will also propose the idea of a method to shape the input values of a @puf to better fit within the bounds of a multi-bit quantizer, which we call @bach, and discuss how such a @hda can be successfully implemented in the future.
== Notation
To ensure a consistent notation of functions and ideas, we will now introduce some conventions and definitions.
-Random distributed variables will be notated with a capital letter, i.e. $X$, its realization will be the corresponding lower case letter, $x$.
-Vectors will be written in bold text: $bold(k)$ represents a vector of quantized symbols.
-Matrices are denoted with a bold capital letter: $bold(M)$
+Randomly distributed variables will be denoted by a capital letter, e.g. $X$.
+Realizations will be denoted by the corresponding lower case letter, $x$.
+Values of $x$ subject to some kind of error are marked with a superscript $*$, e.g. $x^*$.
+Vectors will be written in bold text: e.g., $bold(k)$ represents a vector of quantized symbols.
+Matrices are denoted with a bold capital letter: $bold(M)$.
We will call a quantized symbol $k$. $k$ can be any binary symbol, e.g. $0$, $01$, $110$.
A quantizer will be defined as a function $cal(Q)(x, bold(a))$ that returns a quantized symbol $k$.
-We also define the following special quantizers for metric based HDAs:
+We also define the following special quantizers for metric based #glspl("hda"):
A quantizer used during the enrollment phase is defined by a calligraphic $cal(E)$.
For the reconstruction phase, a quantizer will be defined by a calligraphic $cal(R)$.
@example-quantizer shows the curve of a 2-bit quantizer that receives $tilde(x)$ as input.
In the case that the value of $tilde(x)$ equals one of the four bounds, the quantized value is chosen randomly from the relevant bins.
@@ -48,26 +59,32 @@
$
cal(Q)(S,M) ,
$
where $S$ determines the number of metrics and $M$ the bit width of the symbols.
The corresponding metric is defined through the lower case $s$, the bit symbol through the lower case $m$.
+To compare both @smhdt and @bach, we will use a ratio $cal(r) = frac("Extracted bits", "Helper data bits")$.
+This ratio gives us an idea of how many helper data bits were used to obtain a quantized symbol.
+$cal(r)$ is smaller than $1$ if the number of helper data bits per quantized symbol is bigger than the symbol bit width itself, and bigger than $1$ otherwise.
-=== Tilde-Domain
+=== Tilde Domain
-As also described in @smhd, we will use a CDF to transform the real PUF values into the Tilde-Domain
+The tilde domain describes the range of numbers between $0$ and $1$, which is defined by the image of a @cdf.
+As also described in @smhd, we will use a @cdf to transform the real PUF values into the tilde domain.
This transformation can be performed using the function $xi(x) = tilde(x)$.
The key property of this transformation is the resulting uniform distribution of $tilde(x)$.
Considering a normal distribution, the CDF is defined as
-$ xi(frac(x - mu, sigma)) = frac(1, 2)[1 + \e\rf(frac(x - mu, sigma sqrt(2)))] $
+$ xi(x) = frac(1, 2)[1 + op("erf")(frac(x - mu, sigma sqrt(2)))]. $
==== #gls("ecdf", display: "Empirical cumulative distribution function (eCDF)")
-The @ecdf is constructed through sorting the empirical measurements of a distribution @dekking2005modern. Although less accurate, this method allows a more simple and less computationally complex way to transform real valued measurements into the Tilde-Domain. We will mainly use the eCDF in @chap:smhd because of the difficulty of finding an analytical description for the CDF of a Gaussian-Mixture.\
-To apply it, we will sort the vector of realizations $bold(z)$ of a random distributed variable $Z$ in ascending order.
+We will not always be able to find an analytical description of a probability distribution and its corresponding @cdf.
+Alternatively, an @ecdf can be constructed through sorting the empirical measurements of a distribution @dekking2005modern.
+Although less accurate, this method allows a simpler and less computationally complex way to transform real-valued measurements into the tilde domain.
+We will mainly use the @ecdf in @chap:smhd because of the difficulty of finding an analytical description for the @cdf of a weighted linear combination of random variables.
The function for an @ecdf can be defined as
$
-xi_#gls("ecdf") (x) = frac("number of elements in " bold(z)", that" <= x, n) in [0, 1],
+xi_#gls("ecdf") (x) = frac("number of elements in " bold(z) " that are" <= x, n) in [0, 1],
$
where $n$ defines the number of elements in the vector $bold(z)$.
If the vector $bold(z)$ were to contain the elements $[1, 3, 4, 5, 7, 9, 10]$ and $x = 5$, @eq:ecdf_def would yield $xi_#gls("ecdf") (5) = frac(4, 7)$.\
-The application of @eq:ecdf_def on $X$ will transform its values into the empirical tilde-domain.
+The application of @eq:ecdf_def on $X$ will transform its values into the empirical tilde domain.
We can also define an inverse @ecdf:
@@ -77,3 +94,4 @@
$
The result of @eq:ecdf_inverse is the index $i$ of the element $z_i$ from the vector of realizations $bold(z)$.
+To apply the @ecdf to our numerical results later, we will sort the vector of realizations $bold(z)$ of a randomly distributed variable $Z$ in ascending order.
diff --git a/content/outlook.typ b/content/outlook.typ index 8f7730d..14ba3f4 100644 --- a/content/outlook.typ +++ b/content/outlook.typ @@ -1,12 +1,26 @@
-= Outlook
+= Conclusion and Outlook
-Upon the findings of this work, further topics might be investigated in the future.
+During the course of this work, we took a closer look at an already introduced @hda, @smhdt, and provided a concrete realization.
+Our experiments showed that after a certain point, using more metrics $S$ will not improve the @ber any further, as it behaves asymptotically for $S arrow infinity$.
+Furthermore, we concluded that for higher choices of the symbol width $M$, @smhdt will not be able to improve on the @ber, as the initial error is too high.
+An interesting addition to our analysis was the Gray-coded labelling of the quantizer, which resulted in an improvement of $approx 30%$.
-Generally, the performances of both helper-data algorithms might be tested on larger datasets.
+Going on, we introduced the idea of a new @hda, which we called Boundary Adaptive Clustering with Helper data, @bach.
+Here, we aimed to reduce the @ber by moving our initial @puf measurement values away from the quantizer bounds using weighted linear combinations of our input values.
+Although this method showed promising results for sign-based quantization, yielding an improvement of $approx 96%$ in our testing, finding a good approach to generalize this concept turned out to be difficult.
+The first issue was the lack of an analytical description of the probability distribution resulting from the linear combinations.
+We accounted for that by using an algorithm that alternates between defining the quantizing bounds using an @ecdf and optimizing the weights for the linear combinations based on the found bounds; a rough sketch of this loop is given below.
+The initial, loose objective of finding linear combinations that maximize the distance to their nearest quantization bounds did not result in a stable probability distribution over the iterations.
+Thus, we proposed a different approach that approximates the linear combinations to the centers between the quantizing bounds.
+This method resulted in a stable probability distribution, but did not provide any meaningful improvements to the @ber in comparison to not using any helper data at all.
-Specifically concerning the BACH method, instead of using only $plus.minus 1$ as weights for the linear combinations, fractional weights could be used instead as they could provide more flexibility for the outcome of the linear combinations.
-In the same sense, a more efficient method to find the optimal linear combination might exist.
+Future investigations of the @bach idea might solve the convergence issues of the bound-distance maximization strategy.
+Since the vector of bounds $bold(b)$ is updated every iteration of @bach, a limit to the deviation from the previous position of a bound might be set.
+Furthermore, a recursive approach to reach higher order bit quantization inputs might also result in a converging distribution.
+If we do not want to give up the approach using a vector of optimal points $bold(cal(o))$ as in the center point approximation, a way may be found to increase the distance between all optimal points $bold(cal(o))$ to achieve a better separation for the results of the linear combinations in every quantizer bin.
-During the iterative process of the center point approximation in BACH, a way may be found to increase the distance between all optimal points $bold(cal(o))$ to achieve a better separation for the results of the linear combinations in every quantizer bin.
+If a converging realization of @bach is found, using fractional weights instead of $plus.minus 1$ could provide more flexibility for the outcome of the linear combinations.
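For illustration, the alternating center-point approximation described above might be sketched as follows in Python. This is a loose reconstruction from the prose; every name, parameter, and default value here is hypothetical and not the thesis implementation.

```python
import numpy as np
from itertools import product

def bach_center_approx(x, m_bits=2, iters=25):
    # x: one row of input values per key symbol; weights are restricted to +/-1.
    n_sym, n_addends = x.shape
    n_bins = 2 ** m_bits
    weights = np.ones((n_sym, n_addends))
    candidates = np.array(list(product((-1, 1), repeat=n_addends)))
    for _ in range(iters):
        # (1) place quantizer bounds via the eCDF of the current combinations
        z = np.sum(weights * x, axis=1)
        bounds = np.quantile(z, np.linspace(0, 1, n_bins + 1))
        centers = (bounds[:-1] + bounds[1:]) / 2   # optimal points o
        # (2) pick the +/-1 weights whose linear combination lies closest
        #     to the centre of some quantizer bin
        for i, row in enumerate(x):
            combos = candidates @ row
            dist = np.abs(combos[:, None] - centers[None, :]).min(axis=1)
            weights[i] = candidates[np.argmin(dist)]
    return weights
```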
+Ultimately, we can build on this in the future and provide a complete key storage system using @bach or @smhdt to improve the quantization process.
+But in the end, the real quantizers were the friends we made along the way.
diff --git a/content/state.typ b/content/state.typ deleted file mode 100644 index fe6eedc..0000000 --- a/content/state.typ +++ /dev/null @@ -1,3 +0,0 @@
-= State of the Art
-There are a variety of existing approaches to providing a suitable oracle for quantum circuit equivalence checking based on @dd[s].
-@qcec currently implements gate-cost, lookahead, one-to-one, proportional and sequential application schemes. @burgholzer2021ec
diff --git a/data/errorrates/bach/errorrates4.csv b/data/errorrates/bach/errorrates4.csv index dfa8b0e..7598941 100644 --- a/data/errorrates/bach/errorrates4.csv +++ b/data/errorrates/bach/errorrates4.csv @@ -5,4 +5,4 @@
6,0.22165875000000004
7,0.17890425000000001
8,0.13274003759398498
-9,0.09169562499999998
+9,0.09169562499999998
diff --git a/data/errorrates/bach/errorrates_bonus.csv b/data/errorrates/bach/errorrates_bonus.csv new file mode 100644 index 0000000..abe6f50 --- /dev/null +++ b/data/errorrates/bach/errorrates_bonus.csv @@ -0,0 +1,17 @@
+addends,bit,errorrate
+2,5,0.2965336
+3,5,0.213798947368421
+4,5,0.37411818181818185
+5,5,0.312252030075188
+6,5,0.23020799999999994
+7,5,0.23144159999999997
+8,5,0.18691639097744356
+9,5,0.14719559999999998
+2,6,0.1480327485380117
+3,6,0.33373909774436084
+4,6,0.14506700000000003
+5,6,0.256454375
+6,6,0.20765733333333333
+7,6,0.2271983709273184
+8,6,0.19503741666666666
+9,6,0.1496060606060606
diff --git a/flake.nix b/flake.nix index bbbd028..d053e3a 100644 --- a/flake.nix +++ b/flake.nix @@ -50,8 +50,8 @@
 ];
 };
- packages.mqt-qcec-diff-thesis = tx.buildTypstProject (typstProject // typstProjectSrc);
- packages.default = self.packages.${system}.mqt-qcec-diff-thesis;
+ packages.HDA-thesis = tx.buildTypstProject (typstProject // typstProjectSrc);
+ packages.default = self.packages.${system}.HDA-thesis;
 apps.watch = flake-utils.lib.mkApp { drv = tx.watchTypstProject typstProject; };
 apps.default = self.apps.${system}.watch;
diff --git a/glossary.typ b/glossary.typ index 383ce4f..c54e678 100644 --- a/glossary.typ +++ b/glossary.typ @@ -5,8 +5,11 @@
#print-glossary((
 (key: "hda", short: "HDA", plural: "HDAs", long: "helper data algorithm", longplural: "helper data algorithms"),
+ (key: "cdf", short: "CDF", plural: "CDFs", long: "cumulative distribution function", longplural: "cumulative distribution functions"),
 (key: "ecdf", short: "eCDF", plural: "eCDFs", long: "empirical Cumulative Distribution Function", longplural: "empirical Cumulative Distribution Functions"),
 (key: "ber", short: "BER", plural: "BERs", long: "bit error rate", longplural: "bit error rates"),
- (key: "smhdt", short: "SMHD", plural: "SMHDs", long: "S-Metric Helper Data Method"),
- (key: "puf", short: "PUF", plural: "PUFs", long: "physical unclonale function", longplural: "physical unclonale functions")
+ (key: "smhdt", short: "SMHD", plural: "SMHDs", long: "S-Metric Helper Data method"),
+ (key: "puf", short: "PUF", plural: "PUFs", long: "physical unclonable function", longplural: "physical unclonable functions"),
+ (key: "tmhdt", short: "TMHD", plural: "TMHDs", long: "Two Metric Helper Data method"),
+ (key: "bach", short: "BACH", long: "Boundary Adaptive Clustering with Helper data")
))
diff --git a/graphics/25_25_all_error_rates_fixed.svg b/graphics/25_25_all_error_rates_fixed.svg new file mode 100644 index 0000000..75a2281
--- /dev/null +++ b/graphics/25_25_all_error_rates_fixed.svg @@ -0,0 +1,3141 @@
+[SVG content omitted: bit error rate figure generated with Matplotlib v3.8.3, https://matplotlib.org/; vector path data not reproduced here]
diff --git a/graphics/plots/bach/compare/bad.png b/graphics/plots/bach/compare/bad.png new file mode 100644 index 0000000..f00aae4 Binary files /dev/null and b/graphics/plots/bach/compare/bad.png differ
diff --git a/graphics/plots/bach/compare/good.png b/graphics/plots/bach/compare/good.png new file mode 100644 index 0000000..dffb520 Binary files /dev/null and b/graphics/plots/bach/compare/good.png differ
diff --git a/graphics/plots/bach/instability/frame_1.png b/graphics/plots/bach/instability/frame_1.png index 4a5e457..f4f17e9 100644 Binary files a/graphics/plots/bach/instability/frame_1.png and b/graphics/plots/bach/instability/frame_1.png differ
diff --git a/graphics/plots/bach/instability/frame_18.png b/graphics/plots/bach/instability/frame_18.png index 771d915..367d979 100644 Binary files a/graphics/plots/bach/instability/frame_18.png and b/graphics/plots/bach/instability/frame_18.png differ
diff --git a/graphics/plots/bach/instability/frame_25.png b/graphics/plots/bach/instability/frame_25.png index 23c9cb9..2e64dee 100644 Binary files a/graphics/plots/bach/instability/frame_25.png and b/graphics/plots/bach/instability/frame_25.png differ
diff --git a/graphics/plots/bach/instability/frame_7.png b/graphics/plots/bach/instability/frame_7.png index 76221cd..8128b19 100644 Binary files a/graphics/plots/bach/instability/frame_7.png and b/graphics/plots/bach/instability/frame_7.png differ
diff --git a/graphics/plots/bach/stability/frame_1.png b/graphics/plots/bach/stability/frame_1.png index f633b38..dfd3463 100644 Binary files a/graphics/plots/bach/stability/frame_1.png and b/graphics/plots/bach/stability/frame_1.png differ
diff --git a/graphics/plots/bach/stability/frame_25.png b/graphics/plots/bach/stability/frame_25.png index e0206a1..d4b79e0 100644 Binary files a/graphics/plots/bach/stability/frame_25.png and b/graphics/plots/bach/stability/frame_25.png differ
diff --git a/graphics/plots/errorrates_changerate.typ b/graphics/plots/errorrates_changerate.typ index 9314d7e..82ee1b6 100644 --- a/graphics/plots/errorrates_changerate.typ +++ b/graphics/plots/errorrates_changerate.typ @@ -5,8 +5,8 @@
 plot.plot(size: (10,4),
 x-tick-step: none,
 x-ticks: ((1, [1]), (2, [2]), (3, [3]), (4, [4]), (5, [5]), (6, [6])),
- y-label: $(x_"1" (m)) / (x_"100" (m))$,
- x-label: $m$,
+ y-label: $frac(op("BER")(1, 2^M),op("BER")(100, 2^M))$,
+ x-label: $M$,
 y-tick-step: 500,
 axis-style: "left",
 x-min: 0,
diff --git a/graphics/plots/gray_coding/3dplot.svg b/graphics/plots/gray_coding/3dplot.svg index 094bc31..0e1d1fc 100644 --- a/graphics/plots/gray_coding/3dplot.svg +++ b/graphics/plots/gray_coding/3dplot.svg
[SVG path data changes omitted: regenerated 3D plot for the gray-coding comparison]
diff --git a/graphics/plots/temperature/25_5_re.typ b/graphics/plots/temperature/25_5_re.typ index 4870149..8a4d421 100644 --- a/graphics/plots/temperature/25_5_re.typ +++ b/graphics/plots/temperature/25_5_re.typ @@ -26,8 +26,8 @@
 plot.plot(size: (10,5),
 x-tick-step: none,
 x-ticks: ((0.04, [2]),(2, [100])),
- y-label: $"Bit error rate"$,
- x-label: $s$,
+ y-label: $op("BER")(S, 2^2)$,
+ x-label: $S$,
 y-tick-step: 1,
 x-max: 2,
 //y-ticks : (
diff --git a/graphics/plots/temperature/global_diffs/global_diffs.typ b/graphics/plots/temperature/global_diffs/global_diffs.typ index 4596317..7aac59a 100644 --- a/graphics/plots/temperature/global_diffs/global_diffs.typ +++ b/graphics/plots/temperature/global_diffs/global_diffs.typ @@ -25,23 +25,27 @@
 )
 plot.plot(
- y-label: $"Bit error rate"$,
- x-label: "Operating configuration",
+ y-label: "Bit error rate",
+ x-label: "Enrollment, reconstruction temperature",
+ legend: "legend.north",
+ legend-style: (offset: (2.25, 0), stroke: none),
 x-tick-step: none,
 x-ticks: conf,
 y-format: formatter,
 y-tick-step: 0.5,
 axis-style: "scientific-auto",
 size: (16,6),
- plot.add(errorrate, axes: ("x", "y"), style: (stroke: (paint: red))),
+ plot.add(errorrate, axes: ("x", "y"), style: (stroke: (paint: red)), label: $op("BER")(100, 2^2)$),
 plot.add-hline(1)
 )
 plot.plot(
 y2-label: "Temperature difference",
+ legend: "legend.north",
+ legend-style: (offset: (-2.25, 0), stroke: none),
 y2-tick-step: 10,
 axis-style: "scientific-auto",
 size: (16,6),
- plot.add(diff, axes: ("x1","y2")),
+ plot.add(diff, axes: ("x1","y2"), label: [Temperature difference]),
 )
})
diff --git a/graphics/quantizers/bach/sign-based-overlay.typ b/graphics/quantizers/bach/sign-based-overlay.typ index ab498a2..a3492ec 100644 --- a/graphics/quantizers/bach/sign-based-overlay.typ +++ b/graphics/quantizers/bach/sign-based-overlay.typ @@ -10,7 +10,7 @@
 legend-style: (orientation: ltr, item: (spacing: 0.5)),
 x-tick-step: none,
 x-ticks: ((0, [0]), (100, [0])),
- y-label: $cal(Q)(1, x), xi(x)$,
+ y-label: $cal(Q)(1, x)$,
 x-label: $x$,
 y-tick-step: none,
 y-ticks: ((0, [0]), (ymax, [1])),
diff --git a/graphics/quantizers/s-metric/2_2_en.typ
b/graphics/quantizers/s-metric/2_2_en.typ index 4dc8597..fa9d11f 100644 --- a/graphics/quantizers/s-metric/2_2_en.typ +++ b/graphics/quantizers/s-metric/2_2_en.typ @@ -3,6 +3,8 @@ #let line_style = (stroke: (paint: black, thickness: 2pt)) #let dashed = (stroke: (dash: "dashed")) #canvas({ + import draw: * + set-style(axes: (shared-zero: false)) plot.plot(size: (8,6), x-tick-step: 0.25, y-label: $cal(E)(2, 2, tilde(x))$, diff --git a/graphics/quantizers/s-metric/3_2_en.typ b/graphics/quantizers/s-metric/3_2_en.typ index 13d6bb7..79c4bfa 100644 --- a/graphics/quantizers/s-metric/3_2_en.typ +++ b/graphics/quantizers/s-metric/3_2_en.typ @@ -3,6 +3,8 @@ #let line_style = (stroke: (paint: black, thickness: 2pt)) #let dashed = (stroke: (dash: "dashed")) #canvas({ + import draw: * + set-style(axes: (shared-zero: false)) plot.plot(size: (8,6), x-tick-step: 0.25, y-label: $cal(E)(3, 2, tilde(x))$, diff --git a/graphics/quantizers/two-bit-enroll.typ b/graphics/quantizers/two-bit-enroll.typ index 0dcc3c7..1e4b82b 100644 --- a/graphics/quantizers/two-bit-enroll.typ +++ b/graphics/quantizers/two-bit-enroll.typ @@ -1,8 +1,10 @@ -#import "@preview/cetz:0.2.2": canvas, plot +#import "@preview/cetz:0.2.2": canvas, plot, draw, palette #let line_style = (stroke: (paint: black, thickness: 2pt)) #let dashed = (stroke: (dash: "dashed")) #canvas({ + import draw: * + set-style(axes: (shared-zero: false)) plot.plot(size: (8,6), x-tick-step: 0.25, y-label: $cal(Q)(2, 1, tilde(x))$, @@ -10,11 +12,12 @@ y-tick-step: none, y-ticks: ((0.25, [00]), (0.5, [01]), (0.75, [10]), (1, [11])), axis-style: "left", - x-min: 0, + //x-min: 0, x-max: 1, y-min: 0, y-max: 1,{ plot.add(((0,0.25), (0.25,0.25), (0.5,0.5), (0.75,0.75), (1, 1)), line: "vh", style: line_style) + //plot.add(((0,0), (0,0)), style: (stroke: none)) plot.add-hline(0.25, 0.5, 0.75, 1, style: dashed) plot.add-vline(0.25, 0.5, 0.75, 1, style: dashed) }) diff --git a/main.pdf b/main.pdf deleted file mode 100644 index 0c4b053..0000000 Binary files a/main.pdf and /dev/null differ diff --git a/main.typ b/main.typ index 48b1db7..3480914 100644 --- a/main.typ +++ b/main.typ @@ -32,7 +32,7 @@ degree: "Bachelor of Science (B.Sc.)", examiner: "Prof. Dr. Georg Sigl", supervisor: "M.Sc. 
Jonas Ruchti", - submitted: "22.07.2024", + submitted: "30.08.2024", doc ) #set page(footer: locate( @@ -45,10 +45,6 @@ #include "content/introduction.typ" #include "content/SMHD.typ" #include "content/BACH.typ" -//#include "content/state.typ" -//#include "content/implementation.typ" -//#include "content/benchmarks.typ" -#include "content/conclusion.typ" #include "content/outlook.typ" #include "glossary.typ" diff --git a/pseudocode/bach_1.typ b/pseudocode/bach_1.typ index 81bc058..d5c5dca 100644 --- a/pseudocode/bach_1.typ +++ b/pseudocode/bach_1.typ @@ -6,11 +6,12 @@ + *lists*: optimal weights $bold(h)_"opt"$ + $bold(cal(o)) arrow.l bold(cal(o))_"first"$ + #line-label() *repeat* t times: - + *perform* @alg:best_appr for all input values with $bold(cal(o))$: - + *update* $bold(h)_"opt"$ with returned weights + + *perform* #smallcaps[OptimalWeights($bold(cal(0)), bold(x))$]: + + #line-label() *update* $bold(h)_"opt"$ with returned weights + $bold(z)_"opt" arrow.l$ all returned linear combinations - + #line-label() *sort* $bold(z)_"opt"$ in ascending order - + #line-label() *define* new quantizer $cal(Q)^*$ using the @ecdf based on $bold(z)_"opt"$ - + #line-label() *update* $bold(cal(o))$ with newly found quantizer step centers + + *define* new quantizer $cal(Q)^*$ using the @ecdf based on $bold(z)_"opt"$: + + *sort* $bold(z)_"opt"$ in ascending order + + $cal(Q)^* arrow.l $ use @eq:ecdf_inverse with quantizer bounds in the tilde domain + + #line-label() *update* $bold(cal(o))$ with newly found quantizer step centers + *return* $bold(h)_"opt"$ ] diff --git a/pseudocode/bach_find_best_appr.typ b/pseudocode/bach_find_best_appr.typ index bf04bb7..46a1e3e 100644 --- a/pseudocode/bach_find_best_appr.typ +++ b/pseudocode/bach_find_best_appr.typ @@ -1,6 +1,6 @@ #import "@preview/lovelace:0.3.0": * -#pseudocode-list(booktabs: true, numbered-title: [Find best approximation])[ +#pseudocode-list(booktabs: true, numbered-title: [OptimalWeights to approximate $bold(cal(o))$])[ + *inputs*: + $bold(y)$ input values for linear combinations + $bold(cal(o))$ list of optimal points diff --git a/template/conf.typ b/template/conf.typ index 0334610..0bf7c33 100644 --- a/template/conf.typ +++ b/template/conf.typ @@ -20,8 +20,7 @@ school: school ) - pagebreak() - pagebreak() + pagebreak(to: "odd") title_page( title: title, @@ -34,37 +33,16 @@ submitted: submitted ) - pagebreak() - - //set math.equation(numbering: "(1)") - - set page( - paper: "a4", - margin: ( - top: 3cm, - bottom: 3cm, - x: 2cm, - ), - header: [], - footer: [], - //numbering: "1" - ) - - set par(justify: true) - set align(left) - set text( - font: "Times New Roman", - size: 12pt, - ) - set heading(numbering: "1.") show heading: it => locate(loc => { let levels = counter(heading).at(loc) set text(font: "TUM Neue Helvetica") if it.level == 1 [ + #if levels.at(0) != 1 { + pagebreak(to: "odd") + } #set text(size: 24pt) - #pagebreak() #if levels.at(0) != 0 { numbering("1", levels.at(0)) } @@ -90,16 +68,6 @@ ] }) - - - set page(numbering: none) - - contents_page() - - set page(numbering: none) - - pagebreak() - set page( paper: "a4", margin: ( @@ -108,16 +76,30 @@ x: 2cm, ), header: [], - footer: none, + footer: [] ) - //set page(footer: locate( - //loc => if calc.even(loc.page()) { - // align(right, counter(page).display("1")); - //} else { - // align(left, counter(page).display("1")); - //} - //)) + contents_page() + + pagebreak(to: "odd") + + set par(justify: true) + set align(left) + set text( + font: "Times New Roman", + size: 12pt + ) + + set 
page( + header: [], + footer: locate(loc => + if calc.rem(loc.page(), 2) == 0 { + align(left, text(font: "TUM Neue Helvetica", size: 10pt, counter(page).display("1"))); + } else { + align(right, text(font: "TUM Neue Helvetica", size: 10pt, counter(page).display("1"))); + } + ) + ) doc } diff --git a/template/conf.typ.back b/template/conf.typ.back new file mode 100644 index 0000000..0334610 --- /dev/null +++ b/template/conf.typ.back @@ -0,0 +1,124 @@ +#import "cover.typ": cover_page +#import "title.typ": title_page +#import "contents.typ": contents_page + +#let conf( + title: "", + author: "", + chair: "", + school: "", + degree: "", + examiner: "", + supervisor: "", + submitted: "", + doc +) = { + cover_page( + title: title, + author: author, + chair: chair, + school: school + ) + + pagebreak() + pagebreak() + + title_page( + title: title, + author: author, + chair: chair, + school: school, + degree: degree, + examiner: examiner, + supervisor: supervisor, + submitted: submitted + ) + + pagebreak() + + //set math.equation(numbering: "(1)") + + set page( + paper: "a4", + margin: ( + top: 3cm, + bottom: 3cm, + x: 2cm, + ), + header: [], + footer: [], + //numbering: "1" + ) + + set par(justify: true) + set align(left) + set text( + font: "Times New Roman", + size: 12pt, + ) + + set heading(numbering: "1.") + show heading: it => locate(loc => { + let levels = counter(heading).at(loc) + + set text(font: "TUM Neue Helvetica") + if it.level == 1 [ + #set text(size: 24pt) + #pagebreak() + #if levels.at(0) != 0 { + numbering("1", levels.at(0)) + } + #it.body + #v(1em, weak: true) + ] else if it.level == 2 [ + #set text(size: 16pt) + #v(1em) + #numbering("1.1", levels.at(0), levels.at(1)) + #it.body + #v(1em, weak: true) + ] else if it.level == 3 [ + #set text(size: 16pt) + #v(1em, weak: true) + #numbering("1.1.1", levels.at(0), levels.at(1), levels.at(2)) + #it.body + #v(1em, weak: true) + ] else [ + #set text(size: 12pt) + #v(1em, weak: true) + #it.body + #v(1em, weak: true) + ] + }) + + + + set page(numbering: none) + + contents_page() + + set page(numbering: none) + + pagebreak() + + set page( + paper: "a4", + margin: ( + top: 3cm, + bottom: 3cm, + x: 2cm, + ), + header: [], + footer: none, + ) + + //set page(footer: locate( + //loc => if calc.even(loc.page()) { + // align(right, counter(page).display("1")); + //} else { + // align(left, counter(page).display("1")); + //} + //)) + + doc +} + diff --git a/template/cover.typ b/template/cover.typ index ad2d537..4716c63 100644 --- a/template/cover.typ +++ b/template/cover.typ @@ -6,11 +6,7 @@ chair: "", school: "" ) = { - set text( - font: "TUM Neue Helvetica" - ) - - set page( + page( paper: "a4", margin: ( top: 3cm, @@ -24,23 +20,24 @@ text( fill: tum_blue, size: 8pt, + font: "TUM Neue Helvetica", [#chair \ #school \ Technical University of Munich] ), align(bottom + right, image("resources/TUM_Logo_blau.svg", height: 50%)) ) ], footer: [] - ) + )[ + #v(1cm) - v(1cm) + #align(top + left)[#text(font: "TUM Neue Helvetica", size: 24pt, [*#title*])] + + #v(3cm) - set align(top + left) - text(size: 24pt, [*#title*]) + #text(font: "TUM Neue Helvetica", fill: tum_blue, size: 17pt, [*#author*]) + + #align(bottom + right)[#image("resources/TUM_Tower.png", width: 60%)] + ] - v(3cm) - - text(fill: tum_blue, size: 17pt, [*#author*]) - - set align(bottom + right) - image("resources/TUM_Tower.png", width: 60%) + pagebreak() } diff --git a/template/cover.typ.back b/template/cover.typ.back new file mode 100644 index 0000000..ad2d537 --- /dev/null +++ 
b/template/cover.typ.back @@ -0,0 +1,46 @@ +#import "colour.typ": * + +#let cover_page( + title: "", + author: "", + chair: "", + school: "" +) = { + set text( + font: "TUM Neue Helvetica" + ) + + set page( + paper: "a4", + margin: ( + top: 3cm, + bottom: 1cm, + x: 1cm, + ), + header: [ + #grid( + columns: (1fr, 1fr), + rows: (auto), + text( + fill: tum_blue, + size: 8pt, + [#chair \ #school \ Technical University of Munich] + ), + align(bottom + right, image("resources/TUM_Logo_blau.svg", height: 50%)) + ) + ], + footer: [] + ) + + v(1cm) + + set align(top + left) + text(size: 24pt, [*#title*]) + + v(3cm) + + text(fill: tum_blue, size: 17pt, [*#author*]) + + set align(bottom + right) + image("resources/TUM_Tower.png", width: 60%) +} diff --git a/template/title.typ b/template/title.typ index bb4a008..311edb3 100644 --- a/template/title.typ +++ b/template/title.typ @@ -10,12 +10,7 @@ supervisor: "", submitted: "" ) = { - set text( - font: "TUM Neue Helvetica", - size: 10pt - ) - - set page( + page( paper: "a4", margin: ( top: 5cm, @@ -29,36 +24,44 @@ text( fill: tum_blue, size: 8pt, + font: "TUM Neue Helvetica", [#chair \ #school \ Technical University of Munich] ), align(bottom + right, image("resources/TUM_Logo_blau.svg", height: 30%)) ) ], footer: [] - ) + )[ + #set text( + font: "TUM Neue Helvetica", + size: 10pt + ) - v(1cm) + #v(1cm) - set align(top + left) - text(size: 24pt, [*#title*]) + #set align(top + left) + #text(size: 24pt, [*#title*]) - v(3cm) + #v(3cm) - text(fill: tum_blue, size: 17pt, [*#author*]) + #text(fill: tum_blue, size: 17pt, [*#author*]) - v(3cm) + #v(3cm) - [Thesis for the attainment of the academic degree] - v(1em) - [*#degree*] - v(1em) - [at the #school of the Technical University of Munich.] + Thesis for the attainment of the academic degree + #v(1em) + *#degree* + #v(1em) + at the #school of the Technical University of Munich. - v(3cm) + #v(3cm) - [*Examiner:*\ #examiner] - v(0em) - [*Supervisor:*\ #supervisor] - v(0em) - [*Submitted:*\ Munich, #submitted] + *Examiner:*\ #examiner + #v(0em) + *Supervisor:*\ #supervisor + #v(0em) + *Submitted:*\ Munich, #submitted + ] + + pagebreak() } diff --git a/template/title.typ.back b/template/title.typ.back new file mode 100644 index 0000000..bb4a008 --- /dev/null +++ b/template/title.typ.back @@ -0,0 +1,64 @@ +#import "colour.typ": * + +#let title_page( + title: "", + author: "", + chair: "", + school: "", + degree: "", + examiner: "", + supervisor: "", + submitted: "" +) = { + set text( + font: "TUM Neue Helvetica", + size: 10pt + ) + + set page( + paper: "a4", + margin: ( + top: 5cm, + bottom: 3cm, + x: 2cm, + ), + header: [ + #grid( + columns: (1fr, 1fr), + rows: (auto), + text( + fill: tum_blue, + size: 8pt, + [#chair \ #school \ Technical University of Munich] + ), + align(bottom + right, image("resources/TUM_Logo_blau.svg", height: 30%)) + ) + ], + footer: [] + ) + + v(1cm) + + set align(top + left) + text(size: 24pt, [*#title*]) + + v(3cm) + + text(fill: tum_blue, size: 17pt, [*#author*]) + + v(3cm) + + [Thesis for the attainment of the academic degree] + v(1em) + [*#degree*] + v(1em) + [at the #school of the Technical University of Munich.] + + v(3cm) + + [*Examiner:*\ #examiner] + v(0em) + [*Supervisor:*\ #supervisor] + v(0em) + [*Submitted:*\ Munich, #submitted] +} diff --git a/termpdf.log b/termpdf.log deleted file mode 100644 index e69de29..0000000