diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index d3e6509d0f00fdb0014fadea8eb1525189c91f86..167a069b9c3d88db7a7442290f78145a5cbd9947 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -180,9 +180,9 @@ build-spack-env-on-runner:
     - mkdir -p $CI_PROJECT_DIR/spack_logs/installed $CI_PROJECT_DIR/spack_logs/not_installed
     # for succesfully installed packages: keep the spack logs for any package modified during this CI job
     - PKG_DIR=$CI_PROJECT_DIR/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-10.3.0
-    - if cd $PKG_DIR; then find . \( -name ".spack" -o -name ".build" -o -name ".spack_test_results" \) -exec cp -r --parents "{}" $CI_PROJECT_DIR/spack_logs/installed \;; fi
+    - if cd $PKG_DIR; then find . \( -name ".spack" -o -name ".build" \) -exec cp -r --parents "{}" $CI_PROJECT_DIR/spack_logs/installed \;; fi
     # for not succesfully installed packages: also keep the spack logs for any packages that failed
-    - if cd /tmp/$(whoami)/spack-stage/; then find . -maxdepth 2 -name "*.txt" -exec cp --parents "{}" $CI_PROJECT_DIR/spack_logs/not_installed \;; fi
+    - if cd /tmp/$(whoami)/spack-stage/; then find . -maxdepth 2 \( -name "*.txt" -o -name ".install_time_tests" \) -exec cp -r --parents "{}" $CI_PROJECT_DIR/spack_logs/not_installed \;; fi
     # - if [ -d /tmp/spack_tests ]; then mv /tmp/spack_tests $CI_PROJECT_DIR; fi
   artifacts:
     paths:
@@ -215,9 +215,9 @@ sync-gitlab-spack-instance:
     # for succesfully installed packages: keep the spack logs for any package modified during this CI job
     # (we use repo.yaml, that is modified at each start of the pipeline, as a reference file)
     - PKG_DIR=$SPACK_PATH_GITLAB/spack/opt/spack/linux-ubuntu20.04-x86_64/gcc-10.3.0
-    - if cd $PKG_DIR; then find . -newer $SPACK_REPO_PATH/repo.yaml \( -name ".spack" -o -name ".spack_test_results" -o -name ".build" \) -exec cp -r --parents "{}" $CI_PROJECT_DIR/spack_logs/installed \;; fi
+    - if cd $PKG_DIR; then find . -newer $SPACK_REPO_PATH/repo.yaml \( -name ".spack" -o -name ".build" \) -exec cp -r --parents "{}" $CI_PROJECT_DIR/spack_logs/installed \;; fi
     # for not succesfully installed packages: also keep the spack logs for any packages that failed
-    - if cd /tmp/$(whoami)/spack-stage/; then find . -maxdepth 2 -name "*.txt" -exec cp --parents "{}" $CI_PROJECT_DIR/spack_logs/not_installed \;; fi
+    - if cd /tmp/$(whoami)/spack-stage/; then find . -maxdepth 2 \( -name "*.txt" -o -name ".install_time_tests" \) -exec cp -r --parents "{}" $CI_PROJECT_DIR/spack_logs/not_installed \;; fi
   artifacts:
     paths:
       - spack_logs
diff --git a/packages/wf-brainscales2-demos/package.py b/packages/wf-brainscales2-demos/package.py
index 5acf551cb328cf87a9d2942de1baa612fd044c4d..580fd5cecc6788fb274352cd253d875af900fef9 100644
--- a/packages/wf-brainscales2-demos/package.py
+++ b/packages/wf-brainscales2-demos/package.py
@@ -39,20 +39,40 @@ class WfBrainscales2Demos(Package):
     def install(self, spec, prefix):
         install_tree(".", join_path(prefix, "notebooks"))
 
+    def _nbconvert(self, nb, nb_out):
+        jupyter = Executable("jupyter")
+        args = [
+            "nbconvert",
+            "--ExecutePreprocessor.kernel_name=python3",
+            "--ExecutePreprocessor.timeout=900",
+            "--execute",
+            "--to",
+            "notebook",
+            nb,
+            "--output",
+            nb_out
+        ]
+        try:
+            # execute notebook and save
+            jupyter(*args, output=str.split, error=str.split)
+        except ProcessError as e:
+            # if the notebook execution fails, re-run notebook to produce output with error
+            # in case of a cell timeout, don't re-run
+            if "CellTimeoutError" not in str(e):
+                jupyter(*(args+["--allow-errors"]))
+            raise
+
     def _run_notebooks(self, output_dir):
         mkdirp(output_dir)
-        # execute notebook and save
-        jupyter = Executable("jupyter")
+        # try to run all notebooks, then fail if there are errors
+        exceptions = []
         for fn in glob(join_path(prefix, "notebooks", "ts*.ipynb")) + glob(join_path(prefix, "notebooks", "tp*.ipynb")):
-            jupyter("nbconvert",
-                    "--ExecutePreprocessor.kernel_name=python3",
-                    "--execute",
-                    "--allow-errors",
-                    "--to",
-                    "notebook",
-                    fn,
-                    "--output",
-                    join_path(output_dir, os.path.basename(fn)))
+            try:
+                self._nbconvert(fn, join_path(output_dir, os.path.basename(fn)))
+            except Exception as e:
+                exceptions.append(e)
+        if exceptions:
+            raise Exception("Errors during notebook execution")
 
     def _set_collab_things(self):
         # enable "EBRAINS lab" mode
@@ -66,7 +86,8 @@ class WfBrainscales2Demos(Package):
     def installcheck(self):
         self._set_collab_things()
         # TODO (ECM): Provide a selection of notebooks that perform local-only tests.
-        #self._run_notebooks(join_path(self.prefix, ".spack_test_results"))
+        # self._run_notebooks(join_path(self.stage.path, ".install_time_tests"))
+        # copy_tree(join_path(self.stage.path, ".install_time_tests"), join_path(self.prefix, '.build'))
 
     def test_notebooks(self):
         self._set_collab_things()
diff --git a/packages/wf-multi-area-model/package.py b/packages/wf-multi-area-model/package.py
index 3b6c6c24cb686d641ec81c0b14c91bad8c72e8f5..b8ff5feb8f4326cc8e756885b10e540904bf1632 100644
--- a/packages/wf-multi-area-model/package.py
+++ b/packages/wf-multi-area-model/package.py
@@ -13,9 +13,9 @@ class WfMultiAreaModel(Package):
     git = "https://github.com/INM-6/multi-area-model"
     maintainer = ["terhorstd", "didi-hou"]
 
-    version("v1.1.0", tag="v1.1.0")
+    version("1.1.1", tag="v1.1.1")
+    version("1.1.0", tag="v1.1.0")
     version("master", branch="master")
-    version("ebrains", branch="lab.ebrains.eu")
 
     depends_on("py-nested-dict", type=("run", "test"))
     depends_on("py-dicthash", type=("run", "test"))
@@ -24,30 +24,43 @@ class WfMultiAreaModel(Package):
     depends_on("py-scipy", type=("run", "test"))
     depends_on("py-future", type=("run", "test"))
     depends_on("nest", type=("run", "test"))
+    depends_on("py-neo", type=("run", "test"))
+    depends_on("py-elephant", type=("run", "test"))
     depends_on("r-aod", type=("run", "test"))
     depends_on("py-notebook", type=("run", "test"))
 
     def install(self, spec, prefix):
         install_tree(".", join_path(prefix, "notebooks"))
 
-    def _run_notebooks(self, output_dir):
-        mkdirp(output_dir)
-        # execute notebook and save
+    def _nbconvert(self, nb, nb_out):
         jupyter = Executable("jupyter")
-        jupyter("nbconvert",
+        args = [
+            "nbconvert",
             "--ExecutePreprocessor.kernel_name=python3",
             "--execute",
-            "--allow-errors",
             "--to",
             "notebook",
-            join_path(self.prefix, "notebooks", "multi-area-model.ipynb"),
+            nb,
             "--output",
-            join_path(output_dir, "multi-area-model.ipynb"))
+            nb_out
+        ]
+        try:
+            # execute notebook and save
+            jupyter(*args)
+        except ProcessError as e:
+            # if the above fails, re-run notebook to produce output with error
+            jupyter(*(args+["--allow-errors"]))
+            raise
+
+    def _run_notebooks(self, output_dir):
+        mkdirp(output_dir)
+        self._nbconvert(join_path(self.prefix, "notebooks", "multi-area-model.ipynb"), join_path(output_dir, "multi-area-model.ipynb"))
 
     @run_after("install")
     @on_package_attributes(run_tests=True)
     def installcheck(self):
-        self._run_notebooks(join_path(self.prefix, ".spack_test_results"))
+        self._run_notebooks(join_path(self.stage.path, ".install_time_tests"))
+        copy_tree(join_path(self.stage.path, ".install_time_tests"), join_path(self.prefix, '.build'))
 
     def test_notebook(self):
         self._run_notebooks(join_path(self.test_suite.stage, self.spec.format("out-{name}-{version}-{hash:7}")))
diff --git a/spack.yaml b/spack.yaml
index f599072f2a3657293e578eb19d66e7e6028dbae3..28faf98c78b6878f7df88806ae23b04156fb9bdb 100644
--- a/spack.yaml
+++ b/spack.yaml
@@ -61,7 +61,7 @@ spack:
   - wf-biobb
   - wf-brainscales2-demos
   - wf-protein-association-rates@0.1
-  - wf-multi-area-model@v1.1.0
+  - wf-multi-area-model@1.1.1
   - wf-uq-akar4@0.1
   #- py-cerebstats
   #- py-cerebunit