Commit 872880d6 authored by weidler

Steps and Workflow.

parent d95e71be
venv/
# Created by https://www.toptal.com/developers/gitignore/api/python,pycharm
# Edit at https://www.toptal.com/developers/gitignore?templates=python,pycharm
### PyCharm ###
# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf
# AWS User-specific
.idea/**/aws.xml
# Generated files
.idea/**/contentModel.xml
# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml
# Gradle
.idea/**/gradle.xml
.idea/**/libraries
# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr
# CMake
cmake-build-*/
# Mongo Explorer plugin
.idea/**/mongoSettings.xml
# File-based project format
*.iws
# IntelliJ
out/
# mpeltonen/sbt-idea plugin
.idea_modules/
# JIRA plugin
atlassian-ide-plugin.xml
# Cursive Clojure plugin
.idea/replstate.xml
# SonarLint plugin
.idea/sonarlint/
# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties
# Editor-based Rest Client
.idea/httpRequests
# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
### PyCharm Patch ###
# Comment Reason: https://github.com/joeblau/gitignore.io/issues/186#issuecomment-215987721
# *.iml
# modules.xml
# .idea/misc.xml
# *.ipr
# Sonarlint plugin
# https://plugins.jetbrains.com/plugin/7973-sonarlint
.idea/**/sonarlint/
# SonarQube Plugin
# https://plugins.jetbrains.com/plugin/7238-sonarqube-community-plugin
.idea/**/sonarIssues.xml
# Markdown Navigator plugin
# https://plugins.jetbrains.com/plugin/7896-markdown-navigator-enhanced
.idea/**/markdown-navigator.xml
.idea/**/markdown-navigator-enh.xml
.idea/**/markdown-navigator/
# Cache file creation bug
# See https://youtrack.jetbrains.com/issue/JBR-2257
.idea/$CACHE_FILE$
# CodeStream plugin
# https://plugins.jetbrains.com/plugin/12206-codestream
.idea/codestream.xml
# Azure Toolkit for IntelliJ plugin
# https://plugins.jetbrains.com/plugin/8053-azure-toolkit-for-intellij
.idea/**/azureSettings.xml
### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/#use-with-ide
.pdm.toml
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# SageMath parsed files
*.sage.py
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/
### Python Patch ###
# Poetry local configuration file - https://python-poetry.org/docs/configuration/#local-configuration
poetry.toml
# End of https://www.toptal.com/developers/gitignore/api/python,pycharm
# Default ignored files
/shelf/
/workspace.xml
# Editor-based HTTP Client requests
/httpRequests/
# Datasource local storage ignored files
/dataSources/
/dataSources.local.xml
# Sc5 CWL Workflow
## Getting started
To make it easy for you to get started with GitLab, here's a list of recommended next steps.
Already a pro? Just edit this README.md and make it your own. Want to make it easy? [Use the template at the bottom](#editing-this-readme)!
## Add your files
- [ ] [Create](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#create-a-file) or [upload](https://docs.gitlab.com/ee/user/project/repository/web_editor.html#upload-a-file) files
- [ ] [Add files using the command line](https://docs.gitlab.com/ee/gitlab-basics/add-file.html#add-a-file-using-the-command-line) or push an existing Git repository with the following command:
```
cd existing_repo
git remote add origin https://gitlab.ebrains.eu/weidler/sc5-cwl-workflow.git
git branch -M main
git push -uf origin main
```
## Integrate with your tools
- [ ] [Set up project integrations](https://gitlab.ebrains.eu/weidler/sc5-cwl-workflow/-/settings/integrations)
## Collaborate with your team
- [ ] [Invite team members and collaborators](https://docs.gitlab.com/ee/user/project/members/)
- [ ] [Create a new merge request](https://docs.gitlab.com/ee/user/project/merge_requests/creating_merge_requests.html)
- [ ] [Automatically close issues from merge requests](https://docs.gitlab.com/ee/user/project/issues/managing_issues.html#closing-issues-automatically)
- [ ] [Enable merge request approvals](https://docs.gitlab.com/ee/user/project/merge_requests/approvals/)
- [ ] [Automatically merge when pipeline succeeds](https://docs.gitlab.com/ee/user/project/merge_requests/merge_when_pipeline_succeeds.html)
## Test and Deploy
Use the built-in continuous integration in GitLab.
- [ ] [Get started with GitLab CI/CD](https://docs.gitlab.com/ee/ci/quick_start/index.html)
- [ ] [Analyze your code for known vulnerabilities with Static Application Security Testing (SAST)](https://docs.gitlab.com/ee/user/application_security/sast/)
- [ ] [Deploy to Kubernetes, Amazon EC2, or Amazon ECS using Auto Deploy](https://docs.gitlab.com/ee/topics/autodevops/requirements.html)
- [ ] [Use pull-based deployments for improved Kubernetes management](https://docs.gitlab.com/ee/user/clusters/agent/)
- [ ] [Set up protected environments](https://docs.gitlab.com/ee/ci/environments/protected_environments.html)
***
# Editing this README
When you're ready to make this README your own, just edit this file and use the handy template below (or feel free to structure it however you want - this is just a starting point!). Thank you to [makeareadme.com](https://www.makeareadme.com/) for this template.
## Suggestions for a good README
Every project is different, so consider which of these sections apply to yours. The sections used in the template are suggestions for most open source projects. Also keep in mind that while a README can be too long and detailed, too long is better than too short. If you think your README is too long, consider utilizing another form of documentation rather than cutting out information.
## Name
Choose a self-explaining name for your project.
## Description
Let people know what your project can do specifically. Provide context and add a link to any reference visitors might be unfamiliar with. A list of Features or a Background subsection can also be added here. If there are alternatives to your project, this is a good place to list differentiating factors.
## Badges
On some READMEs, you may see small images that convey metadata, such as whether or not all the tests are passing for the project. You can use Shields to add some to your README. Many services also have instructions for adding a badge.
## Visuals
Depending on what you are making, it can be a good idea to include screenshots or even a video (you'll frequently see GIFs rather than actual videos). Tools like ttygif can help, but check out Asciinema for a more sophisticated method.
## Installation
Within a particular ecosystem, there may be a common way of installing things, such as using Yarn, NuGet, or Homebrew. However, consider the possibility that whoever is reading your README is a novice and would like more guidance. Listing specific steps helps remove ambiguity and gets people using your project as quickly as possible. If it only runs in a specific context, like a particular programming language version or operating system, or has dependencies that have to be installed manually, also add a Requirements subsection.
## Usage
Use examples liberally, and show the expected output if you can. It's helpful to have inline the smallest example of usage that you can demonstrate, while providing links to more sophisticated examples if they are too long to reasonably include in the README.
## Support
Tell people where they can go for help. It can be any combination of an issue tracker, a chat room, an email address, etc.
## Roadmap
If you have ideas for releases in the future, it is a good idea to list them in the README.
## Contributing
State if you are open to contributions and what your requirements are for accepting them.
For people who want to make changes to your project, it's helpful to have some documentation on how to get started. Perhaps there is a script that they should run or some environment variables that they need to set. Make these steps explicit. These instructions could also be useful to your future self.
You can also document commands to lint the code or run tests. These steps help to ensure high code quality and reduce the likelihood that the changes inadvertently break something. Having instructions for running tests is especially helpful if it requires external setup, such as starting a Selenium server for testing in a browser.
## Authors and acknowledgment
Show your appreciation to those who have contributed to the project.
## License
For open source projects, say how it is licensed.
## Project status
If you have run out of energy or time for your project, put a note at the top of the README saying that development has slowed down or stopped completely. Someone may choose to fork your project or volunteer to step in as a maintainer or owner, allowing your project to keep going. You can also make an explicit request for maintainers.
default_nproc: 12
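The single `default_nproc` entry matches the schema of cwltool's MPI platform configuration, so this file is presumably meant to be handed to cwltool via `--mpi-config-file` so that the `MPIRequirement` of the training tool below can launch its worker processes. A minimal sketch of such an invocation; the file names `mpi_config.yml`, `workflow.cwl`, and `job.yml` are assumptions, as is a cwltool version with MPI extension support:
```
# --enable-ext activates cwltool extensions such as cwltool:MPIRequirement
# mpi_config.yml is the platform config shown above (assumed file name)
cwltool --enable-ext --mpi-config-file mpi_config.yml workflow.cwl job.yml
```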
ARG UBUNTU_VERSION=18.04
ARG ARCH=
ARG CUDA=11.2
FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}.1-cudnn8-runtime-ubuntu${UBUNTU_VERSION} as base
ARG DEBIAN_FRONTEND=noninteractive
# key fix
RUN sed -i '/developer\.download\.nvidia\.com\/compute\/cuda\/repos/d' /etc/apt/sources.list.d/*
RUN sed -i '/developer\.download\.nvidia\.com\/compute\/machine-learning\/repos/d' /etc/apt/sources.list.d/*
RUN apt-key del 7fa2af80
COPY cuda-keyring_1.0-1_all.deb cuda-keyring_1.0-1_all.deb
RUN dpkg -i cuda-keyring_1.0-1_all.deb
WORKDIR /usr/local/cuda-11.2/lib64
RUN ln -s libcusolver.so.11 libcusolver.so.10
WORKDIR /
# install packages
RUN apt-get update && apt-get install -y --no-install-recommends \
build-essential \
curl \
git \
swig \
wget \
virtualenv \
libgl1-mesa-dev \
libgl1-mesa-glx \
libglew-dev \
libc6-dev \
libosmesa6-dev \
patchelf \
software-properties-common \
gfortran \
&& apt-get install --no-install-recommends -y file g++ gcc make gdb strace ca-certificates \
&& add-apt-repository -y ppa:deadsnakes/ppa \
&& apt-get update \
&& apt-get --no-install-recommends install -y python3.8-dev python3.8 python3-pip \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*
# INSTALL MPI
RUN wget -q http://www.mpich.org/static/downloads/3.1.4/mpich-3.1.4.tar.gz
RUN tar xf mpich-3.1.4.tar.gz
WORKDIR mpich-3.1.4
RUN ./configure --disable-fortran --enable-fast=all,O3 --prefix=/usr
RUN make -j$(nproc)
RUN make install
RUN ldconfig
WORKDIR /
# ENVIRONMENT VARIABLES
ENV LD_LIBRARY_PATH="/usr/lib/nvidia:$LD_LIBRARY_PATH"
ENV LD_LIBRARY_PATH="/usr/local/nvidia/lib64:$LD_LIBRARY_PATH"
ENV LD_LIBRARY_PATH="/usr/local/cuda-11.2/lib64:$LD_LIBRARY_PATH"
# matplotlib
ENV MPLCONFIGDIR=/var/cache/matplotlib
# SETUP ENV
ENV VIRTUAL_ENV=/venv
RUN virtualenv --python=python3.8 $VIRTUAL_ENV
ENV PATH="$VIRTUAL_ENV/bin:$PATH"
# INSTALL TOOL
RUN pip install angorapy==0.7.1 \
&& pip install mujoco \
&& pip install imageio \
&& pip install distance
WORKDIR /
COPY train.py train.py
# CLEANUP
RUN pip cache purge
# FINAL
CMD [ "/bin/bash" ]
File added (binary; presumably cuda-keyring_1.0-1_all.deb, which the Dockerfile above copies into the image)
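The `COPY` step above expects NVIDIA's repository keyring package in the build context, which is the binary file added by this commit. A hedged sketch of fetching it and building the training image under the tag that the CWL tool below pulls:
```
# official NVIDIA keyring for the rotated CUDA repository key (Ubuntu 18.04)
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-keyring_1.0-1_all.deb
# build and push the image referenced by the DockerRequirement below
docker build -t docker-registry.ebrains.eu/sc5/sc5_workflow_rltrain:latest .
docker push docker-registry.ebrains.eu/sc5/sc5_workflow_rltrain:latest
```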
#!/usr/bin/env cwl-runner

# --------------------------------------------------- COMMAND LINE TOOL INFO ----------------------------------------------------
cwlVersion: v1.0
class: CommandLineTool

# -------------------------------------------------------- BASE COMMAND ---------------------------------------------------------
baseCommand: ['python3', '/train.py']

# ----------------------------------------------------- HINTS/REQUIREMENTS ------------------------------------------------------
$namespaces:
  cwltool: http://commonwl.org/cwltool#

requirements:
  cwltool:MPIRequirement:
    processes: $(inputs.workers)

hints:
  DockerRequirement:
    dockerPull: docker-registry.ebrains.eu/sc5/sc5_workflow_rltrain:latest
  ResourceRequirement:
    ramMin: 16000
    outdirMin: 1000

# --------------------------------------------------------- TOOL INPUTS ---------------------------------------------------------
inputs:
  env:
    type: string
    inputBinding:
      position: 1
  pcon:
    type: string?
    inputBinding:
      position: 2
      prefix: --pcon
  rcon:
    type: string?
    inputBinding:
      position: 3
      prefix: --rcon
  workers:
    type: int?
    inputBinding:
      position: 4
      prefix: --workers
  iterations:
    type: int?
    inputBinding:
      position: 5
      prefix: --iterations
  horizon:
    type: int?
    inputBinding:
      position: 6
      prefix: --horizon

# -------------------------------------------------------- TOOL OUTPUTS ---------------------------------------------------------
stdout: output_file.txt
outputs:
  out:
    type: stdout
  out_data:
    type: File
    outputBinding:
      glob: storage/experiments/*/progress.json
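Before wiring the tool into the workflow, it can presumably be exercised on its own; a sketch, assuming the tool is saved under `step1/reinforcement_learning_tool.cwl` (the path the workflow below uses) and that the MPI platform config is named `mpi_config.yml`:
```
cwltool --enable-ext --mpi-config-file mpi_config.yml \
    step1/reinforcement_learning_tool.cwl \
    --env CartPole-v1 --workers 6 --iterations 2 --horizon 512
```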
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

import warnings

warnings.filterwarnings("ignore", category=DeprecationWarning)

import pprint
import traceback

import distance
import numpy as np

with np.testing.suppress_warnings() as sup:
    sup.filter(DeprecationWarning)
    sup.filter(UserWarning)

    from angorapy.train import run_experiment
    from angorapy.utilities.defaults import autoselect_distribution

    import tensorflow as tf

    import argparse
    import logging

    import argcomplete
    import gym  # needed below for gym.envs.registry
    from gym.spaces import Box, Discrete, MultiDiscrete

    from angorapy.configs import hp_config
    from angorapy.common.policies import get_distribution_by_short_name
    from angorapy.models import get_model_builder
    from angorapy.common.const import COLORS
    from angorapy.utilities.monitoring import Monitor
    from angorapy.utilities.util import env_extract_dims
    from angorapy.common.wrappers import make_env
    from angorapy.common.transformers import StateNormalizationTransformer, RewardNormalizationTransformer
    from angorapy.agent.ppo_agent import PPOAgent
    from angorapy.environments import *

from mpi4py import MPI
if __name__ == "__main__":
    tf.get_logger().setLevel('INFO')

    all_envs = [e.id for e in list(gym.envs.registry.all())]

    # parse commandline arguments
    parser = argparse.ArgumentParser(description="Train a PPO Agent on some task.")

    # general parameters
    parser.add_argument("env", nargs='?', type=str, default="LunarLander-v2", help="the target gym environment")
    parser.add_argument("--architecture", choices=["simple", "deeper", "wider", "shadow"], default="simple",
                        help="architecture of the policy")
    parser.add_argument("--model", choices=["ffn", "rnn", "lstm", "gru"], default="ffn",
                        help="model type, if the architecture allows for choices")
    parser.add_argument("--distribution", type=str, default=None,
                        choices=["categorical", "gaussian", "beta", "multi-categorical"])
    parser.add_argument("--shared", action="store_true",
                        help="make the model share part of the network for policy and value")
    parser.add_argument("--iterations", type=int, default=5000, help="number of iterations before training ends")

    # meta arguments
    parser.add_argument("--pcon", type=str, default=None, help="config name (utilities/hp_config.py) to be loaded")
    parser.add_argument("--rcon", type=str, default=None,
                        help="config (utilities/reward_config.py) of the reward function")
    parser.add_argument("--cpu", action="store_true", help="use cpu only")
    parser.add_argument("--sequential", action="store_true", help="run workers sequentially")
    parser.add_argument("--load-from", type=int, default=None, help="load from given agent id")
    parser.add_argument("--preload", type=str, default=None, help="load visual component weights from pretraining")
    parser.add_argument("--export-file", type=int, default=None, help="save the policy into a file to be loaded by workers")
    parser.add_argument("--eval", action="store_true", help="additionally evaluate to get at least 5 episodes")
    parser.add_argument("--radical-evaluation", action="store_true", help="only record stats from separate evaluation")
    parser.add_argument("--save-every", type=int, default=0, help="save agent every given number of iterations")
    parser.add_argument("--monitor-frequency", type=int, default=1, help="update the monitor every n iterations")
    parser.add_argument("--gif-every", type=int, default=0, help="make a gif every n iterations")
    parser.add_argument("--debug", action="store_true", help="run in debug mode (eager mode)")
    parser.add_argument("--no-monitor", action="store_true", help="don't use a monitor")

    # gathering parameters
    parser.add_argument("--workers", type=int, default=8, help="the number of workers exploring the environment")
    parser.add_argument("--horizon", type=int, default=2048,
                        help="number of time steps one worker generates per cycle")
    parser.add_argument("--discount", type=float, default=0.99, help="discount factor for future rewards")
    parser.add_argument("--lam", type=float, default=0.97, help="lambda parameter in the GAE algorithm")
    parser.add_argument("--no-state-norming", action="store_true", help="do not normalize states")
    parser.add_argument("--no-reward-norming", action="store_true", help="do not normalize rewards")

    # optimization parameters
    parser.add_argument("--epochs", type=int, default=3, help="the number of optimization epochs in each cycle")
    parser.add_argument("--batch-size", type=int, default=64, help="minibatch size during optimization")
    parser.add_argument("--lr-pi", type=float, default=1e-3, help="learning rate of the policy")
    parser.add_argument("--lr-schedule", type=str, default=None, choices=[None, "exponential"],
                        help="lr schedule type")
    parser.add_argument("--clip", type=float, default=0.2, help="clipping range around 1 for the objective function")
    parser.add_argument("--c-entropy", type=float, default=0.01, help="entropy factor in objective function")
    parser.add_argument("--c-value", type=float, default=1, help="value factor in objective function")
    parser.add_argument("--tbptt", type=int, default=16, help="length of subsequences in truncated BPTT")
    parser.add_argument("--grad-norm", type=float, default=0.5, help="norm for gradient clipping; 0 deactivates")
    parser.add_argument("--clip-values", action="store_true", help="clip value objective")
    parser.add_argument("--stop-early", action="store_true", help="stop early if the environment's reward threshold was surpassed")
    parser.add_argument("--experiment-group", type=str, default="default", help="experiment group identifier")

    comm = MPI.COMM_WORLD
    rank = comm.Get_rank()
    is_root = rank == 0

    # read arguments
    argcomplete.autocomplete(parser)
    args = parser.parse_args()

    if args.env not in all_envs:
        if is_root:
            indices = np.argsort([distance.levenshtein(w, args.env) for w in all_envs])[:3]
            print(f"Unknown environment {args.env}. Did you mean one of {[all_envs[i] for i in indices]}?")
        exit()
    # if a config is given, load it as defaults, then overwrite them with any explicitly given parameters
    if args.pcon is not None:
        try:
            parser.set_defaults(**getattr(hp_config, args.pcon))
            args = parser.parse_args()
            if is_root:
                print(f"Loaded config {args.pcon}.")
        except AttributeError as err:
            raise ImportError("Cannot find config under given name. Does it exist in utilities/hp_config.py?") from err

    if args.debug:
        tf.config.run_functions_eagerly(True)
        if is_root:
            logging.warning("YOU ARE RUNNING IN DEBUG MODE!")

    try:
        run_experiment(args.env, vars(args), use_monitor=not args.no_monitor)
    except Exception as e:
        if rank == 0:
            traceback.print_exc()
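Inside the container (or any environment with MPICH and the dependencies installed), the script can also be launched directly under MPI; a sketch mirroring the job parameters used further below, with one MPI rank per worker:
```
# each rank discovers its role via mpi4py's MPI.COMM_WORLD in train.py
mpirun -n 6 python3 train.py CartPole-v1 --pcon discrete_no_norms --workers 6 --horizon 512 --iterations 2
```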
FROM python:3.8-slim
# install dependencies
RUN pip install --no-cache-dir numpy matplotlib  # argparse comes with the standard library
# copy the python script and make it executable
COPY visualization.py .
RUN chmod +x visualization.py
CMD [ "/bin/bash" ]
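As with the training image, this image presumably has to be built and pushed under the tag that the visualization tool below pulls:
```
docker build -t docker-registry.ebrains.eu/sc5/sc5_workflow_visualization:latest .
docker push docker-registry.ebrains.eu/sc5/sc5_workflow_visualization:latest
```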
cwlVersion: v1.0
class: CommandLineTool

baseCommand: ['python3', '/visualization.py']

hints:
  DockerRequirement:
    dockerPull: docker-registry.ebrains.eu/sc5/sc5_workflow_visualization:latest
  ResourceRequirement:
    ramMin: 2048
    outdirMin: 4096

inputs:
  input_file:
    type: File
    inputBinding:
      position: 1
  output_file_name:
    type: string
    inputBinding:
      prefix: --output_file
      position: 2

outputs:
  plot:
    type: File
    outputBinding:
      glob: $(inputs.output_file_name)
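This tool, too, can presumably be tested in isolation against the sample `progress.json` below; a sketch, assuming the path `step2/analysis_tool.cwl` used by the workflow:
```
cwltool step2/analysis_tool.cwl --input_file progress.json --output_file_name out.png
```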
{
  "rewards": {
    "mean": [26.58, 45.22, 60.77],
    "stdev": [13.6],
    "last_cycle": []
  },
  "lengths": {
    "mean": [26.58],
    "stdev": [13.6],
    "last_cycle": []
  },
  "entropies": [0.6919],
  "vloss": [49.9441],
  "ploss": [0.0227],
  "preprocessors": {}
}
#!/usr/bin/env python3
import argparse
import json

import matplotlib.pyplot as plt

parser = argparse.ArgumentParser()
parser.add_argument('input_file', help='json file containing the progress indicators')
parser.add_argument('--output_file', default='out.png', help='file where the output plot should be written')
args = parser.parse_args()

fig, ax = plt.subplots(figsize=(12, 6))

with open(args.input_file) as f:
    data = json.load(f)

# a label is needed for ax.legend() to have something to show
ax.plot(data["rewards"]["mean"], label="mean reward")
ax.set_xlabel('cycle')
ax.set_ylabel('reward')
ax.legend()
fig.savefig(args.output_file)
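A quick local check of the script against the sample progress file above (file names assumed):
```
python3 visualization.py progress.json --output_file reward_curve.png
```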
# -------------------------------------------------------- WORKFLOW INFO --------------------------------------------------------
cwlVersion: v1.0
class: Workflow

# ------------------------------------------------------- WORKFLOW INPUTS -------------------------------------------------------
inputs:
  env: string
  pcon: string
  rcon: string
  workers: int  # (32-bit signed)
  iterations: int
  horizon: int
  output_file_name: string

# ------------------------------------------------------- WORKFLOW OUTPUTS ------------------------------------------------------
outputs:
  output_file:
    type: File
    outputSource: reinforcement_learning/out
  output_progress:
    type: File
    outputSource: reinforcement_learning/out_data
  output_visualization:
    type: File
    outputSource: visualization/plot

# ------------------------------------------------------- WORKFLOW STEPS --------------------------------------------------------
steps:
  reinforcement_learning:
    run: step1/reinforcement_learning_tool.cwl
    in:
      env: env
      pcon: pcon
      rcon: rcon
      workers: workers
      iterations: iterations
      horizon: horizon
    out: [out, out_data]
  visualization:
    run: step2/analysis_tool.cwl
    in:
      input_file: reinforcement_learning/out_data
      output_file_name: output_file_name
    out: [plot]
env: CartPole-v1
pcon: discrete_no_norms
rcon: default
workers: 6
horizon: 512
iterations: 2
output_file_name: 'output.png'
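With the images pushed and the job file above saved (for example as `job.yml`; the name is an assumption), a full workflow run then looks like this sketch; it should yield the training log `output_file.txt` (the tool's stdout), the globbed `progress.json`, and the plot `output.png` in the output directory:
```
cwltool --enable-ext --mpi-config-file mpi_config.yml workflow.cwl job.yml
```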