Draft: Fesom26 rc1 scripts #133

Open · wants to merge 8 commits into base: main
1 change: 1 addition & 0 deletions .gitignore
@@ -0,0 +1 @@
Results
22 changes: 22 additions & 0 deletions job_levante
@@ -0,0 +1,22 @@
#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

echo "Submitted job: $SLURM_JOB_ID"
squeue -u $USER

date
tripyrun tripyrun_fesom26rc_bugfix_hnode.yml > "tripyrun.out"
date
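The script above is repeated, with only the tripyrun YAML config and log-file name changed, in the seven job_levante_* variants that follow. As a purely hypothetical sketch (not part of this PR), the variants could be rendered from one template so the near-identical files stay in sync; the experiment names below are examples taken from the files in this diff:

# Hypothetical helper, not part of this PR: render one Levante batch script
# per tripyrun experiment instead of maintaining eight copies by hand.
import pathlib

TEMPLATE = """#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

date
tripyrun tripyrun_fesom26rc_{name}.yaml > "{name}.log"
date
"""

for name in ["bugfix_hnode", "bugfix_ice_cutoff", "fixsrfstress"]:
    pathlib.Path(f"job_levante_{name}").write_text(TEMPLATE.format(name=name))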
22 changes: 22 additions & 0 deletions job_levante_addDVDsergeyandknut
@@ -0,0 +1,22 @@
#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

echo "Submitted job: $SLURM_JOB_ID"
squeue -u $USER

date
tripyrun tripyrun_fesom26rc_addDVDsergeyandknut.yaml > "addDVDsergeyandknut.log"
date
22 changes: 22 additions & 0 deletions job_levante_bugfix_hnode
@@ -0,0 +1,22 @@
#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

echo "Submitted job: $SLURM_JOB_ID"
squeue -u $USER

date
tripyrun tripyrun_fesom26rc_bugfix_hnode.yaml > "bugfix_hnode.log"
date
22 changes: 22 additions & 0 deletions job_levante_bugfix_ice_cutoff
@@ -0,0 +1,22 @@
#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

echo "Submitted job: $SLURM_JOB_ID"
squeue -u $USER

date
tripyrun tripyrun_fesom26rc_bugfix_ice_cutoff.yaml > "bugfix_ice_cutoff.log"
date
22 changes: 22 additions & 0 deletions job_levante_fixsrfstress
@@ -0,0 +1,22 @@
#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

echo "Submitted job: $SLURM_JOB_ID"
squeue -u $USER

date
tripyrun tripyrun_fesom26rc_fixsrfstress.yaml > "fixsrfstress.log"
date
22 changes: 22 additions & 0 deletions job_levante_ice_fct_screening
@@ -0,0 +1,22 @@
#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

echo "Submitted job: $SLURM_JOB_ID"
squeue -u $USER

date
tripyrun tripyrun_fesom26rc_ice_fct_screening.yaml > "ice_fct_screening.log"
date
22 changes: 22 additions & 0 deletions job_levante_ifs_thermo
@@ -0,0 +1,22 @@
#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

echo "Submitted job: $SLURM_JOB_ID"
squeue -u $USER

date
tripyrun tripyrun_fesom26rc_ifs_thermo.yaml > "ifs_thermo.log"
date
22 changes: 22 additions & 0 deletions job_levante_remove_PARMS
@@ -0,0 +1,22 @@
#!/bin/bash
#SBATCH --job-name=ref
#SBATCH -p compute
#SBATCH --ntasks-per-node=128
#SBATCH --ntasks=128
#SBATCH --time=08:00:00
#SBATCH -o slurm-%j.log
#SBATCH -e slurm-%j.log
#SBATCH -A ba0989

source /home/a/a270092/loadconda.sh
conda activate py39

ulimit -s 102400
ulimit -n 1000000

echo "Submitted job: $SLURM_JOB_ID"
squeue -u $USER

date
tripyrun tripyrun_fesom26rc_remove_PARMS.yaml > "remove_PARMS.log"
date
4 changes: 3 additions & 1 deletion templates_notebooks/template_2dmesh.ipynb
@@ -162,6 +162,8 @@
" from dask.distributed import Client\n",
" # from dask.diagnostics import ProgressBar\n",
" import dask\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
@@ -291,7 +293,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.19"
"version": "3.9.19"
}
},
"nbformat": 4,
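Every notebook template in this PR gets the same two-line addition: the dask scheduler's worker time-to-live check is disabled, so workers that miss heartbeats while stuck in long blocking computations are not declared dead and restarted mid-task (the same setting was previously present as a commented-out workaround in template_hslice.ipynb and is activated there by this PR). A minimal standalone sketch of the resulting client setup; the worker count and memory budget are illustrative placeholders, not values from this PR:

from dask.distributed import Client
from dask import config as cfg

# Disable the scheduler's worker heartbeat timeout ("worker-ttl") so busy
# workers are not flagged as unresponsive and restarted.
cfg.set({'distributed.scheduler.worker-ttl': None})

# Illustrative placeholders, not values from this PR:
parallel_nprc = 4      # number of dask workers
parallel_tmem = 16.0   # total memory budget in GB

client = Client(n_workers=parallel_nprc,
                threads_per_worker=1,
                memory_limit='{:3.3f} GB'.format(parallel_tmem / parallel_nprc))
print(client)

The trade-off: with worker-ttl set to None the scheduler no longer evicts genuinely crashed workers on its own, so hung jobs must instead be caught by the batch system's wall-clock limit.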
4 changes: 3 additions & 1 deletion templates_notebooks/template_hmesh.ipynb
@@ -177,6 +177,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",
@@ -359,7 +361,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.19"
"version": "3.9.19"
},
"latex_envs": {
"LaTeX_envs_menu_present": true,
2 changes: 2 additions & 0 deletions templates_notebooks/template_hovm.ipynb
@@ -175,6 +175,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_hovm_clim.ipynb
@@ -173,6 +173,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_hquiver.ipynb
@@ -185,6 +185,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",
6 changes: 2 additions & 4 deletions templates_notebooks/template_hslice.ipynb
@@ -177,10 +177,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" \n",
"# # disable dask worker heartbeat --> prevent error ?\n",
"# from dask import config as cfg\n",
"# cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" \n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_hslice_clim.ipynb
@@ -159,6 +159,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_hslice_isotdep.ipynb
@@ -179,6 +179,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transect.ipynb
@@ -170,6 +170,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transect_clim.ipynb
@@ -168,6 +168,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transect_transp.ipynb
@@ -173,6 +173,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
7 changes: 5 additions & 2 deletions templates_notebooks/template_transect_transp_t.ipynb
@@ -2,14 +2,15 @@
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"execution_count": 2,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"/albedo/home/pscholz/tripyview\n"
"The autoreload extension is already loaded. To reload it, use:\n",
" %reload_ext autoreload\n"
]
}
],
@@ -194,6 +195,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transect_zmean.ipynb
@@ -176,6 +176,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transect_zmean_clim.ipynb
@@ -176,6 +176,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transp_dmoc.ipynb
@@ -187,6 +187,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transp_dmoc_srfcbflx.ipynb
@@ -178,6 +178,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transp_dmoc_t.ipynb
@@ -194,6 +194,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transp_dmoc_wdiap.ipynb
@@ -169,6 +169,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" client = Client(n_workers=parallel_nprc, threads_per_worker=1, memory_limit='{:3.3f} GB'.format(parallel_tmem/parallel_nprc))\n",
2 changes: 2 additions & 0 deletions templates_notebooks/template_transp_ghflx.ipynb
@@ -172,6 +172,8 @@
"# start parallel dask client\n",
"if do_parallel and not client_runs:\n",
" from dask.distributed import Client\n",
" from dask import config as cfg\n",
" cfg.set({'distributed.scheduler.worker-ttl': None})\n",
" ##import dask\n",
" ## dask.config.config.get('distributed').get('dashboard').update({'link':'{JUPYTERHUB_SERVICE_PREFIX}/proxy/{port}/status'})\n",
" print(' --> memory_limit: {:3.3f} GB'.format(parallel_tmem/(parallel_nprc)))\n",