Site Examples

NERSC

NERSC provides High Performance Computing systems to support research across the Office of Science program offices. NERSC has one production HPC system, Perlmutter, and a test system for Perlmutter named Muller.

Shown below is the buildtest configuration at NERSC. We define multiple Slurm executors, along with settings for configuring the compilers that are available on Perlmutter.

system:
  perlmutter:
    description: Cray Shasta system with AMD CPU and NVIDIA A100 GPUs
    hostnames:
    - login(0[3-9]|[1-3][0-9]|40)
    moduletool: lmod
    poolsize: 8
    buildspecs:
      rebuild: false
      count: 15
      format: name,description
      terse: false
    report:
      count: 25
      terse: false
      format: name,id,state,runtime,returncode
    executors:
      defaults:
        pollinterval: 30
        maxpendtime: 86400
      local:
        bash:
          description: submit jobs on local machine using bash shell
          shell: bash
        sh:
          description: submit jobs on local machine using sh shell
          shell: sh
        csh:
          description: submit jobs on local machine using csh shell
          shell: csh
        zsh:
          description: submit jobs on local machine using zsh shell
          shell: zsh
        python:
          description: submit jobs on local machine using python shell
          shell: python
      slurm:
        regular:
          qos: regular
        debug:
          qos: debug
        xfer:
          qos: xfer
        preempt:
          qos: preempt
    compilers:
      purge: false
      enable_prgenv: true
      prgenv_modules:
        gcc: PrgEnv-gnu
        cray: PrgEnv-cray
        nvhpc: PrgEnv-nvidia
      find:
        gcc: ^gcc\/.*
        cray: ^cce\/.*
        nvhpc: ^nvhpc\/.*
      compiler:
        gcc:
          builtin_gcc:
            cc: /usr/bin/gcc
            cxx: /usr/bin/g++
            fc: /usr/bin/gfortran
          gcc/11.2.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-gnu
              - gcc/11.2.0
              purge: false
          gcc/10.3.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-gnu
              - gcc/10.3.0
              purge: false
          gcc/12.2.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-gnu
              - gcc/12.2.0
              purge: false
        cray:
          cce/15.0.1:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-cray
              - cce/15.0.1
              purge: false
          cce/16.0.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-cray
              - cce/16.0.0
              purge: false
          cce/16.0.1:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-cray
              - cce/16.0.1
              purge: false
          cce/17.0.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-cray
              - cce/17.0.0
              purge: false
        nvhpc:
          nvhpc/22.7:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-nvidia
              - nvhpc/22.7
              purge: false
          nvhpc/23.1:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-nvidia
              - nvhpc/23.1
              purge: false
          nvhpc/23.9:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-nvidia
              - nvhpc/23.9
              purge: false
    cdash:
      url: https://my.cdash.org
      project: buildtest-nersc
      site: perlmutter
  muller:
    description: Muller is TDS system for Perlmutter
    hostnames:
    - login01|login02
    moduletool: lmod
    poolsize: 8
    buildspecs:
      rebuild: false
      count: 15
      format: name,description
      terse: false
    report:
      count: 25
      terse: false
      format: name,id,state,runtime,returncode
    executors:
      defaults:
        pollinterval: 30
        maxpendtime: 86400
      local:
        bash:
          description: submit jobs on local machine using bash shell
          shell: bash
        sh:
          description: submit jobs on local machine using sh shell
          shell: sh
        csh:
          description: submit jobs on local machine using csh shell
          shell: csh
        zsh:
          description: submit jobs on local machine using zsh shell
          shell: zsh
        python:
          description: submit jobs on local machine using python shell
          shell: python
      slurm:
        regular:
          qos: regular
        debug:
          qos: debug
        xfer:
          qos: xfer
        preempt:
          qos: preempt
    compilers:
      purge: false
      enable_prgenv: true
      prgenv_modules:
        gcc: PrgEnv-gnu
        cray: PrgEnv-cray
        nvhpc: PrgEnv-nvidia
      find:
        gcc: ^gcc\/.*
        cray: ^cce\/.*
        nvhpc: ^nvhpc\/.*
      compiler:
        gcc:
          builtin_gcc:
            cc: /usr/bin/gcc
            cxx: /usr/bin/g++
            fc: /usr/bin/gfortran
          gcc/11.2.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-gnu
              - gcc/11.2.0
              purge: false
          gcc/10.3.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-gnu
              - gcc/10.3.0
              purge: false
          gcc/12.2.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-gnu
              - gcc/12.2.0
              purge: false
        cray:
          cce/15.0.1:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-cray
              - cce/15.0.1
              purge: false
          cce/16.0.1:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-cray
              - cce/16.0.1
              purge: false
          cce/16.0.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-cray
              - cce/16.0.0
              purge: false
          cce/17.0.0:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-cray
              - cce/17.0.0
              purge: false
        nvhpc:
          nvhpc/22.7:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-nvidia
              - nvhpc/22.7
              purge: false
          nvhpc/23.9:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-nvidia
              - nvhpc/23.9
              purge: false
          nvhpc/23.1:
            cc: cc
            cxx: CC
            fc: ftn
            module:
              load:
              - PrgEnv-nvidia
              - nvhpc/23.1
              purge: false
    cdash:
      url: https://my.cdash.org
      project: buildtest-nersc
      site: muller
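
A buildspec references one of these executors by its fully qualified name in the form <system>.<type>.<name>, for example perlmutter.slurm.debug. Below is a minimal sketch of a script-type test that targets that executor; the test name, sbatch options and run command are illustrative assumptions, not tests from the NERSC test suite.

buildspecs:
  hostname_perlmutter:                        # hypothetical test name
    type: script
    executor: perlmutter.slurm.debug          # <system>.<type>.<name> from the configuration above
    description: print the hostname on a Perlmutter compute node
    sbatch: ["-N 1", "-t 00:05:00", "-C cpu"] # illustrative Slurm directives
    run: hostname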

Oak Ridge National Laboratory

Summit is an IBM-based system hosted at the Oak Ridge Leadership Computing Facility (OLCF). The system uses IBM Load Sharing Facility (LSF) as its batch scheduler.

The system keyword defines the name of the system, which in this example is summit. The hostnames field specifies a list of hostnames where buildtest is allowed to run in order to use this system configuration.

The system comes with several queues; for the purposes of this example we define three executors that map to the queues batch, storage and debug. LSF executors are declared under the lsf section within the executors section.

The default batch configuration is defined in defaults; for instance, we set the fields pollinterval and maxpendtime to 30s and 300s respectively. The account field specifies the project account to which all jobs will be charged. This can be customized per site and can be changed in the configuration file or overridden on the command line via buildtest build --account <ACCOUNT>.

system:
  summit:
    hostnames:
    - login1.summit.olcf.ornl.gov
    - login2.summit.olcf.ornl.gov
    moduletool: lmod
    poolsize: 8
    max_jobs: 10
    pager: false
    buildspecs:
      rebuild: false
      count: 15
      format: name,description
      terse: false
    report:
      count: 25
      terse: false
      format: name,id,state,runtime,returncode
    executors:
      defaults:
        pollinterval: 30
        maxpendtime: 300
        account: gen243-hpctest
      local:
        bash:
          description: submit jobs on local machine using bash shell
          shell: bash
        sh:
          description: submit jobs on local machine using sh shell
          shell: sh
        csh:
          description: submit jobs on local machine using csh shell
          shell: csh
        python:
          description: submit jobs on local machine using python shell
          shell: python
      lsf:
        batch:
          queue: batch
        storage:
          queue: storage
        debug:
          queue: debug
    compilers:
      find:
        gcc: ^(gcc)
      compiler:
        gcc:
          builtin_gcc:
            cc: /usr/bin/gcc
            cxx: /usr/bin/g++
            fc: /usr/bin/gfortran
          gcc/12.1.0:
            cc: gcc
            cxx: g++
            fc: gfortran
            module:
              load:
              - gcc/12.1.0
              purge: false
          gcc/9.3.0:
            cc: gcc
            cxx: g++
            fc: gfortran
            module:
              load:
              - gcc/9.3.0
              purge: false
          gcc/12.2.0:
            cc: gcc
            cxx: g++
            fc: gfortran
            module:
              load:
              - gcc/12.2.0
              purge: false
          gcc/9.3.0-compiler_only:
            cc: gcc
            cxx: g++
            fc: gfortran
            module:
              load:
              - gcc/9.3.0-compiler_only
              purge: false
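
As with the Slurm executors at NERSC, a buildspec selects one of these LSF executors by its fully qualified name such as summit.lsf.batch. The sketch below is illustrative; the test name, bsub options and run command are assumptions rather than part of the OLCF test suite.

buildspecs:
  hostname_summit:                    # hypothetical test name
    type: script
    executor: summit.lsf.batch        # submits through the batch queue defined above
    description: print the hostname on a Summit compute node
    bsub: ["-W 10", "-nnodes 1"]      # illustrative LSF directives
    run: hostname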

Argonne National Laboratory

The Joint Laboratory for System Evaluation (JLSE) provides a testbed of emerging HPC systems. The default scheduler is Cobalt, which is configured in the cobalt section within the executors field.

Jobs are submitted with qsub, the launcher used by all Cobalt executors. In each Cobalt executor, the queue property specifies the queue to submit jobs to; for instance, the executor iris with queue: iris will submit jobs using qsub -q iris.

system:
  jlse:
    # hostnames on JLSE where jobs are run are jlsebatch[1-2]
    hostnames: ['^jlsebatch\d{1}$']
    moduletool: environment-modules
    poolsize: 8
    max_jobs: 10
    pager: False
    buildspecs:
      rebuild: False
      count: 15
      format: "name,description"
      terse: False
    report:
      count: 25
      terse: False
      format: "name,id,state,runtime,returncode"
    executors:
      defaults:
        pollinterval: 30
        maxpendtime: 300
      local:
        bash:
          description: submit jobs on local machine using bash shell
          shell: bash
        sh:
          description: submit jobs on local machine using sh shell
          shell: sh
        csh:
          description: submit jobs on local machine using csh shell
          shell: csh
        python:
          description: submit jobs on local machine using python shell
          shell: python
      cobalt:
        iris:
          queue: iris
    compilers:
      find:
        gcc: "^(gcc)"
      compiler:
        gcc:
          builtin_gcc:
            cc: /usr/bin/gcc
            cxx: /usr/bin/g++
            fc: /usr/bin/gfortran
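
A buildspec targets the Cobalt executor the same way, via its fully qualified name jlse.cobalt.iris, and buildtest will submit the generated job script with qsub -q iris. The following minimal sketch uses a hypothetical test name and a trivial run command.

buildspecs:
  hostname_iris:                      # hypothetical test name
    type: script
    executor: jlse.cobalt.iris        # submits with qsub -q iris
    description: print the hostname on an iris node
    run: hostname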