import os

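# Use four OpenMP threads for the OpenMP examples below.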
os.environ['OMP_NUM_THREADS'] = '4'

from rsf.proj import *

# Input data to use in the example:
# produces random numbers with a uniform distribution on [-1000,1000]
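# (a 1024 x 1024 x 130 cube of single-precision floats, roughly 0.5 GB)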
Flow('random',None,
    'spike n1=1024 n2=1024 n3=130 | noise rep=y range=1000 mean=0 type=n seed=2013')

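# Sequential reference: clip the values to [-500,500] in a single process,
# for comparison with the parallel versions below.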
Flow('clip_seq','random','clip clip=500')

# 1: In this case SCons will split the input, process the pieces
#    separately, and cat all the individual outputs into one target.
Flow('clip_scons','random','clip clip=500',split=[3,130],reduce='cat')
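# (Roughly speaking: the third axis, of length 130, is divided into chunks,
#  each chunk is windowed out with sfwindow and processed independently, and
#  reduce='cat' concatenates the partial results with sfcat along that axis.)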

# 2: Now the type of parallelization is specified explicitly: OpenMP,
#    parallelizing over the slowest axis (the third one).
Flow('clip_omp_scons','random','clip clip=500',split=[3,'omp'],reduce='cat')
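# (Here the command is run through the sfomp wrapper, which honors
#  $OMP_NUM_THREADS, set to 4 at the top of this script.)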

# 3: In this case explicit parallelization with MPI is used:
#    split=[n,'mpi'] together with np=m, where n is the axis to split
#    and m is the number of CPUs to use.
Flow('clip_mpi_scons','random','clip clip=500',split=[3,'mpi'],np=8,reduce='cat')
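# (Requires a working MPI installation; under the hood the job is launched
#  with mpirun, presumably through Madagascar's sfmpi wrapper.)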

# 4: OpenMP parallelization inside the code itself. It uses the maximum
#    number of cores available if $OMP_NUM_THREADS is not set.
Program('clip_omp','Mclip.c')
Flow('clip_omp_incore','random clip_omp.exe','./${SOURCES[1]} clip=500')
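# For reference, a minimal sketch of what Mclip.c might look like, assuming
# the standard RSF C API (illustrative only; the actual source file ships
# alongside this script):
#
#   #include <rsf.h>
#
#   int main(int argc, char* argv[])
#   {
#       int n1, n2, i1, i2;
#       float clip, *trace;
#       sf_file in, out;
#
#       sf_init(argc,argv);
#       in  = sf_input("in");
#       out = sf_output("out");
#
#       if (!sf_histint(in,"n1",&n1)) sf_error("No n1= in input");
#       n2 = sf_leftsize(in,1);           /* all the remaining dimensions */
#       if (!sf_getfloat("clip",&clip)) sf_error("Need clip=");
#
#       trace = sf_floatalloc(n1);
#
#       for (i2=0; i2 < n2; i2++) {
#           sf_floatread(trace,n1,in);
#   #pragma omp parallel for
#           for (i1=0; i1 < n1; i1++) {
#               if      (trace[i1] >  clip) trace[i1] =  clip;
#               else if (trace[i1] < -clip) trace[i1] = -clip;
#           }
#           sf_floatwrite(trace,n1,out);
#       }
#
#       exit(0);
#   }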

# 5: Hybrid case: OpenMP parallelization inside the code, plus extra
#    parallelization with MPI. On a single computer this doesn't make
#    much sense, but on a cluster it does.
Flow('clip_hybrid','random clip_omp.exe','./${SOURCES[1]} clip=500',split=[3,'mpi'],np=4,reduce='cat')
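# Note: the split/reduce flows above run their chunks in parallel only when
# this script is executed with pscons (or scons -j <n>); plain scons runs
# them serially.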

End()
