pegasus.catalog.site=XML3
## sites.xml will be generated by submit.sh
pegasus.catalog.site.file=sites.xml

## replica is a registry of files generated by different workflows for them to share files.
# If one file to be generated in a workflow is already present in replica, 
#  Pegasus will skip the job(s) that generate that file.
# Be careful about the name-clash among different workflows. 
#pegasus.catalog.replica=File
#pegasus.catalog.replica.file=replica.txt
# 2012.4.3 database as replica catalog
#pegasus.catalog.replica.db.url=jdbc:postgresql://localhost:5432/pegasus
#pegasus.catalog.replica.db.user=yh
#pegasus.catalog.replica.db.password=

## pegasus.dir.storage.deep:
# If this property is set to true,
#  the relative submit directory structure is replicated on the output site.
pegasus.dir.storage.deep=false

## pegasus.dir.useTimestamp:
# True results in the timestamp being added to  
#  the name of the submit directory.
pegasus.dir.useTimestamp=true

## Use symlinks if files are on the same site as the computing nodes.
pegasus.transfer.links=true

#clusters.size=3
## Pegasus will try any failed job 3 times.
dagman.retry=3

#pegasus.dir.submit.logs=/var/pegasus_tmp/

pegasus.condor.logs.symlink=false

## Force generation of *.arg files to store ultra-long job arguments.
## During conversion from the dag .xml to *.sub (condor submit files),
# if a single argument's length is >2049, it will be truncated to 2049.
# However, if you force Pegasus to generate *.arg files as a supplement to the *.sub files,
# the *.arg files won't have this problem.
pegasus.gridstart.invoke.length=2000

## Increase the number of stagein and stageout jobs to 10 (default 4) to reduce parallel IO.
stageout.clusters=10
stagein.clusters=10

## -B ...: Resize the data section size for stdio capture, default is 262144.
#pegasus.gridstart.arguments=-B 400000
pegasus.gridstart.arguments=-B 50000

## monitord could blow out the memory on large workflows.
# run it later with '--replay' to re-create the jobstate.log file,
# or re-populate the stats database from scratch.
#pegasus.monitord.event=false

## Enable cleanup job clustering. This would reduce the number of jobs on each level to N.
#pegasus.file.cleanup.clusters.num=50

## Enable cleanup job clustering. This parameter means N cleanup jobs on each level would be clustered as one job.
pegasus.file.cleanup.clusters.size=15
