[server]
# Host or IP address to which the EDPS server binds, e.g. localhost or 0.0.0.0
host = 0.0.0.0

# EDPS port number, e.g. 5000
port = 5000
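# Note: with the values above the EDPS service listens on port 5000 on all network interfaces;
# set host = localhost to restrict access to the local machine only.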

[application]
# Comma-separated list of directories where workflows are installed.
# If not specified, EDPS will search for workflows in the pipeline installation tree.
# The naming convention for workflows is: <instrument>/<instrument>_wkf.py, e.g. espresso/espresso_wkf.py
workflow_dir =
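# Example (hypothetical path, not part of the default configuration): a custom workflow tree at
# /home/astro/edps_workflows containing espresso/espresso_wkf.py would be configured as
# workflow_dir = /home/astro/edps_workflows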

[executor]
# esorex is the command used to execute pipeline recipes; it is installed together with the pipeline.
# Please make sure that the path provided here can be located using the "which" command.
esorex_path = esorex
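# Example (hypothetical location): if "which esorex" prints /usr/local/bin/esorex, you can either keep
# the bare command name as above or set the absolute path explicitly:
# esorex_path = /usr/local/bin/esorex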

# Path where pipeline plugins are installed.
# This configuration is used for ESO internal operations and can be left empty.
pipeline_path =

# genreport is the command used to generate quality control plots; it is installed with the Adari package.
genreport_path = genreport

# EDPS data directory where recipe products, logs and quality control plots are saved.
# The files are organised in a directory structure under the base directory, defined as:
# <instrument>/<data reduction task>/<unique identifier>/<files>
# Example: ESPRESSO/bias/fbf31155-a731-47f5-abf2-6445adce6c4b/master_bias.fits
# Please make sure that this directory has enough disk space available for storing the pipeline products,
# and consider enabling automatic data cleaning in the [cleanup] section.
base_dir = EDPS_data

# If true, a dummy command is executed instead of esorex.
dummy = False

# If true, EDPS will attempt to execute a data reduction step even if the previous step has failed.
continue_on_error = False

# Number of concurrent data reduction processes.
# Running concurrent data reductions increases performance if sufficient resources are available,
# but it can also lead to pipeline crashes if there is not enough memory to execute parallel reductions.
processes = 1

# Number of CPUs (cores) available for data reduction. EDPS will not exceed this number of cores
# when scheduling data reduction tasks.
cores = 1

# Pipeline recipes are parallelized using OpenMP. EDPS uses this parameter to set the number of threads when
# running a recipe, up to the available cores: OMP_NUM_THREADS=min(default_omp_threads, cores)
default_omp_threads = 1
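# Worked example (hypothetical values, for illustration only): with processes = 2, cores = 8 and
# default_omp_threads = 4, EDPS may run two recipes concurrently, each launched with
# OMP_NUM_THREADS = min(4, 8) = 4, so up to 8 cores can be in use at the same time.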

# Execution ordering. All orderings follow topological order, so parent tasks are always placed before their children.
# Options: dfs, bfs, type, dynamic
# dfs - depth-first: gives preference to reaching the final reduction target more quickly
# bfs - breadth-first: gives preference to following the reduction cascade level by level
# type - same as bfs, but ensures that data of the same type are processed together (e.g. first all BIASes)
# dynamic - immediately runs whichever job is ready (has all needed inputs); no stalling, but the order is unpredictable
ordering = dfs
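# Illustration (hypothetical cascade, based on the descriptions above): for several datasets that each
# require bias -> flat -> science, dfs tries to deliver the first science product as early as possible,
# bfs completes each level of the cascade across all datasets before moving on, and type additionally
# keeps jobs of the same data type (e.g. all BIASes) next to each other in the schedule.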

# If provided, the recipe products will be renamed according to the following scheme:
# <prefix>.<instrument>.YYYY-MM-DDThh:mm:ss.mss.fits (example: QC1.ESPRESSO.2023-02-09T17:30:14.326.fits),
# where the timestamp is taken from the moment the file is renamed.
# Note that the renaming occurs in the base directory, not in the package (output) directory.
output_prefix =

# If EDPS was stopped while some jobs were still waiting to be executed, should they be executed after restart.
resume_on_startup = False

# EDPS will automatically re-execute a failed job if it is needed as an association, but only within this time window.
reexecution_window_minutes = 60

[generator]
# Path to a yaml file defining the locations of static calibrations for each of the workflows.
# This configuration is used for ESO internal operations and can be left empty.
# EDPS will automatically load the static calibrations delivered with the pipeline.
calibrations_config_file =

# Path to a yaml file defining the locations of recipe and workflow parameters for each of the workflows.
# This configuration is used for ESO internal operations and can be left empty.
# EDPS will automatically load the recipe and workflow parameters delivered with the pipeline.
parameters_config_file =

# In case multiple matching associated inputs (e.g. calibrations) are available, this setting decides which ones are used.
# Options: raw, master, raw_per_quality_level, master_per_quality_level
# raw - use reduced raw data results even if master calibrations closer in time are available
# master - use master calibrations even if results of reduced raw data closer in time are available
# raw_per_quality_level - use the calibrations closest in time, but prefer reduced raw data results
# master_per_quality_level - use the calibrations closest in time, but prefer master calibrations
association_preference = raw_per_quality_level

# URL to the ESO-provided list of calibration breakpoints.
breakpoints_url =

# Comma-separated list of workflows which should be combined into one.
# This allows submitting data from different instruments to a single workflow, "edps.workflow.meta_wkf".
meta_workflow =

[repository]
# Clear the EDPS bookkeeping database on startup.
# This will cause all tasks to be re-executed even if they have been executed before on the same data.
truncate = False

# Should we use the local database for bookkeeping (currently always True).
local = True

# Path where the bookkeeping database should be stored.
path = db.json

# Type of bookkeeping database to use.
# Options: tiny, memory, caching
# tiny - directly use the TinyDB json-file-based database
# memory - use a fast, in-memory, non-persistent database
# caching - use an in-memory cache on top of the persistent TinyDB database for higher performance
type = caching

# How many changes are needed to trigger TinyDB flushing data to disk.
flush_size = 10

# How often data should be flushed automatically, regardless of the number of changes.
flush_timeout = 60

# Minimum amount of available disk space (in MB) required to flush data to disk.
min_disk_space_mb = 100

[cleanup]
# Should automatic cleanup of reduced data be enabled.
enabled = False

# How much time must pass after data were reduced before they are considered for removal
# (1209600 seconds = 14 days).
cleanup_older_than_seconds = 1209600

# How often should we check whether there are data to be removed (3600 seconds = 1 hour).
cleanup_check_period_seconds = 3600

[packager]
# Location where selected products should be placed.
package_base_dir =

# Method to place files in the package directory. Options: link, symlink, copy.
# link - create hardlinks
# symlink - create symbolic links
# copy - copy the files
mode = symlink

# Directory and filename pattern to use when placing files in the package directory.
# The pattern can contain any string, header keywords enclosed in $ (e.g. $pro.catg$),
# and the following predefined special variables:
# $NIGHT - year-month-day of when the data were taken
# $FILENAME - original name of the file
# $EXT - original extension of the file name
# $TASK - name of the EDPS task which produced the file
# $TIMESTAMP - timestamp of when the data were submitted for reduction
# $DATASET - dataset name, derived from the first raw input file
# Example: $DATASET/$TIMESTAMP/$object$_$pro.catg$.$EXT
pattern = $DATASET/$TIMESTAMP/$object$_$pro.catg$.$EXT
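# Illustration (hypothetical header values, for clarity only): with the pattern above, a product whose
# headers contain object = NGC1234 and pro.catg = MASTER_BIAS would be placed under
# <dataset name>/<submission timestamp>/NGC1234_MASTER_BIAS.fits inside package_base_dir.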

# Comma-separated list of product categories to place in the package directory.
# An empty value means all products matching the reduction target.
categories =