Skip to content

Commit

Permalink
Merge pull request #9 from nsoranzo/add_missing_native_specs
Browse files Browse the repository at this point in the history
Add some missing native specifications
  • Loading branch information
natefoo authored May 3, 2018
2 parents ef29dc8 + fe78d17 commit 44cc67e
Show file tree
Hide file tree
Showing 10 changed files with 62 additions and 23 deletions.
29 changes: 20 additions & 9 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -101,29 +101,40 @@ List of parameters that can be passed in the `drmaa_native_specification` attrib
| --acctg-freq=*list* | Define the job accounting sampling interval |
| --comment=*string* | An arbitrary comment |
| -C, --constraint=*list* | Specify a list of constraints |
| -c, --cpus-per-task=*n* | Number of processors per task |
| --contiguous | If set, then the allocated nodes must form a contiguous set |
| -d, --dependency=*list* | Defer the start of this job until the specified dependencies have been satisfied |
| --exclusive | Allocate nodes in exclusive mode when cpu consumable resource is enabled |
| --gres=*list* | Specifies a comma delimited list of generic consumable resources |
| -k, --no-kill | Do not automatically terminate a job if one of the nodes it has been allocated fails |
| -L, --licenses=*license* | Specification of licenses |
| -M, --clusters=*list* | Comma delimited list of clusters to issue commands to |
| --mail-type=*type* | Notify user by email when certain event types occur. Valid type values are BEGIN, END, FAIL, REQUEUE, and ALL (any state change) |
| --mem=*MB* | Minimum amount of real memory |
| --mem-per-cpu=*MB* | Minimum amount of real memory per allocated cpu required by a job |
| --mincpus=*n* | Minimum number of logical processors (threads) per node |
| -N, --nodes=*minnodes[-maxnodes]* | Number of nodes on which to run |
| -n, --ntasks=*n* | Number of tasks |
| --no-requeue | Specifies that the batch job should not be requeued after node failure |
| --ntasks-per-node=*n* | Number of tasks to invoke on each node |
| -p, --partition=*partition* | Partition requested |
| --qos=*qos* | Quality of Service |
| --requeue | If set, permit the job to be requeued |
| --reservation=*name* | Allocate resources from named reservation |
| -s, --share | Job allocation can share nodes with other running jobs |
| --tmp=*size[units]* | Specify a minimum amount of temporary disk space |
| -w, --nodelist=*hosts* | Request a specific list of hosts |
| -t, --time=*hours:minutes* | Set a maximum job wallclock time |
| -n, --ntasks=*n* | Number of tasks |
| --gres=*list* | Specifies a comma delimited list of generic consumable resources |
| --no-kill | Do not automatically terminate a job if one of the nodes it has been allocated fails |
| --licenses=*license* | Specification of licenses |
| --mail-type=*type* | Notify user by email when certain event types occur. Valid type values are BEGIN, END, FAIL, REQUEUE, and ALL (any state change) |
| --no-requeue | Specifies that the batch job should not be requeued after node failure |
| -x, --exclude=*nodelist* | Explicitly exclude certain nodes from the resources granted to the job |
| --tmp=*size[units]* | Specify a minimum amount of temporary disk space |
| -M, --clusters=*list* | Comma delimited list of clusters to issue commands to |

Additionally, the following parameters to `drmaa_native_specification` are supported, but their use is discouraged in
favor of the corresponding DRMAA job attributes:

| Native specification | DRMAA job attribute | Description |
|----------------------------|---------------------|-----------------------------------------------------------------------------------------------|
| -e, --error=*pattern* | drmaa_error_path | Connect the batch script's standard error directly to the file name specified in the pattern |
| -J, --job-name=*name* | drmaa_job_name | Specify a name for the job allocation |
| -o, --output=*pattern* | drmaa_output_path | Connect the batch script's standard output directly to the file name specified in the pattern |
| -t, --time=*hours:minutes* | drmaa_wct_hlimit | Set a maximum job wallclock time |

Description of each parameter can be found in `man sbatch`.

Expand Down
Empty file modified slurm_drmaa/Makefile.am
100755 → 100644
Empty file.
Empty file modified slurm_drmaa/drmaa.c
100755 → 100644
Empty file.
Empty file modified slurm_drmaa/job.c
100755 → 100644
Empty file.
Empty file modified slurm_drmaa/job.h
100755 → 100644
Empty file.
Empty file modified slurm_drmaa/session.c
100755 → 100644
Empty file.
Empty file modified slurm_drmaa/session.h
100755 → 100644
Empty file.
Empty file modified slurm_drmaa/slurm_drmaa.conf.example
100755 → 100644
Empty file.
56 changes: 42 additions & 14 deletions slurm_drmaa/util.c
100755 → 100644
Original file line number Diff line number Diff line change
Expand Up @@ -128,7 +128,9 @@ enum slurm_native {
SLURM_NATIVE_NO_REQUEUE,
SLURM_NATIVE_EXCLUDE,
SLURM_NATIVE_TMP,
SLURM_NATIVE_DEPENDENCY
SLURM_NATIVE_DEPENDENCY,
SLURM_NATIVE_STDOUT,
SLURM_NATIVE_STDERR
};

void
Expand Down Expand Up @@ -212,7 +214,7 @@ slurmdrmaa_add_attribute(job_desc_msg_t *job_desc, unsigned attr, const char *va
fsd_log_debug(("# pn_min_memory = %s",value));
job_desc->pn_min_memory = fsd_atoi(value);
}
else {
else {
fsd_log_debug(("mem value defined lower or equal to mem-per-cpu or value defined before"));
}
break;
Expand All @@ -221,7 +223,7 @@ slurmdrmaa_add_attribute(job_desc_msg_t *job_desc, unsigned attr, const char *va
fsd_log_debug(("# pn_min_memory (MEM_PER_CPU) = %s",value));
job_desc->pn_min_memory = fsd_atoi(value) | MEM_PER_CPU;
}
else {
else {
fsd_log_debug(("mem-per-cpu value defined lower or equal to mem or value defined before"));
}
break;
Expand Down Expand Up @@ -283,11 +285,11 @@ slurmdrmaa_add_attribute(job_desc_msg_t *job_desc, unsigned attr, const char *va
break;
case SLURM_NATIVE_NTASKS:
fsd_log_debug(("# ntasks = %s",value));
job_desc->num_tasks = fsd_atoi(value);
job_desc->num_tasks = fsd_atoi(value);
break;
case SLURM_NATIVE_TIME_LIMIT:
fsd_log_debug(("# time_limit = %s",value));
job_desc->time_limit = slurmdrmaa_datetime_parse(value);
job_desc->time_limit = slurmdrmaa_datetime_parse(value);
break;
case SLURM_NATIVE_GRES:
fsd_log_debug(("# gres = %s",value));
Expand Down Expand Up @@ -321,12 +323,20 @@ slurmdrmaa_add_attribute(job_desc_msg_t *job_desc, unsigned attr, const char *va
fsd_log_debug(("# dependency = %s", value));
job_desc->dependency = fsd_strdup(value);
break;
case SLURM_NATIVE_STDOUT:
fsd_log_debug(("# stdout = %s", value));
job_desc->std_out = fsd_strdup(value);
break;
case SLURM_NATIVE_STDERR:
fsd_log_debug(("# stderr = %s", value));
job_desc->std_err = fsd_strdup(value);
break;
default:
fsd_exc_raise_fmt(FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_VALUE,"Invalid attribute");
}
}

void
void
slurmdrmaa_parse_additional_attr(job_desc_msg_t *job_desc,const char *add_attr,char **clusters_opt)
{
char * volatile name = NULL;
Expand All @@ -343,7 +353,7 @@ slurmdrmaa_parse_additional_attr(job_desc_msg_t *job_desc,const char *add_attr,c
/*
* TODO: move it to slurmdrmaa_add_attribute
if (value == NULL) {
fsd_exc_raise_fmt(FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_VALUE,
fsd_exc_raise_fmt(FSD_DRMAA_ERRNO_INVALID_ATTRIBUTE_VALUE,
"Invalid native specification: %s Missing '='.", add_attr_copy);
} */

Expand All @@ -365,6 +375,9 @@ slurmdrmaa_parse_additional_attr(job_desc_msg_t *job_desc,const char *add_attr,c
else if (strcmp(name,"cpus-per-task") == 0) {
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_CPUS_PER_TASK,value);
}
else if (strcmp(name, "error") == 0) {
slurmdrmaa_add_attribute(job_desc, SLURM_NATIVE_STDERR, value);
}
else if(strcmp(name,"exclusive") == 0) {
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_EXCLUSIVE,NULL);
}
Expand All @@ -386,6 +399,9 @@ slurmdrmaa_parse_additional_attr(job_desc_msg_t *job_desc,const char *add_attr,c
else if (strcmp(name,"ntasks-per-node") == 0) {
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_NTASKS_PER_NODE,value);
}
else if (strcmp(name, "output") == 0) {
slurmdrmaa_add_attribute(job_desc, SLURM_NATIVE_STDOUT, value);
}
else if (strcmp(name,"partition") == 0) {
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_PARTITION,value);
}
Expand Down Expand Up @@ -458,7 +474,7 @@ slurmdrmaa_parse_additional_attr(job_desc_msg_t *job_desc,const char *add_attr,c
fsd_log_return(( "" ));
}

void
void
slurmdrmaa_parse_native(job_desc_msg_t *job_desc, const char * value)
{
char *arg = NULL;
Expand Down Expand Up @@ -498,10 +514,22 @@ slurmdrmaa_parse_native(job_desc_msg_t *job_desc, const char * value)
break;
case 'c' :
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_CPUS_PER_TASK, arg);
break;
break;
case 'd':
slurmdrmaa_add_attribute(job_desc, SLURM_NATIVE_DEPENDENCY, arg);
break;
case 'e':
slurmdrmaa_add_attribute(job_desc, SLURM_NATIVE_STDERR, arg);
break;
case 'k':
slurmdrmaa_add_attribute(job_desc, SLURM_NATIVE_NO_KILL, NULL);
break;
case 'N' :
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_NODES, arg);
break;
case 'o':
slurmdrmaa_add_attribute(job_desc, SLURM_NATIVE_STDOUT, arg);
break;
case 'p' :
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_PARTITION, arg);
break;
Expand All @@ -516,10 +544,10 @@ slurmdrmaa_parse_native(job_desc_msg_t *job_desc, const char * value)
break;
case 't' :
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_TIME_LIMIT, arg);
break;
break;
case 'n' :
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_NTASKS, arg);
break;
break;
case 'x' :
slurmdrmaa_add_attribute(job_desc,SLURM_NATIVE_EXCLUDE, arg);
break;
Expand Down Expand Up @@ -565,13 +593,13 @@ slurmdrmaa_parse_native(job_desc_msg_t *job_desc, const char * value)
job_desc->min_cpus = job_desc->num_tasks * job_desc->cpus_per_task ;
fsd_log_debug((
"set min_cpus to ntasks*cpus_per_task: %d",
job_desc->min_cpus
job_desc->min_cpus
));
} else {
job_desc->min_cpus = job_desc->num_tasks ;
job_desc->min_cpus = job_desc->num_tasks ;
fsd_log_debug((
"set min_cpus to ntasks: %d",
job_desc->min_cpus
job_desc->min_cpus
));
}
fsd_free(native_spec_copy);
Expand Down
Empty file modified slurm_drmaa/util.h
100755 → 100644
Empty file.

0 comments on commit 44cc67e

Please sign in to comment.