Support for 24.5.x #348

Merged
merged 14 commits into from Sep 25, 2024
12 changes: 6 additions & 6 deletions README.md
@@ -6,18 +6,18 @@ pyslurm is the Python client library for the [Slurm Workload Manager](https://sl

* [Slurm](https://slurm.schedmd.com) - Slurm shared library and header files
* [Python](https://www.python.org) - >= 3.6
-* [Cython](https://cython.org) - >= 0.29.36
+* [Cython](https://cython.org) - >= 0.29.37

-This Version is for Slurm 23.11.x
+This Version is for Slurm 24.05.x

## Versioning

In pyslurm, the versioning scheme follows the official Slurm versioning. The
first two numbers (`MAJOR.MINOR`) always correspond to Slurms Major-Release,
-for example `23.11`.
+for example `24.05`.
The last number (`MICRO`) is however not tied in any way to Slurms `MICRO`
version, but is instead PySlurm's internal Patch-Level. For example, any
-pyslurm 23.11.X version should work with any Slurm 23.11.X release.
+pyslurm 24.05.X version should work with any Slurm 24.05.X release.

## Installation

@@ -29,8 +29,8 @@ the corresponding paths to the necessary files.
You can specify those with environment variables (recommended), for example:

```shell
-export SLURM_INCLUDE_DIR=/opt/slurm/23.11/include
-export SLURM_LIB_DIR=/opt/slurm/23.11/lib
+export SLURM_INCLUDE_DIR=/opt/slurm/24.05/include
+export SLURM_LIB_DIR=/opt/slurm/24.05/lib
```

Then you can proceed to install pyslurm, for example by cloning the Repository:
2 changes: 1 addition & 1 deletion build_requirements.txt
@@ -1,4 +1,4 @@
setuptools>=59.2.0
wheel>=0.37.0
-Cython>=0.29.36
+Cython>=0.29.37
packaging>=21.3
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -3,7 +3,7 @@
requires = [
"setuptools>=59.2.0",
"wheel>=0.37.0",
"Cython>=0.29.36",
"Cython>=0.29.37",
"packaging>=21.3"
]

2 changes: 1 addition & 1 deletion pyslurm/__version__.py
@@ -5,4 +5,4 @@
# The last Number "Z" is the current Pyslurm patch version, which should be
# incremented each time a new release is made (except when migrating to a new
# Slurm Major release, then set it back to 0)
__version__ = "23.11.0"
__version__ = "24.5.0"
4 changes: 1 addition & 3 deletions pyslurm/core/job/job.pxd
@@ -47,7 +47,7 @@ from pyslurm.slurm cimport (
job_info_msg_t,
slurm_job_info_t,
slurm_job_state_string,
-slurm_job_reason_string,
+slurm_job_state_reason_string,
slurm_job_share_string,
slurm_job_batch_script,
slurm_get_job_stdin,
@@ -358,8 +358,6 @@ cdef class Job:
Whether the Job should be killed on an invalid dependency.
spreads_over_nodes (bool):
Whether the Job should be spread over as many nodes as possible.
-power_options (list):
-Options set for Power Management.
is_cronjob (bool):
Whether this Job is a cronjob.
cronjob_time (str):
6 changes: 1 addition & 5 deletions pyslurm/core/job/job.pyx
@@ -650,7 +650,7 @@ cdef class Job:
if self.ptr.state_desc:
return cstr.to_unicode(self.ptr.state_desc)

-return cstr.to_unicode(slurm_job_reason_string(self.ptr.state_reason))
+return cstr.to_unicode(slurm_job_state_reason_string(self.ptr.state_reason))

@property
def is_requeueable(self):
@@ -1177,10 +1177,6 @@
def spreads_over_nodes(self):
return u64_parse_bool_flag(self.ptr.bitflags, slurm.SPREAD_JOB)

-@property
-def power_options(self):
-return power_type_int_to_list(self.ptr.power_flags)

@property
def is_cronjob(self):
return u64_parse_bool_flag(self.ptr.bitflags, slurm.CRON_JOB)
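For context, a short usage sketch of the property changed above. The job ID is made up, and `Job.load` is assumed to be reachable from the top-level `pyslurm` namespace:

```python
import pyslurm

job = pyslurm.Job.load(1234)  # hypothetical job ID
# state_reason prefers an admin-set description (state_desc) and falls
# back to the canonical reason string, now fetched through
# slurm_job_state_reason_string() — the name the Slurm 24.05 headers
# use, which this PR tracks.
print(job.state, job.state_reason)
```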
1 change: 0 additions & 1 deletion pyslurm/core/job/sbatch_opts.pyx
@@ -138,7 +138,6 @@ SBATCH_OPTIONS = [
SbatchOpt("O", "overcommit", "overcommit", True),
SbatchOpt("s", "oversubscribe", "resource_sharing", "yes"),
SbatchOpt("p", "partition", "partition"),
SbatchOpt(None, "power", "power_options"),
SbatchOpt(None, "prefer", None),
SbatchOpt(None, "priority", "priority"),
SbatchOpt(None, "profile", "profile_types"),
4 changes: 0 additions & 4 deletions pyslurm/core/job/submission.pxd
@@ -251,9 +251,6 @@ cdef class JobSubmitDescription:
partitions (Union[list, str]):
A list of partitions the Job may use.
This is the same as -p/--partition from sbatch.
-power_options (list):
-A list of power management plugin options for the Job.
-This is the same as --power from sbatch.
accounting_gather_frequency (Union[dict, str]):
Interval for accounting info to be gathered.
This is the same as --acctg-freq from sbatch.
@@ -600,7 +597,6 @@
log_files_open_mode
overcommit
partitions
-power_options
profile_types
accounting_gather_frequency
qos
1 change: 0 additions & 1 deletion pyslurm/core/job/submission.pyx
@@ -266,7 +266,6 @@ cdef class JobSubmitDescription:
ptr.requeue = u16_bool(self.is_requeueable)
ptr.wait_all_nodes = u16_bool(self.wait_all_nodes)
ptr.mail_type = mail_type_list_to_int(self.mail_types)
-ptr.power_flags = power_type_list_to_int(self.power_options)
ptr.profile = acctg_profile_list_to_int(self.profile_types)
ptr.shared = shared_type_str_to_int(self.resource_sharing)

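Submissions that previously set `power_options` simply drop the attribute, since the underlying `power_flags` field is gone from the Slurm 24.05 API (which is why this PR removes it). A hedged sketch of a submission after this change; the job name, partition, and script path are all made up:

```python
from pyslurm import JobSubmitDescription

desc = JobSubmitDescription(
    name="example-job",           # made-up job name
    partitions="debug",           # made-up partition
    script="/path/to/batch.sh",   # made-up script path
    # power_options="level",      # no longer accepted after this PR
)
job_id = desc.submit()
```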
33 changes: 1 addition & 32 deletions pyslurm/core/job/util.pyx
@@ -178,37 +178,6 @@ def acctg_profile_int_to_list(flags):
return profiles


-def power_type_list_to_int(types):
-"""Convert a str or list of str with power types to uint8_t."""
-cdef uint8_t flags = 0
-
-if not types:
-return slurm.NO_VAL8
-
-if isinstance(types, str):
-types = types.split(",")
-
-for typ in types:
-typ = typ.casefold()
-
-if "level" == typ:
-flags |= slurm.SLURM_POWER_FLAGS_LEVEL
-else:
-raise ValueError("Invalid power type: {typ}.")
-
-return flags
-
-
-def power_type_int_to_list(flags):
-"""Convert uint8_t power type flags to a list of strings."""
-types = []
-
-if flags & slurm.SLURM_POWER_FLAGS_LEVEL:
-types.append("LEVEL")
-
-return types


def shared_type_str_to_int(typ):
"""Convert a job-sharing type str to its numerical representation."""
if not typ:
@@ -227,7 +196,7 @@ def shared_type_str_to_int(typ):
raise ValueError(f"Invalid resource_sharing type: {typ}.")


-def cpu_gov_str_to_int(gov):
+def cpu_gov_str_to_int(gov):
"""Convert a cpu governor str to is numerical representation."""
if not gov:
return u32(None)
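The two deleted helpers followed the same list-to-bitmask shape as the converters that remain in this file (for example `acctg_profile_list_to_int`). A pure-Python sketch of that pattern, with illustrative flag names and values rather than Slurm's:

```python
FLAGS = {"LEVEL": 0x01, "EXAMPLE": 0x02}  # illustrative values only

def type_list_to_int(types):
    """Convert a comma-separated string or list of names to a bitmask."""
    if isinstance(types, str):
        types = types.split(",")
    mask = 0
    for typ in types or []:
        if typ.upper() not in FLAGS:
            raise ValueError(f"Invalid type: {typ}.")
        mask |= FLAGS[typ.upper()]
    return mask

def type_int_to_list(mask):
    """Convert a bitmask back to the list of flag names that are set."""
    return [name for name, bit in FLAGS.items() if mask & bit]
```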
17 changes: 0 additions & 17 deletions pyslurm/core/node.pyx
@@ -658,12 +658,6 @@ cdef class Node:
def cpu_binding(self, val):
self.info.cpu_bind=self.umsg.cpu_bind = cpubind_to_num(val)

-@property
-def cap_watts(self):
-if not self.info.power:
-return 0
-return u32_parse(self.info.power.cap_watts, on_noval=0)

@property
def current_watts(self):
if not self.info.energy:
@@ -676,17 +670,6 @@
return 0
return u32_parse(self.info.energy.ave_watts, on_noval=0)

-@property
-def external_sensors(self):
-if not self.info.ext_sensors:
-return {}
-
-return {
-"joules_total": u64_parse(self.info.ext_sensors.consumed_energy),
-"current_watts": u32_parse(self.info.ext_sensors.current_watts),
-"temperature": u32_parse(self.info.ext_sensors.temperature)
-}

@property
def _node_state(self):
idle_cpus = self.idle_cpus
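With `cap_watts` and `external_sensors` gone, per-node power data comes only from the energy accounting fields that remain in the diff above. A usage sketch; the node name is hypothetical:

```python
import pyslurm

node = pyslurm.Node.load("node001")  # hypothetical node name
# current_watts and average_watts survive the removal and still read
# from the node's energy accounting data.
print(node.current_watts, node.average_watts)
```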
4 changes: 3 additions & 1 deletion pyslurm/core/partition.pxd
@@ -167,7 +167,7 @@ cdef class Partition:

This can also return [UNLIMITED][pyslurm.constants.UNLIMITED]
min_nodes (int):
-Minimum number of Nodes that must be requested by Jobs
+Minimum number of Nodes that must be requested by Jobs
max_time (int):
Max Time-Limit in minutes that Jobs can request

@@ -211,6 +211,8 @@ cdef class Partition:
Whether only root is able to use a Partition
requires_reservation (bool):
Whether a reservation is required to use a Partition
+power_down_on_idle (bool):
+Whether nodes power down on idle after running jobs
"""
cdef:
partition_info_t *ptr
Expand Down
38 changes: 22 additions & 16 deletions pyslurm/core/partition.pyx
@@ -601,39 +601,39 @@ cdef class Partition:

@property
def is_default(self):
-return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_DEFAULT)
+return u32_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_DEFAULT)

@is_default.setter
def is_default(self, val):
-u16_set_bool_flag(&self.ptr.flags, val,
+u32_set_bool_flag(&self.ptr.flags, val,
slurm.PART_FLAG_DEFAULT, slurm.PART_FLAG_DEFAULT_CLR)

@property
def allow_root_jobs(self):
-return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_NO_ROOT)
+return u32_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_NO_ROOT)

@allow_root_jobs.setter
def allow_root_jobs(self, val):
-u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_NO_ROOT,
+u32_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_NO_ROOT,
slurm.PART_FLAG_NO_ROOT_CLR)

@property
def is_user_exclusive(self):
-return u16_parse_bool_flag(self.ptr.flags,
+return u32_parse_bool_flag(self.ptr.flags,
slurm.PART_FLAG_EXCLUSIVE_USER)

@is_user_exclusive.setter
def is_user_exclusive(self, val):
-u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_EXCLUSIVE_USER,
+u32_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_EXCLUSIVE_USER,
slurm.PART_FLAG_EXC_USER_CLR)

@property
def is_hidden(self):
-return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_HIDDEN)
+return u32_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_HIDDEN)

@is_hidden.setter
def is_hidden(self, val):
-u16_set_bool_flag(&self.ptr.flags, val,
+u32_set_bool_flag(&self.ptr.flags, val,
slurm.PART_FLAG_HIDDEN, slurm.PART_FLAG_HIDDEN_CLR)

@property
@@ -642,27 +642,36 @@

@least_loaded_nodes_scheduling.setter
def least_loaded_nodes_scheduling(self, val):
-u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_LLN,
+u32_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_LLN,
slurm.PART_FLAG_LLN_CLR)

@property
def is_root_only(self):
-return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_ROOT_ONLY)
+return u32_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_ROOT_ONLY)

@is_root_only.setter
def is_root_only(self, val):
-u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_ROOT_ONLY,
+u32_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_ROOT_ONLY,
slurm.PART_FLAG_ROOT_ONLY_CLR)

@property
def requires_reservation(self):
-return u16_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_REQ_RESV)
+return u32_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_REQ_RESV)

@requires_reservation.setter
def requires_reservation(self, val):
-u16_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_REQ_RESV,
+u32_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_REQ_RESV,
slurm.PART_FLAG_REQ_RESV_CLR)

+@property
+def power_down_on_idle(self):
+return u32_parse_bool_flag(self.ptr.flags, slurm.PART_FLAG_PDOI)
+
+@power_down_on_idle.setter
+def power_down_on_idle(self, val):
+u32_set_bool_flag(&self.ptr.flags, val, slurm.PART_FLAG_PDOI,
+slurm.PART_FLAG_PDOI_CLR)

# TODO: tres_fmt_str


@@ -747,9 +756,6 @@ def _select_type_int_to_list(stype):
if stype & slurm.CR_PACK_NODES:
out.append("PACK_NODES")

-if stype & slurm.CR_OTHER_CONS_TRES:
-out.append("OTHER_CONS_TRES")

if stype & slurm.CR_CORE_DEFAULT_DIST_BLOCK:
out.append("CORE_DEFAULT_DIST_BLOCK")

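The new `power_down_on_idle` attribute maps to Slurm's `PowerDownOnIdle` partition option. A read-only sketch, assuming the usual dict-like collection returned by `Partitions.load()`:

```python
from pyslurm import Partitions

# List which partitions power their nodes down when idle.
for name, part in Partitions.load().items():
    print(name, part.power_down_on_idle)
```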
2 changes: 1 addition & 1 deletion pyslurm/db/job.pxd
@@ -38,7 +38,7 @@ from pyslurm.slurm cimport (
try_xmalloc,
slurmdb_job_cond_def_start_end,
slurm_job_state_string,
-slurm_job_reason_string,
+slurm_job_state_reason_string,
slurmdb_create_job_rec,
slurmdb_job_modify,
)
2 changes: 1 addition & 1 deletion pyslurm/db/job.pyx
@@ -799,7 +799,7 @@ cdef class Job:

@property
def state_reason(self):
-return cstr.to_unicode(slurm_job_reason_string
+return cstr.to_unicode(slurm_job_state_reason_string
(self.ptr.state_reason_prev))

@property
2 changes: 1 addition & 1 deletion pyslurm/db/step.pxd
@@ -34,7 +34,7 @@ from pyslurm.slurm cimport (
try_xmalloc,
slurmdb_job_cond_def_start_end,
slurm_job_state_string,
-slurm_job_reason_string,
+slurm_job_state_reason_string,
)
from pyslurm.db.util cimport SlurmList, SlurmListItem
from pyslurm.db.connection cimport Connection
4 changes: 2 additions & 2 deletions pyslurm/db/util.pxd
@@ -23,7 +23,7 @@
from pyslurm cimport slurm
from pyslurm.utils cimport cstr
from pyslurm.slurm cimport (
-ListIterator,
+list_itr_t,
List,
slurm_list_iterator_create,
slurm_list_iterator_destroy,
@@ -52,7 +52,7 @@ cdef class SlurmListItem:
cdef class SlurmList:
cdef:
List info
-ListIterator itr
+list_itr_t *itr

cdef readonly:
owned