Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
58 changes: 58 additions & 0 deletions .github/workflows/ci-code.yml
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,9 @@ jobs:
- name: Setup environment
run: .github/workflows/setup.sh

- name: Setup upterm session
uses: lhotari/action-upterm@v1

- name: Run test suite
env:
AIIDA_TEST_PROFILE: test_aiida
Expand Down Expand Up @@ -107,6 +110,9 @@ jobs:
- name: Setup SSH on localhost
run: .github/workflows/setup_ssh.sh

- name: Setup upterm session
uses: lhotari/action-upterm@v1

- name: Run test suite
env:
AIIDA_WARN_v3: 0
Expand All @@ -133,3 +139,55 @@ jobs:
verdi devel check-load-time
verdi devel check-undesired-imports
.github/workflows/verdi.sh


test-pytest-fixtures:
# Who watches the watchmen?
    # Here we test the pytest fixtures in isolation from the rest of the aiida-core test suite,
# since they can be used outside of aiida core context, e.g. in plugins.
# Unlike in other workflows in this file, we purposefully don't setup a test profile.

runs-on: ubuntu-24.04
timeout-minutes: 10

services:
postgres:
image: postgres:10
env:
POSTGRES_DB: test_aiida
POSTGRES_PASSWORD: ''
POSTGRES_HOST_AUTH_METHOD: trust
options: >-
--health-cmd pg_isready
--health-interval 10s
--health-timeout 5s
--health-retries 5
ports:
- 5432:5432
rabbitmq:
image: rabbitmq:3.8.14-management
ports:
- 5672:5672
- 15672:15672

steps:
- uses: actions/checkout@v4

- name: Install aiida-core
uses: ./.github/actions/install-aiida-core
with:
python-version: '3.9'
from-lock: 'true'
extras: tests

- name: Test legacy pytest fixtures
run: pytest --cov aiida --noconftest src/aiida/manage/tests/test_pytest_fixtures.py

- name: Upload coverage report
if: github.repository == 'aiidateam/aiida-core'
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
name: test-pytest-fixtures
files: ./coverage.xml
fail_ci_if_error: false # don't fail job, if coverage upload fails
36 changes: 17 additions & 19 deletions src/aiida/cmdline/commands/cmd_process.py
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,16 @@
verdi daemon start
"""

# Shared ``-t/--timeout`` option for the process action commands (kill/pause/play).
# The default of infinity means the command waits indefinitely for the action's
# response; per the help text, a value of 0 makes the command return without
# waiting for a response at all.
ACTION_TIMEOUT = OverridableOption(
    '-t',
    '--timeout',
    type=click.FloatRange(0, float('inf')),
    default=float('inf'),
    show_default=True,
    help='Time in seconds to wait for a response before timing out. '
    'If timeout is 0 the command does not wait for response.',
)


def valid_projections():
"""Return list of valid projections for the ``--project`` option of ``verdi process list``.
Expand Down Expand Up @@ -320,15 +330,7 @@ def process_status(call_link_label, most_recent_node, max_depth, processes):
@verdi_process.command('kill')
@arguments.PROCESSES()
@options.ALL(help='Kill all processes if no specific processes are specified.')
@OverridableOption(
'-t',
'--timeout',
type=click.FLOAT,
default=5.0,
show_default=True,
help='Time in seconds to wait for a response of the kill task before timing out.',
)()
@options.WAIT()
@ACTION_TIMEOUT()
@OverridableOption(
'-F',
'--force',
Expand All @@ -338,7 +340,7 @@ def process_status(call_link_label, most_recent_node, max_depth, processes):
'Note: This may lead to orphaned jobs on your HPC and should be used with caution.',
)()
@decorators.with_dbenv()
def process_kill(processes, all_entries, timeout, wait, force):
def process_kill(processes, all_entries, timeout, force):
"""Kill running processes.

Kill one or multiple running processes."""
Expand Down Expand Up @@ -368,7 +370,6 @@ def process_kill(processes, all_entries, timeout, wait, force):
force=force,
all_entries=all_entries,
timeout=timeout,
wait=wait,
)
except control.ProcessTimeoutException as exception:
echo.echo_critical(f'{exception}\n{REPAIR_INSTRUCTIONS}')
Expand All @@ -380,10 +381,9 @@ def process_kill(processes, all_entries, timeout, wait, force):
@verdi_process.command('pause')
@arguments.PROCESSES()
@options.ALL(help='Pause all active processes if no specific processes are specified.')
@options.TIMEOUT()
@options.WAIT()
@ACTION_TIMEOUT()
@decorators.with_dbenv()
def process_pause(processes, all_entries, timeout, wait):
def process_pause(processes, all_entries, timeout):
"""Pause running processes.

Pause one or multiple running processes."""
Expand All @@ -404,7 +404,6 @@ def process_pause(processes, all_entries, timeout, wait):
msg_text='Paused through `verdi process pause`',
all_entries=all_entries,
timeout=timeout,
wait=wait,
)
except control.ProcessTimeoutException as exception:
echo.echo_critical(f'{exception}\n{REPAIR_INSTRUCTIONS}')
Expand All @@ -416,10 +415,9 @@ def process_pause(processes, all_entries, timeout, wait):
@verdi_process.command('play')
@arguments.PROCESSES()
@options.ALL(help='Play all paused processes if no specific processes are specified.')
@options.TIMEOUT()
@options.WAIT()
@ACTION_TIMEOUT()
@decorators.with_dbenv()
def process_play(processes, all_entries, timeout, wait):
def process_play(processes, all_entries, timeout):
"""Play (unpause) paused processes.

Play (unpause) one or multiple paused processes."""
Expand All @@ -435,7 +433,7 @@ def process_play(processes, all_entries, timeout, wait):

with capture_logging() as stream:
try:
control.play_processes(processes, all_entries=all_entries, timeout=timeout, wait=wait)
control.play_processes(processes, all_entries=all_entries, timeout=timeout)
except control.ProcessTimeoutException as exception:
echo.echo_critical(f'{exception}\n{REPAIR_INSTRUCTIONS}')

Expand Down
7 changes: 0 additions & 7 deletions src/aiida/cmdline/params/options/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -125,7 +125,6 @@
'USER_LAST_NAME',
'VERBOSITY',
'VISUALIZATION_FORMAT',
'WAIT',
'WITH_ELEMENTS',
'WITH_ELEMENTS_EXCLUSIVE',
'active_process_states',
Expand Down Expand Up @@ -690,12 +689,6 @@ def set_log_level(ctx, _param, value):
help='Time in seconds to wait for a response before timing out.',
)

# Boolean ``--wait/--no-wait`` flag: when enabled the command blocks until the
# requested action has completed; otherwise (the default) it returns as soon as
# the action has been scheduled.
WAIT = OverridableOption(
    '--wait/--no-wait',
    default=False,
    help='Wait for the action to be completed otherwise return as soon as it is scheduled.',
)

FORMULA_MODE = OverridableOption(
'-f',
'--formula-mode',
Expand Down
7 changes: 4 additions & 3 deletions src/aiida/engine/daemon/execmanager.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
from aiida.orm.utils.log import get_dblogger_extra
from aiida.repository.common import FileType
from aiida.schedulers.datastructures import JobState
from aiida.transports import has_magic

if TYPE_CHECKING:
from aiida.transports import Transport
Expand Down Expand Up @@ -465,7 +466,7 @@ async def stash_calculation(calculation: CalcJobNode, transport: Transport) -> N
target_basepath = target_base / uuid[:2] / uuid[2:4] / uuid[4:]

for source_filename in source_list:
if transport.has_magic(source_filename):
if has_magic(source_filename):
copy_instructions = []
for globbed_filename in await transport.glob_async(source_basepath / source_filename):
target_filepath = target_basepath / Path(globbed_filename).relative_to(source_basepath)
Expand Down Expand Up @@ -679,7 +680,7 @@ async def retrieve_files_from_list(
if isinstance(item, (list, tuple)):
tmp_rname, tmp_lname, depth = item
# if there are more than one file I do something differently
if transport.has_magic(tmp_rname):
if has_magic(tmp_rname):
remote_names = await transport.glob_async(workdir.joinpath(tmp_rname))
local_names = []
for rem in remote_names:
Expand All @@ -702,7 +703,7 @@ async def retrieve_files_from_list(
else:
abs_item = item if item.startswith('/') else str(workdir.joinpath(item))

if transport.has_magic(abs_item):
if has_magic(abs_item):
remote_names = await transport.glob_async(abs_item)
local_names = [os.path.split(rem)[1] for rem in remote_names]
else:
Expand Down
Loading
Loading