Skip to content

Commit

Permalink
fixing tests
Browse files Browse the repository at this point in the history
  • Loading branch information
Thomas Erland Clausen committed Feb 26, 2025
1 parent f11d957 commit 747c488
Show file tree
Hide file tree
Showing 3 changed files with 25 additions and 143 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -58,126 +58,3 @@ def test_does_not_raise(
args=any_calculator_args,
infrastructure_settings=infrastructure_settings,
)

@pytest.mark.disable_autouse
def test_add_info_log_record_to_azure_monitor_with_expected_settings(
    self,
    any_calculator_args: CalculatorArgs,
    infrastructure_settings: InfrastructureSettings,
    integration_test_configuration: IntegrationTestConfiguration,
) -> None:
    """
    Assert that the calculator job adds log records to Azure Monitor with the expected settings:
    - cloud role name = "dbr-calculation-engine-tests"
    - severity level = 1
    - message <the message>
    - operation id has value
    - custom field "Subsystem" = "unit-tests"
    - custom field "calculation_id" = <the calculation id>
    - custom field "CategoryName" = "Energinet.DataHub." + <logger name>
    Debug level is not tested as it is not intended to be logged by default.
    """
    # Reset process-global telemetry state so this test can configure real
    # Azure Monitor logging (the autouse session fixture is disabled above).
    cleanup_logging()
    logging_settings = config.LoggingSettings(
        cloud_role_name="dbr-calculation-engine-tests",
        subsystem="unit-tests",
        orchestration_instance_id=uuid.uuid4(),
        applicationinsights_connection_string=integration_test_configuration.get_applicationinsights_connection_string(),
    )
    config.configure_logging(logging_settings=logging_settings)
    # Arrange
    self.prepare_command_line_arguments(any_calculator_args)

    config.add_extras(dict(calculation_id=any_calculator_args.calculation_id))

    # Act
    # The job is expected to exit via SystemExit; the log records emitted
    # before the exit are what this test asserts on.
    with pytest.raises(SystemExit):
        start_with_deps(
            args=any_calculator_args,
            infrastructure_settings=infrastructure_settings,
        )

    # Assert
    # noinspection PyTypeChecker
    logs_client = LogsQueryClient(integration_test_configuration.credential)

    # All-zero operation id means "no operation id was assigned" in App Insights,
    # so filtering it out asserts that an operation id actually has a value.
    query = f"""
        AppTraces
        | where AppRoleName == "dbr-calculation-engine-tests"
        | where SeverityLevel == 1
        | where Message startswith_cs "Started executing function"
        | where OperationId != "00000000000000000000000000000000"
        | where Properties.Subsystem == "unit-tests"
        | where Properties.calculation_id == "{any_calculator_args.calculation_id}"
        | where Properties.CategoryName == "Energinet.DataHub.start_with_deps"
        | count
        """

    workspace_id = integration_test_configuration.get_analytics_workspace_id()

    def assert_logged():
        # Log-record ingestion into Azure Monitor is eventually consistent,
        # hence the retrying caller below.
        actual = logs_client.query_workspace(
            workspace_id, query, timespan=timedelta(minutes=5)
        )
        assert_row_count(actual, 1)

    # Assert, but timeout if not succeeded
    wait_for_condition(
        assert_logged, timeout=timedelta(minutes=3), step=timedelta(seconds=10)
    )

@staticmethod
def prepare_command_line_arguments(any_calculator_args):
any_calculator_args.calculation_id = str(
uuid.uuid4()
) # Ensure unique calculation id
sys.argv = []
sys.argv.append("--dummy=")
sys.argv.append(f"--calculation-id={str(any_calculator_args.calculation_id)}")
sys.argv.append("--grid-areas=[123]")
sys.argv.append("--period-start-datetime=2023-01-31T23:00:00Z")
sys.argv.append("--period-end-datetime=2023-01-31T23:00:00Z")
sys.argv.append("--calculation-type=balance_fixing")
sys.argv.append("--created-by-user-id=19e0586b-838a-4ea2-96ce-6d923a89c922")


def wait_for_condition(callback: Callable, *, timeout: timedelta, step: timedelta):
    """
    Wait for a condition to be met, or timeout.

    The function keeps invoking the callback until it returns without raising
    an exception. If the condition is still failing after *timeout*, the last
    exception raised by the callback is re-raised.

    Args:
        callback: Zero-argument callable; "condition met" means it returns
            without raising.
        timeout: Maximum total time to keep retrying.
        step: Pause between consecutive attempts.

    Raises:
        Exception: Whatever the callback last raised, once the timeout is
            exceeded.
    """
    start_time = time.time()
    while True:
        elapsed_ms = int((time.time() - start_time) * 1000)
        # noinspection PyBroadException
        try:
            callback()
            print(f"Condition met in {elapsed_ms} ms")
            return
        except Exception:
            if elapsed_ms > timeout.total_seconds() * 1000:
                print(
                    f"Condition failed to be met before timeout. Timed out after {elapsed_ms} ms",
                    file=sys.stderr,
                )
                raise
            # BUG FIX: was `step.seconds`, which is only the seconds *component*
            # of the timedelta — a sub-second step truncated to 0 (busy loop)
            # and any days component was ignored. total_seconds() honors the
            # full duration.
            time.sleep(step.total_seconds())
            print(f"Condition not met after {elapsed_ms} ms. Retrying...")


def assert_row_count(actual, expected_count):
    """Assert that the count-query result holds exactly *expected_count*.

    Expects a result shaped like the output of a Kusto `| count` query:
    a single table whose single row carries the "Count" column.
    """
    result = cast(LogsQueryResult, actual)
    first_row = result.tables[0].rows[0]
    assert cast(int, first_row["Count"]) == expected_count


def cleanup_logging() -> None:
    """
    Reset the process-global telemetry state used by config.configure_logging.

    Without this, logging configured by one test (or by the session-wide
    autouse fixture) leaks into tests that want to configure logging
    themselves.
    """
    config.set_extras({})
    config.set_is_instrumented(False)
    config.set_tracer(None)
    config.set_tracer_name("")
    # NOTE(review): presumably OTEL_SERVICE_NAME is exported during logging
    # configuration; popping it lets the next configuration start clean —
    # confirm against geh_common.telemetry.
    os.environ.pop("OTEL_SERVICE_NAME", None)
41 changes: 24 additions & 17 deletions source/databricks/calculation_engine/tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,14 @@
from unittest.mock import patch


# def cleanup_logging() -> None:
# config.set_extras({})
# config.set_is_instrumented(False)
# config.set_tracer(None)
# config.set_tracer_name("")
# os.environ.pop("OTEL_SERVICE_NAME", None)


@pytest.fixture(scope="session")
def test_files_folder_path(tests_path: str) -> str:
    """Session-scoped path to the folder holding shared test files."""
    return tests_path + "/test_files"
Expand Down Expand Up @@ -421,26 +429,25 @@ def grid_loss_metering_point_ids_input_data_written_to_delta(
)


@pytest.fixture(scope="session", autouse=True)
def configure_logging_dummy():
    """
    Configure logging once for the whole test session.

    configure_azure_monitor is patched out so no real Azure Monitor
    connection is attempted; the connection string below is a dummy value.
    """
    # The captured span contained both the pre- and post-change fixture
    # signatures plus commented-out dead code; this is the reconstructed
    # post-change fixture with the dead code removed.
    # patch to avoid error when trying to configure azure monitor
    with patch("geh_common.telemetry.logging_configuration.configure_azure_monitor"):
        logging_settings = config.LoggingSettings(
            cloud_role_name="dbr-calculation-engine-tests",
            subsystem="unit-tests",
            orchestration_instance_id=uuid.uuid4(),
            applicationinsights_connection_string="connectionString",
        )
        yield config.configure_logging(logging_settings=logging_settings)


@pytest.fixture(scope="session")
Expand Down
4 changes: 1 addition & 3 deletions tox.ini
Original file line number Diff line number Diff line change
Expand Up @@ -6,8 +6,6 @@ max-line-length = 120

[pytest]
testpaths = source/databricks/tests
markers =
acceptance_test
disable_autouse
markers = acceptance_test
# ignores entry point tests locally, because the virtualenv or wheel that are being installed messes with navigation after being run
addopts = --ignore=source/databricks/tests/entry_points

0 comments on commit 747c488

Please sign in to comment.