Skip to content

run_config_smoke_tests

Public callable

Run 00_env_config readiness smoke checks for configuration bootstrap.

Use this during environment bootstrap to verify Spark availability, Fabric runtime context access, required path mappings, notebook naming policy, and optional AI/IO import readiness before executing downstream notebook steps.

Parameters:

Name Type Description Default
config FrameworkConfig

Validated framework configuration to evaluate.

required
env str

Environment key used when resolving required target paths.

"Sandbox"
required_targets list[str] | None

Required targets expected in config.path_config. Defaults to ["Source", "Unified"] when not provided.

None
check_ai bool

Whether to run the Fabric AI availability check.

True
check_io_import bool

Whether to test importability of fabric_io helpers.

False
notebook_name str | None

Notebook name to validate against configured naming prefixes.

None
ai_result dict[str, Any] | None

Optional precomputed AI availability payload to reuse instead of re-running the runtime import check.

None

Returns:

Type Description
list[ConfigSmokeCheckResult]

Ordered check results with pass, warn, fail, or skipped statuses for each readiness dimension.

Raises:

Type Description
ValueError

Propagated from config/path validation helpers when required targets or configured environments are invalid.

Notes

This helper performs validation and lightweight import/runtime checks only. It does not create or mutate Fabric resources.

Examples:

>>> checks = run_config_smoke_tests(config=my_config, env="Sandbox", notebook_name="00_env_config")
>>> any(c.status == "fail" for c in checks)
False
Source code in src/fabricops_kit/config.py (lines 579–680):
def run_config_smoke_tests(
    config: FrameworkConfig,
    env: str = "Sandbox",
    required_targets: list[str] | None = None,
    check_ai: bool = True,
    check_io_import: bool = False,
    notebook_name: str | None = None,
    ai_result: dict[str, Any] | None = None,
) -> list[ConfigSmokeCheckResult]:
    """Run 00_env_config readiness smoke checks for configuration bootstrap.

    Use this during environment bootstrap to verify Spark availability, Fabric
    runtime context access, required path mappings, notebook naming policy, and
    optional AI/IO import readiness before executing downstream notebook steps.

    Parameters
    ----------
    config : FrameworkConfig
        Validated framework configuration to evaluate.
    env : str, default="Sandbox"
        Environment key used when resolving required target paths.
    required_targets : list[str] | None, optional
        Required targets expected in ``config.path_config``. Defaults to
        ``["Source", "Unified"]`` when not provided (``None``); an explicit
        empty list disables the per-target path checks.
    check_ai : bool, default=True
        Whether to run the Fabric AI availability check.
    check_io_import : bool, default=False
        Whether to test importability of ``fabric_io`` helpers.
    notebook_name : str | None, optional
        Notebook name to validate against configured naming prefixes.
    ai_result : dict[str, Any] | None, optional
        Optional precomputed AI availability payload to reuse instead of
        re-running the runtime import check. Any non-``None`` payload is
        reused, even an empty mapping.
    Returns
    -------
    list[ConfigSmokeCheckResult]
        Ordered check results with ``pass``, ``warn``, ``fail``, or ``skipped``
        statuses for each readiness dimension.

    Raises
    ------
    ValueError
        Propagated from config/path validation helpers when required targets or
        configured environments are invalid.

    Notes
    -----
    This helper performs validation and lightweight import/runtime checks only.
    It does not create or mutate Fabric resources.

    Examples
    --------
    >>> checks = run_config_smoke_tests(config=my_config, env="Sandbox", notebook_name="00_env_config")
    >>> any(c.status == "fail" for c in checks)
    False
    """
    # Imported lazily to avoid a circular import between config and runtime.
    from .runtime import validate_notebook_name

    results: list[ConfigSmokeCheckResult] = []
    # Substitute defaults only when the caller did not supply a value. The
    # previous `required_targets or [...]` also replaced an explicit empty
    # list, contradicting the documented "when not provided" contract.
    if required_targets is None:
        required_targets = ["Source", "Unified"]

    # Spark availability: warn rather than fail so non-Spark/local runs can
    # still proceed through bootstrap.
    spark_ready, spark_message = _check_spark_session()
    results.append(
        ConfigSmokeCheckResult("spark_session", "pass" if spark_ready else "warn", spark_message)
    )

    # Fabric runtime context: reported as skipped (not a failure) outside a
    # real Fabric runtime, e.g. during local development.
    runtime_meta = _get_fabric_runtime_metadata(notebook_name=notebook_name)
    if runtime_meta.get("runtime_available"):
        results.append(
            ConfigSmokeCheckResult(
                "fabric_runtime_context", "pass", "Fabric runtime context is readable."
            )
        )
    else:
        results.append(
            ConfigSmokeCheckResult(
                "fabric_runtime_context",
                "skipped",
                "notebookutils.runtime unavailable outside Fabric runtime.",
            )
        )

    # Path resolution: one result per target. A raised exception (e.g.
    # ValueError from get_path on an unknown env/target) collapses into a
    # single "path_resolution" failure instead of aborting the smoke run.
    try:
        for target in required_targets:
            resolved = get_path(env=env, target=target, config=config)
            missing = [
                attr
                for attr in ("workspace_id", "house_id", "house_name", "root")
                if not getattr(resolved, attr, None)
            ]
            if missing:
                results.append(
                    ConfigSmokeCheckResult(
                        f"path:{target}", "fail", f"Missing required fields: {missing}"
                    )
                )
            elif str(resolved.root).startswith("abfss://"):
                results.append(
                    ConfigSmokeCheckResult(
                        f"path:{target}", "pass", "Path is populated and ABFSS formatted."
                    )
                )
            else:
                # Populated but not ABFSS — usable locally, suspicious in Fabric.
                results.append(
                    ConfigSmokeCheckResult(
                        f"path:{target}", "warn", "Path root is populated but not ABFSS-formatted."
                    )
                )
    except Exception as exc:
        results.append(ConfigSmokeCheckResult("path_resolution", "fail", str(exc)))

    # Notebook naming policy: only checked when a name was supplied.
    if notebook_name:
        errors = validate_notebook_name(notebook_name, config=config)
        message = "; ".join(errors) if errors else "Notebook name is valid."
        results.append(
            ConfigSmokeCheckResult("notebook_naming", "pass" if not errors else "fail", message)
        )
    else:
        results.append(
            ConfigSmokeCheckResult("notebook_naming", "skipped", "Notebook name check skipped.")
        )

    # Fabric AI availability: honour any non-None precomputed payload. The
    # previous `ai_result or check_...()` re-ran the runtime check whenever
    # the payload was falsy (e.g. an empty dict), defeating the documented
    # reuse behaviour.
    if check_ai:
        ai_status = ai_result if ai_result is not None else check_fabric_ai_functions_available()
        results.append(
            ConfigSmokeCheckResult(
                "fabric_ai",
                "pass" if ai_status.get("available") else "warn",
                ai_status.get("message", ""),
            )
        )
    else:
        results.append(ConfigSmokeCheckResult("fabric_ai", "skipped", "AI check disabled."))

    # fabric_io importability (optional): a failed import is a hard fail since
    # downstream IO steps would be unusable.
    if check_io_import:
        try:
            from .fabric_io import lakehouse_table_read  # noqa: F401

            results.append(
                ConfigSmokeCheckResult(
                    "fabric_io_import", "pass", "fabric_io helpers are importable."
                )
            )
        except Exception as exc:
            results.append(ConfigSmokeCheckResult("fabric_io_import", "fail", str(exc)))
    else:
        results.append(
            ConfigSmokeCheckResult("fabric_io_import", "skipped", "IO import check disabled.")
        )
    return results