Skip to content

PyLogShield Logger

The main logger class that extends Python's standard logging.Logger with additional features.

Quick Reference

Python
from pylogshield import get_logger, PyLogShield

# Recommended: Use get_logger for singleton pattern
logger = get_logger("my_app", log_level="INFO", enable_json=True)

# Alternative: Direct instantiation
logger = PyLogShield("my_app", log_level="INFO")

Common Parameters

Parameter Type Default Description
name str Required Logger name
log_level str | int INFO Logging level
enable_json bool False Output JSON format
use_queue bool False Async logging
use_rich bool False Rich console output
rate_limit_seconds float 0.0 Rate limiting interval
log_directory str | Path ~/.logs Log file directory
log_file str {name}.log Log file name
rotate_file bool False Enable log rotation
rotate_max_bytes int 5000000 Max file size before rotation
rotate_backup_count int 5 Number of backup files
add_console bool True Add a console handler on creation
enable_metrics bool False Enable metrics tracking
enable_context_scrubber bool True Remove cloud credentials
enable_context bool False Install ContextFilter; pairs with log_context()/async_log_context()
queue_maxsize int 0 Max async queue size (0 = unbounded); only used when use_queue=True
flowchart TB
    INIT(["PyLogShield(\n  enable_json=True,\n  use_queue=True,\n  enable_context=True\n)"])

    subgraph HANDLERS ["Configured Handlers"]
        F["FileHandler / RotatingFileHandler\n(log_directory / rotate_file)"]
        C["ConsoleHandler / RichHandler\n(add_console / use_rich)"]
        J["JsonFormatter\n(enable_json=True)"]
        Q["QueueHandler → QueueListener\n(use_queue=True, queue_maxsize)"]
    end

    subgraph FILTERS ["Active Filters"]
        CF["ContextFilter\n(enable_context=True)"]
        CS["ContextScrubber\n(enable_context_scrubber=True)"]
        KF["KeywordFilter\n(log_filter=...)"]
    end

    subgraph FEATURES ["Runtime Features"]
        RL["RateLimiter\n(rate_limit_seconds > 0)"]
        MT["LogMetricsHandler\n(enable_metrics=True)"]
    end

    INIT --> HANDLERS
    INIT --> FILTERS
    INIT --> FEATURES

Examples

Basic Logging

Python
from pylogshield import get_logger

logger = get_logger("my_app")

logger.debug("Debug message")
logger.info("Info message")
logger.warning("Warning message")
logger.error("Error message")
logger.critical("Critical message")

Sensitive Data Masking

Python
logger = get_logger("secure_app")

# Enable masking with mask=True
logger.info({"user": "john", "password": "secret"}, mask=True)
# Output: {"user": "john", "password": "***"}

JSON Logging

Python
logger = get_logger("json_app", enable_json=True)

logger.info("User logged in")
# Output: {"timestamp": "2024-01-15T10:30:00+00:00", "level": "INFO", ...}

Log Rotation

Python
logger = get_logger(
    "rotating_app",
    rotate_file=True,
    rotate_max_bytes=10_000_000,  # 10 MB
    rotate_backup_count=5
)

Context Propagation

Python
from pylogshield import get_logger
from pylogshield.context import log_context

logger = get_logger("api", enable_context=True, enable_json=True)

with log_context(request_id="abc-123"):
    logger.info("Processing")
    # JSON output includes request_id field

Exception Logging with Masking

Python
try:
    connect_db(password=secret)
except Exception:
    logger.exception("DB connection failed", mask=True)
    # exception .args are masked; traceback locals are NOT redacted

Warning

mask=True does not redact traceback frame locals. See Sensitive Data Masking for details.

Replacing an Existing Logger (force=True)

Python
import logging

# Third-party code may already have a standard logger registered
logging.getLogger("shared_service")

# get_logger raises TypeError by default — use force=True to replace it
logger = get_logger("shared_service", force=True, enable_json=True)

Warning

force=True emits a UserWarning before replacing the existing logger. Any code that already holds a reference to the old logger will stop receiving records once it is replaced.

Production Setup with Async + Rotation + Context

Python
from pylogshield import get_logger, add_sensitive_fields
from pylogshield.context import log_context

# Register domain-specific sensitive fields once at startup
add_sensitive_fields(["account_number", "sort_code", "national_id"])

logger = get_logger(
    "payments",
    log_level="INFO",
    enable_json=True,           # Structured output for ELK / Datadog
    rotate_file=True,           # Rotate when file hits 50 MB
    rotate_max_bytes=50_000_000,
    rotate_backup_count=10,
    use_queue=True,             # Non-blocking: log calls return immediately
    queue_maxsize=100_000,      # Drop new messages if queue fills (not block)
    rate_limit_seconds=1.0,     # At most 1 identical message per second
    enable_metrics=True,        # Count logs by level
    enable_context=True,        # Allow log_context() injection
)

def process_payment(user_id: int, amount: float, account_number: str) -> None:
    with log_context(user_id=user_id, operation="payment"):
        logger.info(
            {"account_number": account_number, "amount": amount},
            mask=True,          # account_number → "***"
        )
        # ... business logic ...
        logger.info("Payment authorised")

# At application shutdown — flush remaining queued messages
logger.shutdown()
metrics = logger.get_metrics()
print(f"Logged {metrics['count']} records in {metrics['elapsed']:.1f}s")

API Reference

PyLogShield(name: str, *, log_level: Union[LogLevel, str, int] = logging.INFO, enable_json: bool = False, use_queue: bool = False, use_rich: bool = False, rate_limit_seconds: float = 0.0, log_directory: Union[str, Path, None] = None, log_file: Optional[str] = None, rotate_file: bool = False, rotate_max_bytes: int = 5000000, rotate_backup_count: int = 5, add_console: bool = True, enable_metrics: bool = False, log_filter: Optional[Union[logging.Filter, KeywordFilter, Iterable[str]]] = None, enable_context_scrubber: bool = True, enable_context: bool = False, queue_maxsize: int = 0)

Bases: Logger

A structured logger with redaction, rate limiting, async support, and context scrubbing.

This logger extends Python's standard logging.Logger with features commonly needed in data engineering and production environments:

  • Sensitive Data Masking: Automatically redact passwords, tokens, API keys, etc.
  • Rate Limiting: Prevent log flooding from repetitive messages.
  • JSON Formatting: Structured logging for log aggregation systems.
  • Async Logging: Non-blocking logging via queue handlers.
  • Rich Console: Colorized output for development environments.
  • Context Scrubbing: Remove cloud credentials from log records.
  • Metrics: Track log volume and rates.
PARAMETER DESCRIPTION

name

The name of the logger.

TYPE: str

log_level

The logging level. Default is logging.INFO.

TYPE: LogLevel or str or int DEFAULT: INFO

enable_json

Whether to output logs in JSON format. Default is False.

TYPE: bool DEFAULT: False

use_queue

Whether to use async logging via queue handlers. Default is False.

TYPE: bool DEFAULT: False

use_rich

Whether to use Rich library for colorized console output. Default is False.

TYPE: bool DEFAULT: False

rate_limit_seconds

Minimum seconds between identical log messages. Default is 0.0 (disabled).

TYPE: float DEFAULT: 0.0

log_directory

Directory for log files. Default is ~/.logs.

TYPE: str or Path or None DEFAULT: None

log_file

Name of the log file. Default is "{name}.log".

TYPE: str or None DEFAULT: None

rotate_file

Whether to enable log file rotation. Default is False.

TYPE: bool DEFAULT: False

rotate_max_bytes

Maximum file size before rotation. Default is 5,000,000 bytes.

TYPE: int DEFAULT: 5000000

rotate_backup_count

Number of backup files to keep. Default is 5.

TYPE: int DEFAULT: 5

add_console

Whether to add a console handler. Default is True.

TYPE: bool DEFAULT: True

enable_metrics

Whether to enable logging metrics. Default is False.

TYPE: bool DEFAULT: False

log_filter

Filter for log messages. Default is None.

TYPE: Filter or KeywordFilter or Iterable[str] or None DEFAULT: None

enable_context_scrubber

Whether to scrub cloud credentials from log records. Default is True.

TYPE: bool DEFAULT: True

enable_context

Whether to enable context propagation via log_context / async_log_context. When True, a ContextFilter is added to the logger so that any fields set in the active context block are automatically attached to every log record. Default is False.

TYPE: bool DEFAULT: False

queue_maxsize

Maximum size of the async logging queue when use_queue=True. 0 means unbounded. Positive integers cap the queue size; when the queue is full, new messages are dropped (non-blocking put). Default is 0.

TYPE: int DEFAULT: 0

ATTRIBUTE DESCRIPTION
log_level

Current logging level.

TYPE: int

enable_json

Whether JSON formatting is enabled.

TYPE: bool

limiter

Rate limiter instance (if rate limiting is enabled).

TYPE: RateLimiter or None

metrics_handler

Metrics handler (if metrics are enabled).

TYPE: LogMetricsHandler or None

log_directory

Directory where log files are stored.

TYPE: Path

log_file

Name of the log file.

TYPE: str

log_file_path

Full path to the log file.

TYPE: Path

Examples:

Python Console Session
>>> from pylogshield import get_logger
>>> logger = get_logger("my_app", log_level="INFO", enable_json=True)
>>> logger.info({"user": "john", "password": "secret123"}, mask=True)
# Output: {"user": "john", "password": "***"}
METHOD DESCRIPTION
info
debug
warning
error
critical
exception

Log an error with exception info.

set_log_level

Change the logger and handler levels at runtime.

get_metrics

Return logging metrics if metrics are enabled.

context

Return a sync context manager that injects fields into all logs.

async_context

Return an async context manager that injects fields into all logs.

shutdown

Stop any background listener and clean up resources.

from_config

Create a PyLogShield instance from a dictionary configuration.

add_sensitive_fields

Add field names to the sensitive data redaction registry.

Source code in src/pylogshield/core.py
Python
def __init__(
    self,
    name: str,
    *,
    log_level: Union[LogLevel, str, int] = logging.INFO,
    enable_json: bool = False,
    use_queue: bool = False,
    use_rich: bool = False,
    rate_limit_seconds: float = 0.0,
    log_directory: Union[str, Path, None] = None,
    log_file: Optional[str] = None,
    rotate_file: bool = False,
    rotate_max_bytes: int = 5_000_000,
    rotate_backup_count: int = 5,
    add_console: bool = True,
    enable_metrics: bool = False,
    log_filter: Optional[
        Union[logging.Filter, KeywordFilter, Iterable[str]]
    ] = None,
    enable_context_scrubber: bool = True,
    enable_context: bool = False,
    queue_maxsize: int = 0,
) -> None:
    """Initialise the logger and wire up handlers, filters, and features.

    Builds the handler list (console and/or file), applies per-handler
    filters (optional keyword filter, then the credential scrubber),
    optionally installs a logger-level ContextFilter, and finally either
    attaches the handlers directly or routes them through a queue
    handler/listener pair when ``use_queue=True``.  See the class
    docstring for the meaning of each parameter.
    """
    # Resolve the level once; it is set on the logger itself and reused
    # for every handler created below.
    resolved_level = self._resolve_log_level(log_level)
    super().__init__(name, level=resolved_level)
    self.log_level = resolved_level

    self.enable_json = enable_json
    # Rate limiting is opt-in: a non-positive interval disables it.
    self.limiter = (
        RateLimiter(min_interval=rate_limit_seconds)
        if rate_limit_seconds > 0
        else None
    )
    self.metrics_handler = LogMetricsHandler() if enable_metrics else None
    self._queue_listener: Optional[QueueListener] = None
    self.log_directory = self._initialize_log_directory(log_directory)
    # File name defaults to "<logger name>.log" inside log_directory.
    self.log_file = log_file or f"{name}.log"
    self.log_file_path = self.log_directory / self.log_file

    # Handlers are collected first so the filters below can be applied
    # uniformly, and so the whole set can be swapped behind a queue when
    # async logging is requested.
    handlers: List[logging.Handler] = []

    if add_console:
        handlers.append(
            create_rich_handler(self.log_level)
            if use_rich
            else create_console_handler(self.log_level, json_format=enable_json)
        )

    if self.log_file_path:
        ensure_log_dir(str(self.log_file_path))
        if rotate_file:
            handlers.append(
                create_rotating_file_handler(
                    self.log_file_path,
                    self.log_level,
                    max_bytes=rotate_max_bytes,
                    backup_count=rotate_backup_count,
                    json_format=enable_json,
                )
            )
        else:
            handlers.append(
                create_file_handler(
                    self.log_file_path, self.log_level, json_format=enable_json
                )
            )

    if enable_metrics and self.metrics_handler is not None:
        handlers.append(self.metrics_handler)

    # Per-handler filters: the keyword filter (if configured) is added
    # before the scrubber, so it runs first on each handler.
    if log_filter is not None:
        filt = self._normalize_log_filter(log_filter)
        for h in handlers:
            h.addFilter(filt)

    if enable_context_scrubber:
        scrubber = ContextScrubber()
        for h in handlers:
            h.addFilter(scrubber)

    if enable_context:
        # Add to the logger (not individual handlers) so the filter fires
        # once per record in the calling thread/task — before the record
        # is handed off to any QueueHandler — ensuring the ContextVar is
        # read while the correct context is still active.
        self.addFilter(ContextFilter())

    if use_queue and handlers:
        # Async mode: the logger only sees the lightweight queue handler;
        # the real handlers run on the listener's background thread.
        q: Queue = Queue(queue_maxsize)
        self.addHandler(_SilentQueueHandler(q))
        self._queue_listener = QueueListener(
            q, *handlers, respect_handler_level=True
        )
        self._queue_listener.start()
    else:
        for h in handlers:
            self.addHandler(h)

info(msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None

Source code in src/pylogshield/core.py
Python
def info(self, msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None:
    """Log *msg* at INFO severity; pass ``mask=True`` to redact sensitive fields."""
    self._log_with_processing(logging.INFO, msg, *args, mask=mask, **kwargs)

debug(msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None

Source code in src/pylogshield/core.py
Python
def debug(self, msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None:
    """Log *msg* at DEBUG severity; pass ``mask=True`` to redact sensitive fields."""
    self._log_with_processing(logging.DEBUG, msg, *args, mask=mask, **kwargs)

warning(msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None

Source code in src/pylogshield/core.py
Python
def warning(self, msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None:
    """Log *msg* at WARNING severity; pass ``mask=True`` to redact sensitive fields."""
    self._log_with_processing(logging.WARNING, msg, *args, mask=mask, **kwargs)

error(msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None

Source code in src/pylogshield/core.py
Python
def error(self, msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None:
    """Log *msg* at ERROR severity; pass ``mask=True`` to redact sensitive fields."""
    self._log_with_processing(logging.ERROR, msg, *args, mask=mask, **kwargs)

critical(msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None

Source code in src/pylogshield/core.py
Python
def critical(self, msg: Any, *args: Any, mask: bool = False, **kwargs: Any) -> None:
    """Log *msg* at CRITICAL severity; pass ``mask=True`` to redact sensitive fields."""
    self._log_with_processing(logging.CRITICAL, msg, *args, mask=mask, **kwargs)

exception(msg: Any, *args: Any, mask: bool = False, exc_info: bool = True, **kwargs: Any) -> None

Log an error with exception info.

When mask=True, string args of the active exception are scrubbed before the record is emitted.

Note: traceback locals are not masked — use a traceback-filtering tool if local variable values must also be redacted.

Source code in src/pylogshield/core.py
Python
def exception(  # type: ignore[override]
    self,
    msg: Any,
    *args: Any,
    mask: bool = False,
    exc_info: bool = True,
    **kwargs: Any,
) -> None:
    """Log *msg* at ERROR level together with the active exception.

    With ``mask=True``, the string args of the current exception are
    scrubbed before the record is emitted.  Traceback *locals* are never
    masked — use a traceback-filtering tool if local variable values must
    also be redacted.
    """
    self._log_with_processing(
        logging.ERROR,
        msg,
        *args,
        mask=mask,
        exc_info=exc_info,
        **kwargs,
    )

set_log_level(level: Union[LogLevel, str, int]) -> None

Change the logger and handler levels at runtime.

PARAMETER DESCRIPTION

level

The new logging level to set.

TYPE: LogLevel or str or int

Source code in src/pylogshield/core.py
Python
def set_log_level(self, level: Union[LogLevel, str, int]) -> None:
    """Change the logger and handler levels at runtime.

    Parameters
    ----------
    level : LogLevel or str or int
        The new logging level to set.
    """
    new_level = self._resolve_log_level(level)
    # Keep the logger, the cached attribute, and every attached handler
    # in sync so no handler keeps filtering at the old threshold.
    self.setLevel(new_level)
    self.log_level = new_level
    for attached in self.handlers:
        attached.setLevel(new_level)

get_metrics() -> Optional[Dict[str, Any]]

Return logging metrics if metrics are enabled.

RETURNS DESCRIPTION
dict or None

Dictionary with log counts and rates per level, total count, and elapsed time. Returns None if metrics are disabled.

Examples:

Python Console Session
>>> logger = get_logger("app", enable_metrics=True)
>>> logger.info("test")
>>> metrics = logger.get_metrics()
>>> print(f"Total logs: {metrics['count']}")
Source code in src/pylogshield/core.py
Python
def get_metrics(self) -> Optional[Dict[str, Any]]:
    """Return logging metrics if metrics are enabled.

    Returns
    -------
    dict or None
        Per-level log counts and rates plus total count and elapsed time,
        or ``None`` when the logger was built without ``enable_metrics``.

    Examples
    --------
    >>> logger = get_logger("app", enable_metrics=True)
    >>> logger.info("test")
    >>> metrics = logger.get_metrics()
    >>> print(f"Total logs: {metrics['count']}")
    """
    handler = self.metrics_handler
    if handler is None:
        return None
    return handler.logs_per_second()

context(**fields: Any)

Return a sync context manager that injects fields into all logs.

Shorthand for log_context. Requires the logger to have been created with enable_context=True.

PARAMETER DESCRIPTION

**fields

Key/value pairs to attach to every log record emitted inside the with block.

TYPE: Any DEFAULT: {}

Examples:

Python Console Session
>>> with logger.context(request_id="abc", user_id=42):
...     logger.info("Processing order")
Source code in src/pylogshield/core.py
Python
def context(self, **fields: Any):
    """Return a sync context manager injecting *fields* into all logs.

    Delegates to :func:`~pylogshield.context.log_context`.  The logger
    must have been created with ``enable_context=True`` for the injected
    fields to show up on records.

    Parameters
    ----------
    **fields : Any
        Key/value pairs attached to every log record emitted inside the
        ``with`` block.

    Examples
    --------
    >>> with logger.context(request_id="abc", user_id=42):
    ...     logger.info("Processing order")
    """
    return log_context(**fields)

async_context(**fields: Any)

Return an async context manager that injects fields into all logs.

Shorthand for async_log_context. Requires the logger to have been created with enable_context=True.

PARAMETER DESCRIPTION

**fields

Key/value pairs to attach to every log record emitted inside the async with block.

TYPE: Any DEFAULT: {}

Examples:

Python Console Session
>>> async with logger.async_context(request_id="xyz"):
...     logger.info("Async handler")
Source code in src/pylogshield/core.py
Python
def async_context(self, **fields: Any):
    """Return an async context manager injecting *fields* into all logs.

    Delegates to :func:`~pylogshield.context.async_log_context`.  The
    logger must have been created with ``enable_context=True`` for the
    injected fields to show up on records.

    Parameters
    ----------
    **fields : Any
        Key/value pairs attached to every log record emitted inside the
        ``async with`` block.

    Examples
    --------
    >>> async with logger.async_context(request_id="xyz"):
    ...     logger.info("Async handler")
    """
    return async_log_context(**fields)

shutdown() -> None

Stop any background listener and clean up resources.

This should be called when the logger is no longer needed to properly stop the background queue listener thread if async logging is enabled.

Source code in src/pylogshield/core.py
Python
def shutdown(self) -> None:
    """Stop the background queue listener, if one is running.

    Call this when the logger is no longer needed so the queue listener
    thread started for async logging terminates cleanly.  Safe to call
    repeatedly: after the first call the listener reference is cleared,
    making further calls no-ops.
    """
    listener = self._queue_listener
    if listener is not None:
        listener.stop()
        self._queue_listener = None

from_config(name: str, config: Mapping[str, Any]) -> 'PyLogShield' classmethod

Create a PyLogShield instance from a dictionary configuration.

PARAMETER DESCRIPTION

name

The name of the logger.

TYPE: str

config

Configuration dictionary with optional keys: level, enable_json, use_queue, use_rich, rate_limit_seconds, log_directory, log_file, rotate_file, rotate_max_bytes, rotate_backup_count, add_console, enable_metrics, log_filter, enable_context_scrubber, enable_context, queue_maxsize.

TYPE: Mapping[str, Any]

RETURNS DESCRIPTION
PyLogShield

A new PyLogShield instance configured from the provided dictionary.

Examples:

Python Console Session
>>> config = {"level": "DEBUG", "enable_json": True, "rotate_file": True}
>>> logger = PyLogShield.from_config("my_app", config)
Source code in src/pylogshield/core.py
Python
@classmethod
def from_config(cls, name: str, config: Mapping[str, Any]) -> "PyLogShield":
    """Create a PyLogShield instance from a dictionary configuration.

    Parameters
    ----------
    name : str
        The name of the logger.
    config : Mapping[str, Any]
        Configuration dictionary with optional keys: level, enable_json,
        use_queue, use_rich, rate_limit_seconds, log_directory, log_file,
        rotate_file, rotate_max_bytes, rotate_backup_count, add_console,
        enable_metrics, log_filter, enable_context_scrubber, enable_context,
        queue_maxsize.

    Returns
    -------
    PyLogShield
        A new PyLogShield instance configured from the provided dictionary.

    Examples
    --------
    >>> config = {"level": "DEBUG", "enable_json": True, "rotate_file": True}
    >>> logger = PyLogShield.from_config("my_app", config)
    """

    def _coerce_filter(raw: Any) -> Optional[logging.Filter]:
        # Accept a {"keywords": [...]} mapping, a bare collection of
        # keywords, or an already-constructed logging.Filter; anything
        # else (including None) yields no filter.
        if isinstance(raw, dict) and "keywords" in raw:
            return KeywordFilter(
                raw.get("keywords", []),
                include=bool(raw.get("include", True)),
                case_insensitive=bool(raw.get("case_insensitive", True)),
            )
        if isinstance(raw, (list, tuple, set, frozenset)):
            return KeywordFilter(list(raw))
        if isinstance(raw, logging.Filter):
            return raw
        return None

    return cls(
        name,
        log_level=LogLevel.parse(config.get("level", "INFO")),
        enable_json=bool(config.get("enable_json", False)),
        use_queue=bool(config.get("use_queue", False)),
        use_rich=bool(config.get("use_rich", False)),
        rate_limit_seconds=float(config.get("rate_limit_seconds", 0.0)),
        log_directory=config.get("log_directory"),
        log_file=config.get("log_file"),
        rotate_file=bool(config.get("rotate_file", False)),
        rotate_max_bytes=int(config.get("rotate_max_bytes", 5_000_000)),
        rotate_backup_count=int(config.get("rotate_backup_count", 5)),
        add_console=bool(config.get("add_console", True)),
        enable_metrics=bool(config.get("enable_metrics", False)),
        log_filter=_coerce_filter(config.get("log_filter")),
        enable_context_scrubber=bool(config.get("enable_context_scrubber", True)),
        enable_context=bool(config.get("enable_context", False)),
        queue_maxsize=int(config.get("queue_maxsize", 0)),
    )

add_sensitive_fields(fields: List[str]) -> None staticmethod

Add field names to the sensitive data redaction registry.

PARAMETER DESCRIPTION

fields

List of field names to add to the sensitive registry.

TYPE: list of str

Examples:

Python Console Session
>>> PyLogShield.add_sensitive_fields(["ssn", "credit_card"])
Source code in src/pylogshield/core.py
Python
@staticmethod
def add_sensitive_fields(fields: List[str]) -> None:
    """Register extra field names with the sensitive-data redaction registry.

    Parameters
    ----------
    fields : list of str
        Field names whose values should be treated as sensitive from now on.

    Examples
    --------
    >>> PyLogShield.add_sensitive_fields(["ssn", "credit_card"])
    """
    # Pure delegation to the package-level configuration helper.
    cfg_add_sensitive_fields(fields)

get_logger Function

get_logger(name: str = 'default_logger', *, force: bool = False, **kwargs: Any) -> PyLogShield

Return a named PyLogShield instance, creating it if necessary.

This is the recommended way to obtain a PyLogShield instance. It integrates with Python's logging manager to ensure logger names are unique and reusable.

PARAMETER DESCRIPTION

name

Logger name. Default is "default_logger".

TYPE: str DEFAULT: 'default_logger'

force

If True, replace a non-PyLogShield with the same name. Default is False.

TYPE: bool DEFAULT: False

**kwargs

Additional arguments passed to the PyLogShield constructor when creating a new instance (e.g., log_level, enable_json, use_rich).

TYPE: Any DEFAULT: {}

RETURNS DESCRIPTION
PyLogShield

A PyLogShield instance with the specified name.

RAISES DESCRIPTION
TypeError

If a logger with the name exists but is not a PyLogShield and force=False.

Examples:

Python Console Session
>>> logger = get_logger("my_app", log_level="DEBUG", enable_json=True)
>>> logger.info("Application started")
Python Console Session
>>> # Get the same logger instance later
>>> same_logger = get_logger("my_app")
>>> same_logger is logger
True
Source code in src/pylogshield/__init__.py
Python
def get_logger(
    name: str = "default_logger", *, force: bool = False, **kwargs: Any
) -> PyLogShield:
    """Return a named PyLogShield instance, creating it if necessary.

    This is the recommended way to obtain a PyLogShield instance. It integrates
    with Python's logging manager to ensure logger names are unique and reusable.

    Parameters
    ----------
    name : str, optional
        Logger name. Default is "default_logger".
    force : bool, optional
        If True, replace a non-PyLogShield with the same name.
        Default is False.
    **kwargs : Any
        Additional arguments passed to the PyLogShield constructor when
        creating a new instance (e.g., log_level, enable_json, use_rich).

    Returns
    -------
    PyLogShield
        A PyLogShield instance with the specified name.

    Raises
    ------
    TypeError
        If a logger with the name exists but is not a PyLogShield
        and force=False.

    Examples
    --------
    >>> logger = get_logger("my_app", log_level="DEBUG", enable_json=True)
    >>> logger.info("Application started")

    >>> # Get the same logger instance later
    >>> same_logger = get_logger("my_app")
    >>> same_logger is logger
    True
    """
    # logging._acquireLock()/_releaseLock() were removed in Python 3.13;
    # the module-level re-entrant lock they guarded is still present on
    # all supported versions, so take it directly as a context manager.
    with logging._lock:  # type: ignore[attr-defined]
        existing = logging.Logger.manager.loggerDict.get(name)

        # A PlaceHolder means only a dotted *child* logger (e.g. "app.db")
        # has been created so far — no real logger named `name` exists to
        # return or clash with, so treat it the same as absent.
        if existing is not None and not isinstance(existing, logging.PlaceHolder):
            # Check if it's a PyLogShield (direct instance check is most reliable)
            if isinstance(existing, PyLogShield):
                return existing

            # Duck-typed compatibility check for subclasses or similar implementations
            if hasattr(existing, "_log_with_processing") and hasattr(existing, "_mask"):
                return existing  # type: ignore[return-value]

            if not force:
                raise TypeError(
                    f"Logger '{name}' already exists but is not a compatible PyLogShield. "
                    f"Actual type: {type(existing).__name__}. "
                    f"Use force=True to replace it."
                )

            warnings.warn(
                f"get_logger: replacing existing logger '{name}' "
                f"(type: {type(existing).__name__}) with a new PyLogShield instance. "
                f"Any references to the old logger will no longer receive log records.",
                UserWarning,
                stacklevel=3,
            )
            logging.Logger.manager.loggerDict.pop(name, None)

        logger = PyLogShield(name=name, **kwargs)
        # NOTE(review): inserting straight into loggerDict does not re-parent
        # existing child loggers the way Manager.getLogger() would — confirm
        # this matches the intended logger-hierarchy semantics.
        logging.Logger.manager.loggerDict[name] = logger
    return logger