#!/usr/bin/env python3
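"""Compare logits produced by PyTorch and by llama.cpp for the same model.

Given --model-path, the script derives the model name from the directory
name and reads data/pytorch-<model>.bin (reference) and
data/llamacpp-<model>.bin (test), computes NMSE = MSE / Var(reference)
along with a few auxiliary error metrics, prints an interpretation, and
exits with status 0 when NMSE < 1e-2 and status 1 otherwise.
"""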
import numpy as np
import sys
import os
import argparse
from pathlib import Path


def calculate_nmse(reference, test):
    """Return (NMSE, MSE, reference variance) for two equally shaped arrays."""
    mse = np.mean((test - reference) ** 2)
    ref_var = np.var(reference)
    if ref_var == 0:
        # Degenerate case: a constant reference has zero variance, so NMSE
        # is 0 for a perfect match and infinite otherwise.
        nmse = float('inf') if mse > 0 else 0.0
        return nmse, mse, ref_var
    nmse = mse / ref_var
    return nmse, mse, ref_var


def load_logits(file_path):
    """Load logits from a .npy file, a raw float32 .bin dump, or a text file."""
    file_path = Path(file_path)  # accept both str and Path arguments
    if not file_path.exists():
        raise FileNotFoundError(f"File not found: {file_path}")
    if file_path.suffix == '.npy':
        return np.load(file_path)
    elif file_path.suffix == '.bin':
        return np.fromfile(file_path, dtype=np.float32)
    else:
        # Try to load as a text file.
        try:
            # If it uses the indexed format "index: value", keep only the values.
            data = []
            with open(file_path, 'r') as f:
                for line in f:
                    if ':' in line:
                        # Format: "index: value"
                        value = float(line.split(':')[1].strip())
                    else:
                        # Just the value
                        value = float(line.strip())
                    data.append(value)
            return np.array(data, dtype=np.float32)
        except ValueError:
            # Fall back to NumPy's generic text loader.
            return np.loadtxt(file_path, dtype=np.float32)


def interpret_nmse(nmse):
    """Provide interpretation of NMSE value"""
    if nmse == 0:
        return "Perfect match", "🎯"
    elif nmse < 1e-6:
        return "Essentially identical", "✅"
    elif nmse < 1e-4:
        return "Excellent match", "✅"
    elif nmse < 1e-3:
        return "Very good match", "👍"
    elif nmse < 1e-2:
        return "Good match", "👍"
    elif nmse < 0.1:
        return "Acceptable match", "⚠️"
    elif nmse < 1.0:
        return "Poor match", "❌"
    else:
        return "Very poor match (worse than noise)", "❌"


def main():
    parser = argparse.ArgumentParser(description='Validate model logits')
    parser.add_argument('-m', '--model-path', required=True, help='Path to the model directory')
    args = parser.parse_args()

    # Derive the model name from the directory name (normpath tolerates a trailing slash).
    model_name = os.path.basename(os.path.normpath(args.model_path))
    data_dir = Path("data")
    pytorch_file = data_dir / f"pytorch-{model_name}.bin"
    llamacpp_file = data_dir / f"llamacpp-{model_name}.bin"

    print(f"Model name: {model_name}")
    print(f"PyTorch logits file: {pytorch_file}")
    print(f"llama.cpp logits file: {llamacpp_file}")

    reference_file = pytorch_file
    test_file = llamacpp_file

    print("🔍 NMSE Check for Model Comparison")
    print("=" * 50)
    print(f"Reference (ground truth): {reference_file}")
    print(f"Test (to evaluate): {test_file}")
    print()
    try:
        print("Loading reference logits...")
        reference = load_logits(reference_file)
        print(f"  Shape: {reference.shape}, Type: {reference.dtype}")

        print("Loading test logits...")
        test = load_logits(test_file)
        print(f"  Shape: {test.shape}, Type: {test.dtype}")

        # Check shapes match
        if reference.shape != test.shape:
            print(f"\n❌ Error: Shape mismatch!")
            print(f"  Reference: {reference.shape}")
            print(f"  Test: {test.shape}")
            sys.exit(1)
        print(f"\n✅ Shapes match: {reference.shape}")

        nmse, mse, ref_var = calculate_nmse(reference, test)

        # Additional metrics
        max_abs_error = np.max(np.abs(test - reference))
        mean_abs_error = np.mean(np.abs(test - reference))
        # Results
        print(f"\n📊 METRICS")
        print("=" * 30)
        print(f"MSE (Mean Squared Error): {mse:.6e}")
        print(f"Reference Variance: {ref_var:.6e}")
        print(f"NMSE: {nmse:.6e}")
        print(f"Max Absolute Error: {max_abs_error:.6f}")
        print(f"Mean Absolute Error: {mean_abs_error:.6f}")

        # NMSE in dB (common in signal processing)
        if nmse > 0:
            nmse_db = 10 * np.log10(nmse)
            print(f"NMSE (dB): {nmse_db:.2f} dB")

        # Interpretation
        interpretation, emoji = interpret_nmse(nmse)
        print(f"\n🎯 INTERPRETATION")
        print("=" * 30)
        print(f"{emoji} {interpretation}")

        # Detailed guidance
        print(f"\n📋 GUIDANCE")
        print("=" * 30)
        if nmse < 1e-3:
            print("✅ EXCELLENT: Your GGML conversion is working very well!")
            print("   The differences are negligible for practical use.")
        elif nmse < 1e-2:
            print("👍 GOOD: Your GGML conversion is working well.")
            print("   Small differences are likely due to precision/quantization.")
        elif nmse < 0.1:
            print("⚠️ ACCEPTABLE: Conversion is working but with some differences.")
            print("   Check if you're using quantization (Q4, Q8, etc.)")
            print("   Test generation quality to see if it's acceptable.")
        else:
            print("❌ PROBLEMATIC: Large differences detected.")
            print("   Check your conversion process for potential issues.")
            print("   Verify you're using the same model weights.")

        # NMSE benchmarks
        print(f"\n📏 NMSE BENCHMARKS")
        print("=" * 30)
        print("< 1e-6: Essentially identical")
        print("< 1e-4: Excellent (typical for good conversions)")
        print("< 1e-3: Very good")
        print("< 1e-2: Good (acceptable for most use cases)")
        print("< 0.1: Acceptable (may need verification)")
        print("> 1.0: Poor (worse than random)")
        # Exit code based on NMSE
        if nmse < 1e-2:
            print(f"\n✅ RESULT: PASS (NMSE = {nmse:.2e})")
            sys.exit(0)
        else:
            print(f"\n❌ RESULT: NEEDS REVIEW (NMSE = {nmse:.2e})")
            sys.exit(1)

    except Exception as e:
        print(f"❌ Error: {e}")
        sys.exit(1)


if __name__ == "__main__":
    main()