
@ a7a57e6e:a2e75f5f
2025-02-26 20:47:35
here is a PyTorch implementation that trains a model to predict the correct input values from the output values of SHA-256 hashes
//
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np
import argparse
import hashlib
import random
import os

class NeuralNetwork(nn.Module):
    # Simple fully connected network: 256 hash bits in, 256 predicted input bits out.
    def __init__(self):
        super(NeuralNetwork, self).__init__()
        self.layer1 = nn.Linear(256, 256)
        self.layer2 = nn.Linear(256, 256)
        self.output_layer = nn.Linear(256, 256)

    def forward(self, x):
        x = torch.relu(self.layer1(x))
        x = torch.relu(self.layer2(x))
        x = self.output_layer(x)
        return x

def train_model(model, X_train, y_train, epochs=10, batch_size=32, save_path='model_weights.pth'):
    # Train with MSE loss on the bit vectors, reshuffling the samples each epoch.
    criterion = nn.MSELoss()
    optimizer = optim.Adam(model.parameters())
    X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
    y_train_tensor = torch.tensor(y_train, dtype=torch.float32)
    for epoch in range(epochs):
        permutation = torch.randperm(X_train_tensor.size()[0])
        for i in range(0, X_train_tensor.size()[0], batch_size):
            indices = permutation[i:i + batch_size]
            batch_x, batch_y = X_train_tensor[indices], y_train_tensor[indices]
            optimizer.zero_grad()
            outputs = model(batch_x)
            loss = criterion(outputs, batch_y)
            loss.backward()
            optimizer.step()
    torch.save(model.state_dict(), save_path)
    print(f"Model weights saved to {save_path}")

def train(args):
    # Generate random 256-bit inputs and their SHA-256 digests as training pairs.
    # Upper bound is 2**256 - 1 so every value fits in 32 bytes for to_bytes().
    random_numbers = [random.randint(0, 2**256 - 1) for _ in range(100000)]
    random_numbers_bitlists = []
    inputs = []
    try:
        os.remove("trainingdata")
    except OSError:
        pass
    with open("trainingdata", "a") as samplesfile:
        for number in random_numbers:
            # Encode the input as a least-significant-bit-first list of 256 bits.
            number_bit_list = [int((number >> i) & 1) for i in range(256)]
            random_numbers_bitlists.append(number_bit_list)
            number_bytes = number.to_bytes(32, byteorder='big')
            digest = hashlib.sha256(number_bytes).digest()
            hashint = int.from_bytes(digest, byteorder='big')
            # Encode the digest the same way; this is the network's input.
            bit_list = [int((hashint >> i) & 1) for i in range(256)]
            samplesfile.write(f"{hex(hashint)[2:]} {hex(number)[2:]}\n")
            inputs.append(bit_list)
    X_train = np.array(inputs, dtype=np.uint8)
    y_train = np.array(random_numbers_bitlists, dtype=np.uint8)
    model = NeuralNetwork()
    train_model(model, X_train, y_train, epochs=args.num_epochs)

def process_infer_input(input_string):
    # Accept either a 64-character hex digest or a 256-character binary string.
    if len(input_string) == 64:
        hashint = int(input_string, 16)
    else:
        hashint = int(input_string, 2)
    # Use the same least-significant-bit-first encoding as the training data,
    # otherwise the bit order at inference would not match training.
    binary_values = [int((hashint >> i) & 1) for i in range(256)]
    return np.array(binary_values).reshape(1, -1)

def infer(args):
    X_test = process_infer_input(args.input)
    X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
    model = NeuralNetwork()
    model.load_state_dict(torch.load('model_weights.pth'))
    model.eval()
    with torch.no_grad():
        predictions = model(X_test_tensor)
    # Round the raw outputs to bits and clamp them to {0, 1}.
    bits = np.clip(np.round(predictions.numpy()).flatten(), 0, 1).astype(int)
    # The bits are least-significant-first, matching the training encoding.
    value = sum(int(b) << i for i, b in enumerate(bits))
    binstr = ''.join(map(str, bits))
    print(f"bin: {binstr}")
    print(f"hex: {value:x}")

def main():
    parser = argparse.ArgumentParser(description='Train or infer with a neural network.')
    subparsers = parser.add_subparsers(dest='command')
    train_parser = subparsers.add_parser('train', help='Train the model')
    train_parser.add_argument('num_epochs', type=int, nargs='?', default=1, help='Number of epochs to train')
    train_parser.set_defaults(func=train)
    infer_parser = subparsers.add_parser('infer', help='Infer with the model')
    infer_parser.add_argument('input', type=str, help='SHA-256 digest to invert, as a 64-char hex string or 256-char binary string')
    infer_parser.set_defaults(func=infer)
    args = parser.parse_args()
    if 'func' in args:
        args.func(args)
    else:
        parser.print_help()


if __name__ == "__main__":
    main()
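
for a quick end-to-end check of the infer subcommand, a short snippet like the one below (a minimal sketch, separate from the script above) generates a known input/digest pair; feed the printed digest to infer and compare the prediction against the printed input. the script filename in the comment is a placeholder for wherever the code above is saved.

import hashlib
import random

# Pick a random 256-bit test input and compute its SHA-256 digest.
test_input = random.getrandbits(256)
digest = hashlib.sha256(test_input.to_bytes(32, byteorder='big')).hexdigest()

print(f"input (hex):  {test_input:064x}")
print(f"digest (hex): {digest}")
# Pass the digest to the infer subcommand of the saved script, e.g.:
#   python <saved_script>.py infer <digest hex>
# and compare its "hex:" output with the input printed above.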