Skip to content

Releases: spockoo/QuantumBinaryMiner

z15v2.py

30 Aug 14:34
247a2c8
Compare
Choose a tag to compare

import numpy as np
import cirq
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.backend import clear_session
import keras_tuner as kt
import os
import time
import pickle
import gc
import random
import string
from matplotlib import pyplot as plt
from matplotlib.animation import FuncAnimation
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split
from subprocess import Popen

# Path configuration (the '#' markers were lost in the paste; restored here)
MINER_PATH = "C:/TOYOURFOLDER/NBminer_Win"
MINER_EXECUTABLE = "nbminer.exe"
POOL_URL = "POOL"
USER = "USER"
PASSWORD = "x"

INTERCEPT_CONSTANT = 0.60
BATCH_SIZE = 10  # Batch size for writing to disk

MODEL_FILE_PATH = "model.keras"
CIRCUIT_FILE_PATH = "quantum_circuit.pkl"

# Initial repetition count for the quantum simulation
quantum_circuit_repetitions = 10
initial_qubits = 2

def generate_unique_filename(prefix="file", extension=".log"):
    """Build a collision-resistant file path in the current working directory.

    The basename is ``<prefix><YYYYmmdd_HHMMSS><6 random alphanumerics><extension>``;
    the timestamp plus the random suffix makes collisions very unlikely.

    Parameters:
    - prefix (str): Leading part of the file name.
    - extension (str): File extension, including the dot.

    Returns:
    - str: Absolute-ish path (cwd-joined) of the generated file name.
    """
    timestamp = time.strftime("%Y%m%d_%H%M%S")
    random_id = ''.join(random.choices(string.ascii_letters + string.digits, k=6))
    filename = f"{prefix}{timestamp}{random_id}{extension}"
    return os.path.join(os.getcwd(), filename)

def log_data(data, log_file_path):
    """Append *data* plus a newline to the log file at *log_file_path*.

    Errors are reported to stdout instead of being raised, so a logging
    failure never aborts the caller.
    """
    try:
        with open(log_file_path, "a") as log_file:
            log_file.write(data + "\n")
    except Exception as e:
        print(f"Erreur lors de l'enregistrement des données: {e}")

def save_file(obj, file_path, log_file_path, mode='wb'):
    """Pickle *obj* to *file_path*, logging the outcome via `log_data`.

    Parameters:
    - obj: Any picklable object.
    - file_path (str): Destination path.
    - log_file_path (str): Path of the log file for success/error messages.
    - mode (str): File-open mode; must be a binary write mode for pickle.

    Any exception (I/O or pickling) is swallowed after being logged.
    """
    try:
        with open(file_path, mode) as file:
            pickle.dump(obj, file)
        log_data(f"Fichier sauvegardé à {file_path}", log_file_path)
    except Exception as e:
        log_data(f"Erreur lors de la sauvegarde du fichier: {e}", log_file_path)

def load_file(file_path, log_file_path, mode='rb'):
    """Unpickle and return the object stored at *file_path*.

    Returns None (after logging the reason) when the file is missing or any
    error occurs while reading/unpickling.

    NOTE(review): pickle.load executes arbitrary code from the file — only
    load files this program wrote itself, never untrusted input.
    """
    try:
        if os.path.isfile(file_path):
            with open(file_path, mode) as file:
                obj = pickle.load(file)
            log_data(f"Fichier chargé depuis {file_path}", log_file_path)
            return obj
        else:
            log_data(f"Le fichier n'existe pas à {file_path}", log_file_path)
            return None
    except Exception as e:
        log_data(f"Erreur lors du chargement du fichier: {e}", log_file_path)
        return None
def normal_distribution(mu, sigma, size=1):
    """Draw *size* samples from a Normal(mu, sigma) distribution."""
    return np.random.normal(mu, sigma, size)

# Example usage
print(normal_distribution(0, 1, size=5))
def triangular_distribution(left, mode, right, size=1):
    """Draw *size* samples from a Triangular(left, mode, right) distribution."""
    return np.random.triangular(left, mode, right, size)

# Example usage
print(triangular_distribution(0, 0.5, 1, size=5))
def uniform_distribution(a, b, size=1):
    """Draw *size* samples from a Uniform(a, b) distribution."""
    return np.random.uniform(a, b, size)

# Example usage
print(uniform_distribution(0, 1, size=5))
def gamma_distribution(alpha, beta, size=1):
    """Draw *size* samples from a Gamma(alpha, beta) distribution.

    *beta* is numpy's scale parameter (not the rate).
    """
    return np.random.gamma(alpha, beta, size)

# Example usage
print(gamma_distribution(2, 2, size=5))
def exponential_distribution(lam, size=1):
    """Draw *size* samples from an Exponential distribution with rate *lam*.

    numpy parameterizes by scale, hence the 1/lam conversion.
    """
    return np.random.exponential(1/lam, size)

# Example usage
print(exponential_distribution(1, size=5))
def lognormal_distribution(mu, sigma, size=1):
    """Draw *size* samples from a LogNormal(mu, sigma) distribution."""
    return np.random.lognormal(mu, sigma, size)

# Example usage
print(lognormal_distribution(0, 1, size=5))

def beta_distribution(alpha, beta, size=1):
    """Draw *size* samples from a Beta(alpha, beta) distribution."""
    return np.random.beta(alpha, beta, size)

# Example usage
print(beta_distribution(2, 5, size=5))
def pareto_distribution(alpha, xm, size=1):
    """Draw *size* samples from a shifted Pareto distribution.

    np.random.pareto samples start at 0, so adding *xm* yields support [xm, inf).
    """
    return np.random.pareto(alpha, size) + xm

# Example usage
print(pareto_distribution(2.62, 1, size=5))
def weibull_distribution(alpha, beta, size=1):
    """Draw *size* samples from a Weibull(alpha) distribution scaled by *beta*."""
    return np.random.weibull(alpha, size) * beta

# Example usage
print(weibull_distribution(1.5, 1, size=5))

def circular_uniform_distribution(size=1):
    """Draw *size* angles uniformly on the circle [0, 2*pi)."""
    return np.random.uniform(0, 2 * np.pi, size)

# Example usage
print(circular_uniform_distribution(size=5))
from scipy.stats import vonmises

def von_mises_distribution(mu, kappa, size=1):
    """Draw *size* samples from a Von Mises(mu, kappa) circular distribution."""
    return vonmises.rvs(kappa, loc=mu, size=size)
import numpy as np
from scipy.stats import norm, uniform, triang, lognorm, expon, gamma, beta, pareto, weibull_min, vonmises

def decrypt_distribution(distribution_name, values, **params):
    """
    Map probabilities back to sample values via the inverse CDF (ppf) of the
    named distribution.

    Parameters:
    - distribution_name (str): Distribution name (e.g. 'normal', 'uniform',
      'triangular', 'lognormal', 'exponential', 'gamma', 'beta', 'pareto',
      'weibull', 'circular_uniform', 'von_mises').
    - values (array-like): Probabilities/quantiles to invert.
    - **params: Distribution-specific parameters (see each branch's defaults).

    Returns:
    - array: The decoded values.

    Raises:
    - ValueError: If *distribution_name* is not supported.
    """
    if distribution_name == 'normal':
        mu = params.get('mu', 0)
        sigma = params.get('sigma', 1)
        return norm.ppf(values, loc=mu, scale=sigma)

    elif distribution_name == 'uniform':
        a = params.get('a', 0)
        b = params.get('b', 1)
        return uniform.ppf(values, loc=a, scale=b - a)

    elif distribution_name == 'triangular':
        left = params.get('left', 0)
        mode = params.get('mode', 0.5)
        right = params.get('right', 1)
        return triang.ppf(values, c=(mode - left) / (right - left), loc=left, scale=right - left)

    elif distribution_name == 'lognormal':
        mu = params.get('mu', 0)
        sigma = params.get('sigma', 1)
        return lognorm.ppf(values, s=sigma, scale=np.exp(mu))

    elif distribution_name == 'exponential':
        lam = params.get('lam', 1)
        return expon.ppf(values, scale=1 / lam)

    elif distribution_name == 'gamma':
        # Local names shape_a/scale_b avoid shadowing scipy's 'beta' object.
        shape_a = params.get('alpha', 1)
        scale_b = params.get('beta', 1)
        return gamma.ppf(values, a=shape_a, scale=scale_b)

    elif distribution_name == 'beta':
        # BUG FIX: the original assigned the 'beta' parameter to a local named
        # 'beta', which (by Python's function-wide local scoping) shadowed
        # scipy.stats.beta and made beta.ppf(...) crash on a float.
        shape_a = params.get('alpha', 1)
        shape_b = params.get('beta', 1)
        return beta.ppf(values, a=shape_a, b=shape_b)

    elif distribution_name == 'pareto':
        alpha = params.get('alpha', 1)
        xm = params.get('xm', 1)
        # NOTE(review): scipy's pareto support starts at 1, while the paired
        # generator (np.random.pareto + xm) starts at xm — confirm the
        # intended offset convention before relying on round-tripping.
        return pareto.ppf(values, alpha) + xm

    elif distribution_name == 'weibull':
        alpha = params.get('alpha', 1)
        scale_b = params.get('beta', 1)
        return weibull_min.ppf(values, alpha) * scale_b

    elif distribution_name == 'circular_uniform':
        return values  # values are already on the circle [0, 2*pi]

    elif distribution_name == 'von_mises':
        mu = params.get('mu', 0)
        kappa = params.get('kappa', 1)
        return vonmises.ppf(values, kappa, loc=mu)

    else:
        raise ValueError("Distribution non supportée")

# Example usage
distribution_name = 'normal'
params = {'mu': 0, 'sigma': 1}
values = np.array([0.1, 0.5, 0.9])
print(decrypt_distribution(distribution_name, values, **params))

# Example usage: generates 5 samples from a Von Mises distribution
print(von_mises_distribution(mu=0, kappa=1, size=5))

def build_model(hp, input_shape):
    """Build a small dense regression network with a tunable first layer.

    Parameters:
    - hp: keras-tuner HyperParameters object; samples 'units' in {64, 96, 128}.
    - input_shape (int): Number of input features.

    Returns:
    - Compiled Keras Sequential model (Adam, MSE loss, MAE metric).
    """
    model = Sequential([
        Dense(hp.Int('units', min_value=64, max_value=128, step=32),
              activation='relu', input_shape=(input_shape,)),
        Dense(32, activation='relu'),
        Dense(1)
    ])
    model.compile(optimizer='adam', loss='mean_squared_error', metrics=['mae'])
    return model

def hyperparameter_tuning(X, y):
    """Run a Hyperband search over `build_model` on an 80/20 split of (X, y).

    Parameters:
    - X (array-like): Feature matrix, shape (n_samples, n_features).
    - y (array-like): Targets.

    Returns:
    - tuple: (best_model, best_params) — the best Keras model found and the
      dict of hyperparameter values that produced it.
    """
    tuner = kt.Hyperband(
        lambda hp: build_model(hp, X.shape[1]),
        objective='val_loss',
        max_epochs=5,
        directory='tuner',
        project_name='hyperparameter_tuning'
    )

    X_train, X_val, y_train, y_val = train_test_split(X, y, test_size=0.2, random_state=42)
    tuner.search(X_train, y_train, epochs=5, validation_data=(X_val, y_val))

    best_model = tuner.get_best_models(num_models=1)[0]
    best_params = tuner.get_best_hyperparameters(num_trials=1)[0].values

    return best_model, best_params

def retry_on_failure(func, max_attempts=3, *args, **kwargs):
    """Call *func* with *args*/*kwargs*, retrying up to *max_attempts* times.

    Waits 1 s between attempts; after the final failed attempt the last
    exception is re-raised. Failures are logged via `log_data`, using the
    optional 'log_file_path' keyword argument (empty path otherwise).
    """
    attempts = 0
    while attempts < max_attempts:
        try:
            return func(*args, **kwargs)
        except Exception as e:
            attempts += 1
            # BUG FIX: the original used func.name, which does not exist on
            # Python functions; __name__ is the correct attribute.
            log_data(f"Tentative {attempts}/{max_attempts} échouée pour {func.__name__}: {e}",
                     kwargs.get('log_file_path', ''))
            if attempts == max_attempts:
                log_data(f"Échec permanent de {func.__name__} après {max_attempts} tentatives.",
                         kwargs.get('log_file_path', ''))
                raise  # bare raise preserves the original traceback
            time.sleep(1)  # short wait before retrying

def train_tf_model(X, y, log_file_path, existing_model=None):
    """Train (or continue training) a TF regression model on (X, y).

    If *existing_model* is provided it is fine-tuned directly; otherwise a
    fresh model is selected via `hyperparameter_tuning` and then fine-tuned.

    Returns:
    - Whatever `fine_tune_model` returns for the chosen model.
    """
    if existing_model:
        return fine_tune_model(existing_model, X, y, log_file_path)
    model, params = hyperparameter_tuning(X, y)
    return fine_tune_model(model, X, y, log_file_path)

def fine_tune_model(model, X_train, y_train, log_file_path):
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=42)
try:
start_time = time.time()
early_stopping = EarlyStopping(monitor='val_loss', patience=2)
history = model.fit(X_train, y_train, epochs=20, batch_size=16, validation_data=(X_val, y_val), verbose=0, callbacks=[early_stopping])
y_pred = model.predict(X_val)
rmse = np.sqrt(np.mean((y_val - y_pred.flatten()) ** 2))
r2 = r2_score(y_val, y_pred)
log_data(f"Temps de fine-tuning pour cette itération: {time.time() - start_time:.2f} secondes", log_file_path)
clear_session() # Libère la mémoire utilisée par les graphes de TensorFlow
gc.collect() # Appelle le garbage collector manuellement
return model, rmse, r2
except Exception as e:
log_data(f"Erreur pe...

Read more