使用 kivy 动态显示控制台输出

问题描述 投票:0回答:1

我正在创建一个接口来训练人工神经网络(代理模型),并且在 Training_Screen 类中,我想在训练模型 = create_classes_instance.train_model(a,b,c,d,e,f,g) 时打印控制台输出被调用...因此它将显示相对于 print(f"Epoch: {epoch} | Training Loss: {train_loss} | Validation Loss: {val_loss_epoch} | Test Loss: {test_loss_epoch}")... 的文本按照我编写代码的方式,显示了文本,但是,例如,它没有在控制台中显示为打印。为了说明我的观点,当我打印时,for循环中的消息会逐渐显示(随着计数器递增),但是对于代码,首先代码等待 create_classes_instance.train_model(a,b,c,d,e, f,g) 运行后,将所有内容一起显示...

`类 print_time(): def print_train_time(self,start:float, end:float, device: torch.device = None): 总时间 = 结束 - 开始 print(f"训练时间:{total_time:.3f}秒") 返回总时间

类prepare_data():

def data(self, excel_workspace):
    self.Data = pd.read_excel(excel_workspace)
    df = pd.DataFrame(self.Data)
    self.df = df.sample(frac=1, random_state=42).reset_index(drop=True)

def variables(self,a,number_of_input_variables,number_of_output_variables):
    prepare_data_instance = prepare_data()
    prepare_data_instance.data(a)
    df = prepare_data_instance.df
    #df = prepare_data().data(a)
    self.input_variable = []
    self.output_variable = []
    self.output_variable_normalized = []
    
    for i in range(number_of_input_variables):
        self.input_variable.append(df.loc[:,df.columns[i]].values)
    for i in range(number_of_output_variables):
        self.output_variable.append(df.loc[:,df.columns[i+number_of_input_variables]].values)
    for i in range(number_of_output_variables):
        self.normalized_output = (self.output_variable[i] - np.min(self.output_variable[i])) / (np.max(self.output_variable[i]) - np.min(self.output_variable[i]))
        self.output_variable_normalized.append(self.normalized_output)
        
    self.X = torch.tensor(np.column_stack(tuple(self.input_variable)))
    self.Y = torch.tensor(np.column_stack(tuple(self.output_variable_normalized)))
    self.Ynn = torch.tensor(np.column_stack(tuple(self.output_variable)))
    
def split_data(self,a,b,c,per_train,per_val,per_test):
    prepare_data_instance = prepare_data()
    prepare_data_instance.variables(a,b,c)

    self.X = prepare_data_instance.X
    self.Y = prepare_data_instance.Y
    self.Ynn = prepare_data_instance.Ynn

    train_no = int(len(self.X)*per_train)
    val_no = int(len(self.X)*per_val)
    test_no = int(len(self.X)*per_test)

    if len(self.X) != train_no + val_no + test_no:
        test_no = int(len(self.X)*per_test)+1

    first_split = train_no
    second_split = train_no + val_no
    third_split = second_split + test_no

    self.X_train = self.X[0:first_split].to(torch.float32)
    self.X_val = self.X[first_split:second_split].to(torch.float32)
    self.X_test = self.X[second_split:third_split].to(torch.float32)

    self.Y_train = []
    self.Y_val = []
    self.Y_test = []

    for i in range(c):
        self.Y_train.append(self.Y[0:first_split,i].to(torch.float32))
        self.Y_val.append(self.Y[first_split:second_split,i].to(torch.float32))
        self.Y_test.append(self.Y[second_split:third_split,i].to(torch.float32))
    
def batches_func(self,a,b,c,d,e,f,layer_specs_list):

    prepare_data_instance = prepare_data()
    prepare_data_instance.split_data(a,b,c,d,e,f)
    X_train = prepare_data_instance.X_train         #split_data[0]
    X_val = prepare_data_instance.X_val             #split_data[1]
    X_test = prepare_data_instance.X_test           #split_data[2]
    Y_train = prepare_data_instance.Y_train         #split_data[3]
    Y_val = prepare_data_instance.Y_val             #split_data[4]
    Y_test = prepare_data_instance.Y_test           #split_data[5]


    batch_no = []
    for layer_spec in layer_specs_list:
        for layer in layer_spec:
            if 'batch_no' in layer:
                batch_no.append(layer['batch_no'])
                
    batch_size = []
    for i in range(c):
        batch_size.append(int(len(X_train))/batch_no[i])

    #data_X_train = X_train
    #data_X_val   = X_val
    #data_X_test  = X_test

    data_Y_train    =   []
    data_Y_val      =   []
    data_Y_test     =   []

    for i in range(c):
        data_Y_train.append(Y_train[i])
        data_Y_val.append(Y_val[i])
        data_Y_test.append(Y_test[i])
        #data_Y_train[0] -> 1st output variable
        #data_Y_train[1] -> 2nd output variable
        #data_Y_train[2] -> 3rd output variable

    batches_X_train = []
    self.batches_X_val = []
    self.batches_X_test = []

    batches_Y_train = []
    self.batches_Y_val = []
    self.batches_Y_test = []

    for i in range(c):
        for j in range(batch_no[i]):
            batches_X_train.append(X_train[int(batch_size[i])*j:int(batch_size[i])*(j+1)])
            batches_Y_train.append(data_Y_train[i][int(batch_size[i])*j:int(batch_size[i])*(j+1)].unsqueeze(1))
        for j in range(1):
            '''
            batches_X_val.append(data_X_val[len(data_X_val)*j:len(data_X_val)*(j+1)])
            batches_X_test.append(data_X_test[len(data_X_test)*j:len(data_X_test)*(j+1)])
            batches_Y_val.append(data_Y_val[len(data_Y_val)*j:len(data_Y_val)*(j+1)])
            batches_Y_test.append(data_Y_test[len(data_Y_test)*j:len(data_Y_test)*(j+1)])
            '''
            self.batches_X_val.append(X_val[len(X_val)*j:len(X_val)*(j+1)])
            self.batches_X_test.append(X_test[len(X_test)*j:len(X_test)*(j+1)])
            self.batches_Y_val.append(data_Y_val[i][len(X_val)*j:len(X_val*(j+1))].unsqueeze(1))
            self.batches_Y_test.append(data_Y_test[i][len(X_test)*j:len(X_test*(j+1))].unsqueeze(1))

    self.batches_X_train_sep = []
    self.batches_Y_train_sep = []

    intervals = [0] + [sum(batch_no[:i+1]) for i in range(len(batch_no))]
    
    # Separate the list of tensors into groups based on intervals
    for i in range(len(intervals) - 1):
        start_index = intervals[i]
        end_index = intervals[i + 1]
        self.batches_X_train_sep.append(batches_X_train[start_index:end_index])
        self.batches_Y_train_sep.append(batches_Y_train[start_index:end_index])
    #batches_X_train_sep[0] -> model 1
    #batches_Y_train_sep[0] -> model 1
    #batches_X_train_sep[1] -> model 2
    #batches_Y_train_sep[1] -> model 2
    #batches_X_train_sep[2] -> model 3
    #batches_Y_train_sep[2] -> model 3

    #batches_X_val[0] -> model 1
    #batches_X_val[1] -> model 2
    #batches_X_val[2] -> model 3
    #batches_X_test[0] -> model 1
    #batches_X_test[1] -> model 2
    #batches_X_test[2] -> model 3

    #batches_Y_val[0] -> model 1
    #batches_Y_val[1] -> model 2
    #batches_Y_val[2] -> model 3
    #batches_Y_test[0] -> model 1
    #batches_Y_test[1] -> model 2
    #batches_Y_test[2] -> model 3

类创建_类():

def create_init(self,layers, loss_function_class, optimizer, learning_rate):
    def _init_layers(self):
        super(self.__class__, self).__init__()
        for name, layer in layers.items():
            if isinstance(layer, nn.Module):
                self.add_module(name, layer)
        self.loss_function = loss_function_class()
        self.optimizer = optimizer(self.parameters(), lr=learning_rate)
    return _init_layers

def create_forward(self,layers):
    def _forward(self, x):
        for name in layers.keys():
            x = getattr(self, name)(x)
        return x
    return _forward

def create_model_classes(self,layer_specs_list):
    
    create_classes_instance = create_classes()

    self.classes = {}
    self.instances = []
    self.losses = []
    self.optimizers = []

    torch.manual_seed(42)

    for i, layer_specs in enumerate(layer_specs_list):
        #print(f"i = {i}")
        #print(f"layer_specs = {layer_specs}")
        class_name = f'Model{i+1}'
        #print(class_name)
        num_layers = len(layer_specs)-1
        #print(f"num_layers: {num_layers}")

        layers = {}
        for j in range(num_layers):
            #print(f"j = {j}")
            layer_info = layer_specs[j]
            #print(f"layer_info = {layer_info}")
            if j==0:
                in_features = layer_info['in_features']
            else:
                in_features = layer_specs[j-1].get('out_features',0)
            #print(f"in_features = {in_features}")
            out_features = layer_info.get('out_features',1)
            #print(f"out_features = {out_features}")
            layers[f'layer{j + 1}'] = nn.Linear(in_features=in_features, out_features=out_features)
            activation_func = layer_info.get('activation', nn.ReLU)
            layers[f'activation_function{j + 1}'] = activation_func()

        loss_function_class = layer_specs[num_layers].get('loss_function', nn.L1Loss)  # Default: Mean Absolute Error (MAE) loss
        optimizer = layer_specs[num_layers].get('optimizer', optim.Adam)  # Default: Adam optimizer
        learning_rate = layer_specs[num_layers].get('learning_rate', 0.001)  # Default learning rate

        class_dict = {
            '__init__': create_classes_instance.create_init(layers,loss_function_class,optimizer,learning_rate),
            'forward': create_classes_instance.create_forward(layers)
        }
        model_class = type(class_name, (nn.Module,), class_dict)
        self.classes[class_name] = model_class
        model_instance = model_class()
        self.instances.append(model_instance)

        #Setting up the loss function, optimizer and learning rate
        loss_function = loss_function_class()
        self.losses.append(loss_function)
        optimizer_instance = optimizer(model_instance.parameters(), lr=learning_rate)
        self.optimizers.append(optimizer_instance)
    
    #return classes, instances, losses, self.optimizers

def train_model(self,a,b,c,d,e,f,g):
    
    prepare_data_instance = prepare_data()
    prepare_data_instance.batches_func(a,b,c,d,e,f,g)

    create_classes_instance = create_classes()
    create_classes_instance.create_model_classes(g)

    epoch_count = []
    train_loss_values = []
    test_loss_values = []
    val_loss_values = []

    train_values = []
    test_values = []
    val_values = []

    #epochs = []
    #epochs.append(151)
    #epochs.append(151)
    #epochs.append(151)

    epochs = []

    for sublist in g:
        last_dict = sublist[-1]  # Get the last dictionary in the sublist
        epochs.append(last_dict['epochs'])  # Append the value associated with the key 'epochs'
    
    plot_epoch = 10

    #model_classes = create_model_classes(g)
    #batches = batches_func(a,b,c,d,e,f,g)
    
    trained_models = []

    for i in range(c):
        print(f"Model{i}")
        torch.manual_seed(42)
        model = create_classes_instance.instances[i]
        #model = model_classes[1][i]
        loss_fn_type = create_classes_instance.losses[i]
        #loss_fn_type = model_classes[2][i]
        optimizer_type = create_classes_instance.optimizers[i]
        #optimizer_type = model_classes[3][i]
        X_train_batches = prepare_data_instance.batches_X_train_sep[i]
        #X_train_batches = batches[0][i]
        Y_train_batches = prepare_data_instance.batches_Y_train_sep[i]
        #Y_train_batches = batches[1][i]
        X_val_batches = prepare_data_instance.batches_X_val[i]
        #X_val_batches = batches[2][i]
        Y_val_batches = prepare_data_instance.batches_Y_val[i]
        #Y_val_batches = batches[3][i]
        X_test_batches = prepare_data_instance.batches_X_test[i]
        #X_test_batches = batches[4][i]
        Y_test_batches = prepare_data_instance.batches_Y_test[i]
        #Y_test_batches = batches[5][i]
        
        #start_time = timer()
        for epoch in range(epochs[i]):
            #Set the model i to training mode
            model.train()   
            #1. Forward pass
            for j in range(len(X_train_batches)):
                y_pred = model(X_train_batches[j])
                
                #2. Calculate the loss
                train_loss = loss_fn_type(y_pred,Y_train_batches[j])

                #3. Optimizer zero grad
                optimizer_type.zero_grad()

                #4. Perform backpropagation
                train_loss.backward()

                #5. Step the optimizer
                optimizer_type.step()
        
            with torch.no_grad():
                for j in range(len(X_train_batches)):
                    y_pred = model(X_train_batches[j])
                    train_values.append(y_pred)

                test_pred =  model(X_test_batches)
                test_values.append(test_pred)
                test_loss_epoch = loss_fn_type(test_pred,Y_test_batches)

                val_pred = model(X_val_batches)
                val_values.append(val_pred)
                val_loss_epoch = loss_fn_type(val_pred,Y_val_batches)

            if epoch %(plot_epoch) == 0:
                epoch_count.append(epoch)
                train_loss_values.append(train_loss)
                test_loss_values.append(test_loss_epoch)
                val_loss_values.append(val_loss_epoch)

                print(f"Epoch: {epoch} | Training Loss: {train_loss} | Validation Loss: {val_loss_epoch} | Test Loss: {test_loss_epoch}")
                


        #end_time = timer()
        #print_time_instance = print_time()
        #x = print_time_instance.print_train_time(start_time,end_time,"cpu")
        #print(f"Training Time - Model{i} - {x}")
        trained_models.append(model)
        
    #print(trained_models)
    return trained_models

#------------------------------------------------ -------------------------------------------------- ----------------------------#

类Training_Screen(屏幕):

def train_models(self):

    self.num_models = int(self.manager.get_screen('Especificar_Dados').ids.number_of_output_variables.text)
    print(f"self.num_models = {self.num_models}")

    Dynamic_Screen_Screen = self.manager.get_screen('DynamicScreen')

    self.lr_list = Dynamic_Screen_Screen.ids.lr_list_kv.text
    self.lr_list = ast.literal_eval(self.lr_list)
    print(f"self.lr_list = {self.lr_list}")
    #print("lr_list stored")

    self.epochs_list = Dynamic_Screen_Screen.ids.epochs_list_kv.text
    self.epochs_list = ast.literal_eval(self.epochs_list)
    print(f"self.epochs_list = {self.epochs_list}")
    #print("epochs_list stored")

    self.nl_list = Dynamic_Screen_Screen.ids.nl_list_kv.text
    self.nl_list = ast.literal_eval(self.nl_list)
    print(f"self.nl_list = {self.nl_list}")
    #print("nl_list stored")

    self.loss_fn_list = Dynamic_Screen_Screen.ids.loss_fn_list_kv.text
    self.loss_fn_list = ast.literal_eval(self.loss_fn_list)
    print(f"self.loss_fn_list = {self.loss_fn_list}")
    #print("loss_fn_list stored")

    self.opt_list = Dynamic_Screen_Screen.ids.opt_list_kv.text
    self.opt_list = ast.literal_eval(self.opt_list)
    print(f"self.opt_list = {self.opt_list}")
    #print("opt_list stored")

    Dynamic_Screen4_Screen = self.manager.get_screen('DynamicScreen4')

    self.nn_list = Dynamic_Screen4_Screen.ids.nn_list_kv.text
    self.nn_list = ast.literal_eval(self.nn_list)
    print(f"self.nn_list = {self.nn_list}")
    #print("nn_list stored")

    self.act_fn_list = Dynamic_Screen4_Screen.ids.act_fn_list_kv.text
    self.act_fn_list = ast.literal_eval(self.act_fn_list)
    print(f"self.act_fn_list = {self.act_fn_list}")
    #print("act_fn_list stored")

    
    if self.num_models > 4:

        Dynamic_Screen2_Screen = self.manager.get_screen('DynamicScreen2')

        self.lr_list2 = Dynamic_Screen2_Screen.ids.lr_list_kv2.text
        self.lr_list2 = ast.literal_eval(self.lr_list2)
        #print("lr_list2 stored")

        self.epochs_list2 = Dynamic_Screen2_Screen.ids.epochs_list_kv2.text
        self.epochs_list2 = ast.literal_eval(self.epochs_list2)
        #print("epochs_list2 stored")

        self.nl_list2 = Dynamic_Screen2_Screen.ids.nl_list_kv2.text
        self.nl_list2 = ast.literal_eval(self.nl_list2)
        #print("nl_list2 stored")

        self.loss_fn_list2 = Dynamic_Screen2_Screen.ids.loss_fn_list_kv2.text
        self.loss_fn_list2 = ast.literal_eval(self.loss_fn_list2)
        #print("loss_fn_list2 stored")

        self.opt_list2 = Dynamic_Screen2_Screen.ids.opt_list_kv2.text
        self.opt_list2 = ast.literal_eval(self.opt_list2)
        #print("opt_list2 stored")

        Dynamic_Screen5_Screen = self.manager.get_screen('DynamicScreen5')

        self.nn_list2 = Dynamic_Screen5_Screen.ids.nn_list_kv5.text
        self.nn_list2 = ast.literal_eval(self.nn_list2)
        #print("nn_list2 stored")

        self.act_fn_list2 = Dynamic_Screen5_Screen.ids.act_fn_list_kv5.text
        self.act_fn_list2 = ast.literal_eval(self.act_fn_list2)
        #print("act_fn_list2 stored")

        if self.num_models > 8:
            Dynamic_Screen3_Screen = self.manager.get_screen('DynamicScreen3')

            self.lr_list3 = Dynamic_Screen3_Screen.ids.lr_list_kv3.text
            self.lr_list3 = ast.literal_eval(self.lr_list3)
            #print("lr_list3 stored")

            self.epochs_list3 = Dynamic_Screen3_Screen.ids.epochs_list_kv3.text
            self.epochs_list3 = ast.literal_eval(self.epochs_list3)
            #print("epochs_list3 stored")

            self.nl_list3 = Dynamic_Screen3_Screen.ids.nl_list_kv3.text
            self.nl_list3 = ast.literal_eval(self.nl_list3)
            #print("nl_list3 stored")

            self.loss_fn_list3 = Dynamic_Screen3_Screen.ids.loss_fn_list_kv3.text
            self.loss_fn_list3 = ast.literal_eval(self.loss_fn_list3)
            #print("loss_fn_list3 stored")

            self.opt_list3 = Dynamic_Screen3_Screen.ids.opt_list_kv3.text
            self.opt_list3 = ast.literal_eval(self.opt_list3)
            #print("opt_list3 stored")

            Dynamic_Screen6_Screen = self.manager.get_screen('DynamicScreen6')

            self.nn_list3 = Dynamic_Screen6_Screen.ids.nn_list_kv6.text
            self.nn_list3 = ast.literal_eval(self.nn_list3)
            #print("nn_list3 stored")

            self.act_fn_list3 = Dynamic_Screen6_Screen.ids.act_fn_list_kv6.text
            self.act_fn_list3 = ast.literal_eval(self.act_fn_list3)
            #print("act_fn_list3 stored")

            self.lr_list = self.lr_list3
            #print("lr_list operation")
            self.epochs_list = self.epochs_list3
            #print("epochs_list operation")
            self.nl_list = self.nl_list3
            #print("nl_list operation")
            self.loss_fn_list = self.loss_fn_list3
            #print("loss_fn_list operation")
            self.opt_list = self.opt_list3
            #print("opt_list operation")
            self.nn_list = self.nn_list3
            #print("nn_list operation")
            self.act_fn_list = self.act_fn_list3
            #print("act_fn_list operation")
            
        else:
            self.lr_list = self.lr_list2
            #print("lr_list operation")
            self.epochs_list = self.epochs_list2 
            #print("epochs_list operation")
            self.nl_list = self.nl_list2 
            #print("nl_list operation")
            self.loss_fn_list = self.loss_fn_list2
            #print("loss_fn_list operation")
            self.opt_list = self.opt_list2
            #print("opt_list operation")
            self.nn_list = self.nn_list2
            #print("nn_list operation")
            self.act_fn_list = self.act_fn_list2
            #print("act_fn_list operation")

    Dynamic_Screen7_Screen = self.manager.get_screen('DynamicScreen7')

    self.nb_list = Dynamic_Screen7_Screen.ids.nb_list_kv7.text
    self.nb_list = ast.literal_eval(self.nb_list)
    print(f"self.nb_list = {self.nb_list}")
    #print("nb_list operation")

    '''
    print(f"lr_list:{self.lr_list}")
    print(f"epochs_list:{self.epochs_list}")
    print(f"nl_list:{self.nl_list}")
    print(f"loss_fn_list:{self.loss_fn_list}")
    print(f"opt_list:{self.opt_list}")
    print(f"nn_list:{self.nn_list}")
    print(f"act_fn_list:{self.act_fn_list}")
    print(f"nb_list:{self.nb_list}")
    '''

    # Convert strings to corresponding functions
    self.loss_fn_list = [nn.L1Loss if item == 'MAE' else nn.MSELoss for item in self.loss_fn_list]
    self.opt_list = [optim.SGD if item == 'SGD' else optim.Adam for item in self.opt_list]
    # Define a dictionary mapping activation function names to function objects
    #activation_functions = {
    #    'ReLU': nn.ReLU,
    #    'Sigmoid': nn.Sigmoid,
    #    'Tanh': nn.Tanh
    #}

    # Use list comprehension to convert activation function names to function objects
    #self.act_fn_list = [activation_functions.get(item, nn.ReLU)() for item in self.act_fn_list]
    #self.act_fn_list = [nn.ReLU if item == 'ReLU' else nn.Sigmoid if item == 'Sigmoid' else nn.Tanh for item in self.act_fn_list]
    self.act_fn_list = [nn.ReLU if item == 'ReLU' else nn.Sigmoid if item == 'Sigmoid' else nn.Tanh for item in self.act_fn_list]
    

    # Define configurations for each model using a loop
    g = []
    for i in range(self.num_models):
        model_config = []
        start_idx = sum(self.nl_list[:i])  # Calculate the start index for accessing nn_list
        for j in range(self.nl_list[i]):
            if j == 0:
                model_config.append({'in_features': self.num_models, 'out_features': self.nn_list[start_idx + j], 'activation': self.act_fn_list[start_idx + j]})
            elif j == self.nl_list[i] - 1:
                model_config.append({'activation': self.act_fn_list[start_idx + j]})
            else:
                model_config.append({'out_features': self.nn_list[start_idx + j], 'activation': self.act_fn_list[start_idx + j]})
        
        model_config.append({'loss_function': self.loss_fn_list[i], 'optimizer': self.opt_list[i], 'learning_rate': self.lr_list[i], 'batch_no': self.nb_list[i], 'epochs': self.epochs_list[i]})
        g.append(model_config)
    
    a = self.manager.get_screen('Carregar_Dados').ids.file_input.text 
    b = int(self.manager.get_screen('Especificar_Dados').ids.number_of_input_variables.text)
    c = int(self.manager.get_screen('Especificar_Dados').ids.number_of_output_variables.text)
    d = float(self.manager.get_screen('Dividir_Dados').ids.train_percentage.text)/100
    e = float(self.manager.get_screen('Dividir_Dados').ids.validation_percentage.text)/100
    f = float(self.manager.get_screen('Dividir_Dados').ids.testing_percentage.text)/100

    #create_classes_instance = create_classes()
    #trained_models = create_classes_instance.train_model(a,b,c,d,e,f,g)
    #print(trained_models)

    a1 = r"C:\Users\willi\Área de Trabalho\Masters\Data_Biodiesel.xlsx"
    b1 = 3                                                                   #number of input variables
    c1 = 3                                                                   #number of output variables
    d1 = 0.7                                                                 #percentage of training set
    e1 = 0.15                                                                #percentage of validation set
    f1 = 0.15                                                                #percentage of testing set 
    g1 = [
        [
            {'in_features': 3, 'out_features': 32, 'activation': nn.ReLU},
            {'out_features': 64, 'activation': nn.ReLU},
            {'out_features': 32, 'activation': nn.ReLU},
            #{'out_features': 16, 'activation': nn.ReLU},
            {'activation': nn.Sigmoid},
            {'loss_function': nn.L1Loss, 'optimizer': optim.Adam, 'learning_rate': 1e-3, 'batch_no': 718, 'epochs':21}
        ],
        [
            {'in_features': 3, 'out_features': 32, 'activation': nn.ReLU},
            {'out_features': 64, 'activation': nn.ReLU},
            {'out_features': 32, 'activation': nn.ReLU},
            #{'out_features': 16, 'activation': nn.ReLU},
            {'activation': nn.Sigmoid},
            {'loss_function': nn.L1Loss, 'optimizer': optim.Adam, 'learning_rate': 1e-3, 'batch_no': 359, 'epochs':21}
        ],
        [
            {'in_features': 3, 'out_features': 32, 'activation': nn.ReLU},
            {'out_features': 64, 'activation': nn.ReLU},
            {'out_features': 32, 'activation': nn.ReLU},
            #{'out_features': 16, 'activation': nn.ReLU},
            {'activation': nn.Sigmoid},
            {'loss_function': nn.L1Loss, 'optimizer': optim.Adam, 'learning_rate': 1e-3, 'batch_no': 359, 'epochs':21}
        ]
    ]

    '''
    create_classes_instance = create_classes()
    trained_models = create_classes_instance.train_model(a,b,c,d,e,f,g1)
    print(trained_models)

    print(f"g = {g}\n")
    print(f"g1 = {g1}\n")
    print(f"Equal = {g==g1}")
    '''
    
    
    # Redirecting stdout to capture console output
    old_stdout = sys.stdout
    redirected_output = StringIO()
    sys.stdout = redirected_output

    create_classes_instance = create_classes()
    trained_models = create_classes_instance.train_model(a,b,c,d,e,f,g)
    
    # Reset stdout
    sys.stdout = old_stdout

    # Get the captured console output
    console_output = redirected_output.getvalue()

    # Update TextInput widget with the captured output
    self.update_text_input(console_output)
    
def update_text_input(self, text):
    
    # Split the captured output into lines
    lines = text.split('\n')

    # Define a function to update TextInput widget with each line of output
    def update_line(dt):
        if lines:
            line = lines.pop(0)
            # Update the text input with the next line
            self.manager.get_screen('Training_Screen').ids.train_output.text += line + '\n'
            # Schedule the next update after a short delay
            Clock.schedule_once(update_line, 0.1)

    # Start updating TextInput widget with the first line of output
    update_line(0)`
python console kivy
1个回答
0
投票

如果我理解您的问题,那么您在编写 Kivy 应用程序时遇到了一个常见问题。当您在 Kivy 应用程序中进行长时间运行的计算时,您应该在另一个 thread 中运行该计算,并使用 Clock.schedule.once()@mainthread 调用另一个仅更新 GUI 以显示文本的方法你要的那个。如果您在主线程中运行该计算(这正是我希望您所做的),那么在该计算完成之前(当 GUI 有机会再次使用主线程时),GUI 元素将不会更新。

© www.soinside.com 2019 - 2024. All rights reserved.