将 Python 类转换为 R 的 R6 类 - 错误

问题描述 投票:0回答:1

我正在从一个名为“graphST”的工具转换一个Python类(https://github.com/JinmiaoChenLab/GraphST/blob/d62b0b7b6cd38ee285f3ac8cd67b7341a10bcc74/GraphST/GraphST.py)。

现在是我的(https://github.com/myanna1416/GraphSTR_WORK/blob/main/ZorFirstRproject.R

我收到的错误是:

Error in R6::R6Class("GraphST", public = list(adata = NULL, features = NULL,  : 
  All items in public, private, and active must have unique names. 

这是我创建的 R6 类,它抛出错误:

GraphST <- R6::R6Class(
  "GraphST",
  public = list(
    adata = NULL,
    features = NULL,
    features_a = NULL,
    label_CSL = NULL,
    adj = NULL,
    graph_neigh = NULL,
    dim_input = NULL,
    dim_output = NULL,
    feat_sc = NULL,
    feat_sp = NULL,
    n_cell = NULL,
    n_spot = NULL,
    dim_input_sc = NULL,
    dim_output_sc = NULL,
    emb_rec = NULL,
    adata_sc = NULL,
    # Adding adata_sc for scRNA-seq data
    device = "cpu",
    learning_rate = 0.001,
    learning_rate_sc = 0.01,
    weight_decay = 0.00,
    epochs = 600,
    dim_input = 3000,
    dim_output = 64,
    random_seed = 41,
    alpha = 10,
    beta = 1,
    theta = 0.1,
    lamda1 = 10,
    lamda2 = 1,
    deconvolution = FALSE,
    datatype = "10X",
    
    #constructor function
    initialize = function(adata,
                          adata_sc = NULL,
                          device = "cpu",
                          learning_rate = 0.001,
                          learning_rate_sc = 0.01,
                          weight_decay = 0.00,
                          epochs = 600,
                          random_seed = 41,
                          alpha = 10,
                          beta = 1,
                          theta = 0.1,
                          lamda1 = 10,
                          lamda2 = 1,
                          deconvolution = FALSE,
                          datatype = "10X") {
      self$adata <- adata
      self$adata_sc <- adata_sc  # Initialize adata_sc
      self$device <- device
      self$learning_rate <- learning_rate
      self$learning_rate_sc <- learning_rate_sc
      self$weight_decay <- weight_decay
      self$epochs <- epochs
      self$random_seed <- random_seed
      self$alpha <- alpha
      self$beta <- beta
      self$theta <- theta
      self$lamda1 <- lamda1
      self$lamda2 <- lamda2
      self$deconvolution <- deconvolution
      self$datatype <- datatype
      self$features_a <- NULL
      self$label_CSL <- NULL
      self$adj <- NULL
      self$graph_neigh <- NULL
      self$dim_input <- NULL
      self$dim_output <- NULL
      self$feat_sc <- NULL
      self$feat_sp <- NULL
      self$n_cell <- NULL
      self$n_spot <- NULL
      self$dim_input_sc <- NULL
      self$dim_output_sc <- NULL
      self$emb_rec <- NULL 
      
      
      
      
      set.seed(self$random_seed)  # Fix the seed for reproducibility
      
      
      
      self$adata <- preprocess(self$adata)
      
      self$adata <- construct_interaction(self$adata)
      
      
      self$adata <- add_contrastive_label(self$adata)
      
      self$adata <- get_feature(self$adata)
      
      # Convert 'feat' to a torch tensor and move to the specified device
      self$features <-
        torch_tensor(as.array(self$adata@misc$feat),
                     dtype = torch_float32())$to(device = self$device)
      # Convert 'feat_a' to a torch tensor and move to the specified device
      self$features_a <-
        torch_tensor(as.array(self$adata@misc$feat_a),
                     dtype = torch_float32())$to(device = self$device)
      # Convert 'label_CSL' to a torch tensor and move to the specified device
      self$label_CSL <-
        torch_tensor(as.array(self$adata@misc$label_CSL),
                     dtype = torch_float32())$to(device = self$device)
      # For 'adj', we simply reference it as it does not necessarily need conversion for this context
      self$adj <- self$adata@misc$adj
      n <- nrow(self$adj)
      self$graph_neigh <-
        torch_tensor(as.array(self$adata@misc$graph_neigh) + diag(rep(1, n)),
                     dtype = torch_float32())$to(device = self$device)
      self$dim_input <- dim(self$features)[2]
      self$dim_output <- dim_output
      
      self$adj <- preprocess_adj(self$adj)
      self$adj <- torch_tensor(as.array(self$adj), dtype = torch_float32())$to(device = self$device)
      
      if (self$deconvolution) {
        self$adata_sc <-
          self$adata_sc$clone(deep = TRUE)
        
        # Replace NA (equivalent to NaN in Python) with 0 for self$feat_sc
        self$feat_sc[is.na(self$feat_sc)] <- 0
        
        # Convert to a torch tensor and move to the specified device
        self$feat_sc <-
          torch_tensor(self$feat_sc, dtype = torch_float32)$to(device = self$device)
        
        # Repeat the process for self$feat_sp
        self$feat_sp[is.na(self$feat_sp)] <- 0
        self$feat_sp <-
          torch_tensor(self$feat_sp, dtype = torch_float32)$to(device = self$device)
        
        # Check if self$adata_sc exists
        if (!is.null(self$adata_sc)) {
          # Assuming self$feat_sc is a torch tensor, use dim() to get dimensions
          self$dim_input <- dim(self$feat_sc)[2]
        }
        
        # Set the number of cells and spots based on observations in adata_sc and adata
        # Assuming adata_sc and adata are lists or similar objects with n_obs (number of observations) property
        self$n_cell <- self$adata_sc$n_obs
        self$n_spot <- self$adata$n_obs
      }
    },
    
    train = function() {
      self$model <-
        Encoder$new(self$dim_input, self$dim_output, self$graph_neigh)$to(device = self$device)
      
      self$loss_CSL <- nn_bce_with_logits_loss()
      self$optimizer <-
        optim_adam(
          self$model$parameters(),
          lr = self$learning_rate,
          weight_decay = self$weight_decay
        )
      
      cat("Begin to train ST data...\n")
      pb <-
        progress_bar$new(total = self$epochs, format = "  [:bar] :percent :elapsed/:est")
      
      for (epoch in 1:self$epochs) {
        self$model$train()
        
        self$features_a2 <-
          permute_features(self$features)  # Ensure this function is defined to shuffle features
        list(hidden_feat, emb, ret, ret_a) <-
          self$model(self$features, self$features_a2, self$adj)
        
        loss_sl_1 <- self$loss_CSL(ret, self$label_CSL)
        loss_sl_2 <- self$loss_CSL(ret_a, self$label_CSL)
        loss_feat <- mse_loss(self$features, emb)
        
        loss <- self$alpha * loss_feat + self$beta * (loss_sl_1 + loss_sl_2)
        
        self$optimizer$zero_grad()
        loss$backward()
        self$optimizer$step()
        
        pb$tick()
      }
      
      cat("Optimization finished for ST data!\n")
      
      no_grad({
        self$model$eval()
        if (self$deconvolution) {
          self$emb_rec <-
            self$model(self$features, self$features_a, self$adj)[[2]]
          return(self$emb_rec)
        } else {
          self$emb_rec <-
            self$model(self$features, self$features_a, self$adj)[[2]]$detach()$cpu()$numpy()
          self$adata@misc[['emb']] <- self$emb_rec
          return(self$adata)
        }
      })
    }
  )
)
python r python-3.x type-conversion r6
1个回答
0
投票

dim_input
类定义中重复出现
dim_output
R6
会导致错误。在
R6
类中,公共、私有和活动中的所有项目都必须具有唯一的名称。因此,定义
dim_input
dim_output
两次会导致错误。

© www.soinside.com 2019 - 2024. All rights reserved.