包含特定数据的新矩阵

问题描述 投票:0回答:3

样本数据

# Sample data.  rep(0:1, 10) has length 20, so the length-10 columns are
# recycled and the frame has 20 rows (10 per group).
# Bug fix: seq(11:20) evaluates to 1:10 -- seq() applied to a length-10
# vector returns its indices, not the values.  Use 11:20 directly.
data <- data.frame(
  group  = rep(0:1, 10),
  value1 = 1:10,
  value2 = 11:20,
  value3 = as.factor(rep(1:3, length.out = 10))
)

目标(见原帖附图)

我有数据框“数据”,目标是创建一个如图所示的新数据框。请注意,value3 是一个因子变量,所以我想报告图中每个组的 n 和百分比。

我知道分别估计最小值和最大值,如下所示

# Per-group min/max: subset the column by group FIRST, then aggregate.
# Bug fix: the original applied the logical subscript to the
# already-computed scalar (min(x)[mask]), which returns an NA-padded
# vector, and the 0/1 suffixes did not match the masks used.
value1_min0 <- min(data$value1[data$group == 0])
value1_max0 <- max(data$value1[data$group == 0])
value1_min1 <- min(data$value1[data$group == 1])
value1_max1 <- max(data$value1[data$group == 1])

但有更好的方法来更有效地做到这一点吗?

r sapply
3个回答
3
投票

基础 R

对因子上的数值变量和table / prop.table使用聚合。

## Base R: per-group min/max of the numeric columns, transposed so the
## two groups (0 and 1) become the columns of the summary table.
agg <- aggregate(
  cbind(value1, value2) ~ group, data,
  function(v) c(min = min(v), max = max(v))
)
tab12 <- as.data.frame.matrix(
  setNames(as.data.frame(t(agg[-1])), agg[[1]])
)

## Observations per group.
group_n <- table(data$group)

## Within-group shares of each value3 level; margin = 2 normalises each
## group column to sum to 1.
tab3 <- prop.table(table(group = data$value3, value3 = data$group), 2)
rownames(tab3) <- paste("value3", rownames(tab3), sep = " = ")

## Stack the three pieces into the final table.
rbind(tab12, n = group_n, as.data.frame.matrix(tab3))

给出以下

              0    1
value1.min  1.0  2.0
value1.max  9.0 10.0
value2.min  1.0  2.0
value2.max  9.0 10.0
n          10.0 10.0
value3 = 1  0.4  0.4
value3 = 2  0.2  0.4
value3 = 3  0.4  0.2

sqldf

这个替代方案有点单调乏味,但它很直接:

library(sqldf)

# One SQL pass computes every statistic per group.  avg() of a boolean
# comparison (value3 = k) gives the proportion of rows at that level,
# and [group] must be bracket-quoted because GROUP is an SQL keyword.
res <- sqldf('select
  [group],  
  min(value1) [value1.min],
  max(value1) [value1.max],
  min(value2) [value2.min],
  max(value2) [value2.max],
  count(*) n,
  avg(value3 = 1) [value3 == 1],
  avg(value3 = 2) [value3 == 2],
  avg(value3 = 3) [value3 == 3]
  from data
  group by [group]')
# Transpose so the groups become columns, dropping the group column
# itself and reusing its values as the new column names.
setNames(as.data.frame(t(res[-1])), res$group)

给出:

               0    1
value1.min   1.0  2.0
value1.max   9.0 10.0
value2.min   1.0  2.0
value2.max   9.0 10.0
n           10.0 10.0
value3 == 1  0.4  0.4
value3 == 2  0.2  0.4
value3 == 3  0.4  0.2

Skimr

使用skimr包我们可以这样做:

library(dplyr)
library(skimr)
library(tidyr)

# Build one "fraction = <level>" summary function per level of value3,
# each computing the share of observations equal to that level.
# Hand-written equivalent kept for reference:
# L <- list("fraction = 1" = function(x) mean(x == 1),
#           "fraction = 2" = function(x) mean(x == 2),
#           "fraction = 3" = function(x) mean(x == 3))
levs <- levels(data$value3)
L <- lapply(levs, function(lv) function(x) mean(x == lv))
names(L) <- paste("fraction =", levs)

# Register which statistics skim() reports: min/max for integer columns,
# the per-level fractions plus a count for factor columns.
# NOTE(review): this list-based skim_with() signature is the skimr 1.x
# API; skimr >= 2.0 expects sfl() -- confirm the installed version.
skim_with(integer = list(min = min, max = max), 
 factor = c(L, n = length), append = FALSE)

# Skim per group, then spread so each group becomes a column.
data %>% 
  group_by(group) %>%
  skim %>%
  ungroup %>%
  select(group, variable, stat, value) %>%
  spread(group, value)

给出以下内容:

# A tibble: 8 x 4
  variable stat          `0`   `1`
  <chr>    <chr>       <dbl> <dbl>
1 value1   max           9    10  
2 value1   min           1     2  
3 value2   max           9    10  
4 value2   min           1     2  
5 value3   fraction = 1  0.4   0.4
6 value3   fraction = 2  0.2   0.4
7 value3   fraction = 3  0.4   0.2
8 value3   n            10    10  

更新

修订后的基础解决方案添加了sqldf和skimr解决方案。改进了skimr解决方案。


1
投票

使用dplyr(> = 0.8.0语法):

library(dplyr)
# Compute min and max of every column per group.
# Bug fix: the sample data frame is named `data`, not `df` -- the
# original snippet errors with "object 'df' not found".
# NOTE(review): min()/max() on the unordered factor value3 errors in
# current R; the numeric value3_min/max shown below suggests an older
# dplyr coerced it silently -- confirm before relying on those columns.
data %>% 
  group_by(group) %>% 
  summarise_all(list(~min(.), ~max(.)))

结果:

# A tibble: 2 x 7
  group value1_min value2_min value3_min value1_max value2_max value3_max
  <int>      <dbl>      <dbl>      <dbl>      <dbl>      <dbl>      <dbl>
1     0          1          1          1          9          9          3
2     1          2          2          1         10         10          3

0
投票

使用 dplyr 和 tidyr

library(dplyr)
library(tidyr)

# Summarise per group, then gather/spread so the groups become columns.
# Bug fix: the original percentages used
#   length(sum(value3 == k)) / length(unique(value3))
# which is always 1/3 regardless of the data, because length() of the
# scalar sum() is 1.  The share of each level is simply mean(value3 == k).
data %>%
  group_by(group) %>%
  summarize(value1_min = min(value1),
        value1_max = max(value1),
        value2_min = min(value2),
        value2_max = max(value2),
        value3_n = length(unique(value3)),  # distinct levels seen per group
        value3_perc1 = mean(value3 == 1),
        value3_perc2 = mean(value3 == 2),
        value3_perc3 = mean(value3 == 3)
        ) %>%
  gather(review, value, -group) %>%
  spread(group, value)

结果:

# A tibble: 8 x 3
  review         `0`    `1`
  <chr>        <dbl>  <dbl>
1 value1_max   9.00  10.0  
2 value1_min   1.00   2.00 
3 value2_max   9.00  10.0  
4 value2_min   1.00   2.00 
5 value3_n     3.00   3.00 
6 value3_perc1 0.333  0.333
7 value3_perc2 0.333  0.333
8 value3_perc3 0.333  0.333
© www.soinside.com 2019 - 2024. All rights reserved.