【问题标题】:Creating new variable by matching on strings in sparklyr（通过匹配 sparklyr 中的字符串来创建新变量）
【发布时间】:2019-01-08 01:45:54
【问题描述】:

我是第一次使用 sparklyr,但在匹配两个向量的字符串以大规模创建新变量时遇到了麻烦。我的问题有以下一般结构:

我有一个大的 url 数据集:

# Large URL dataset: one row per URL (id + "domain/path" string).
df_1 <- data.frame(
  col1 = as.numeric(1:10),
  col2 = c(
    "john.com/abcd", "ringo.com/defg", "paul.com/hijk", "george.com/lmno",
    "rob.com/pqrs", "sam.com/tuvw", "matt.com/xyza", "lenny.com/bcde",
    "bob.com/fghi", "tom.com/jklm"
  )
)
col1            col2
 1   john.com/abcd
 2  ringo.com/defg
 3   paul.com/hijk
 4 george.com/lmno
 5    rob.com/pqrs
 6    sam.com/tuvw
 7   matt.com/xyza
 8  lenny.com/bcde
 9    bob.com/fghi
10    tom.com/jklm

还有另一个较小的通用领域数据集:

# Smaller reference dataset of bare domains to match against.
df_2 <- data.frame(
  col1 = as.numeric(1:7),
  col2 = c(
    "john.com", "jake.com", "tim.com", "paul.com",
    "rob.com", "harry.com", "chris.com"
  )
)
col1      col2
    1  john.com
    2  jake.com
    3   tim.com
    4  paul.com
    5   rob.com
    6 harry.com
    7 chris.com

我想使用 df_2 (df_2$col2) 中的域向量为 df_1 创建一个虚拟变量,指示域是否出现在 df_1 (df_1$col_2) 中的 url 中。生成的数据框应类似于 df_3。

# Desired result: df_1 plus a dummy column col3, which is 1 for the
# rows whose domain appears in df_2 (rows 1, 3, 5) and 0 otherwise.
df_3 <- data.frame(
  col1 = as.numeric(1:10),
  col2 = c(
    "john.com/abcd", "ringo.com/defg", "paul.com/hijk", "george.com/lmno",
    "rob.com/pqrs", "sam.com/tuvw", "matt.com/xyza", "lenny.com/bcde",
    "bob.com/fghi", "tom.com/jklm"
  ),
  col3 = as.numeric(1:10 %in% c(1, 3, 5))
)
   col1            col2 col3
     1   john.com/abcd    1
     2  ringo.com/defg    0
     3   paul.com/hijk    1
     4 george.com/lmno    0
     5    rob.com/pqrs    1
     6    sam.com/tuvw    0
     7   matt.com/xyza    0
     8  lenny.com/bcde    0
     9    bob.com/fghi    0
    10    tom.com/jklm    0

我已阅读这篇文章:How to filter on partial match using sparklyr

并尝试对 df_2 中的每个观察值逐一编写类似下面这样的代码：
# NOTE(review): this is the asker's FAILING attempt, kept verbatim for
# context. `like()` only exists inside sparklyr's SQL translation layer
# (it is not an R function), and the `| df_1$col2, "jake.com" | etc.`
# fragment is not a valid condition — the surrounding text reports stack
# limits and "function not recognized" errors from this code.
df_3 <- df_1 %>%
  mutate(col3 = 
    ifelse(like(df_1$col2, "john.com") | df_1$col2, "jake.com" | etc.,1,0))

但到目前为止，我一直遇到堆栈限制，或者 R 无法识别 like 函数。必须有一种更简单的方法来做到这一点。感谢您的任何帮助。

【问题讨论】:

    标签: r apache-spark dplyr sparklyr


    【解决方案1】:

    如果您正在寻找像这里这样定义明确的前缀,您可以提取它:

    # Copy both local data frames into Spark (sc is the spark connection
    # created earlier via spark_connect(); not shown in this snippet).
    sdf_1 <- copy_to(sc, df_1)
    sdf_2 <- copy_to(sc, df_2)
    
    # Extract everything before the last "/" as a join key, e.g.
    # "john.com/abcd" -> "john.com". regexp_extract is a Spark SQL
    # function, translated by sparklyr — it is not an R function.
    sdf_1_keyed <- sdf_1 %>% mutate(key = regexp_extract(col2, "^(.*)/", 1))
    
    

    应用左等值连接:

    # Left equi-join the keyed URLs against the domain table: `id` is
    # non-NA exactly for the rows whose extracted domain occurs in sdf_2.
    matched <- sdf_1_keyed %>% 
      left_join(sdf_2 %>% transmute(key = col2, id = col1), by="key")
    

    总结

    # Collapse back to one row per URL: col3 = 1 if any joined `id` was
    # non-missing (domain matched), 0 otherwise.
    matched %>% group_by(col1, col2) %>% 
       summarise(col3 = as.numeric(sum(as.numeric(!is.na(id)), na.rm = TRUE) > 0))
    
    # Source:   lazy query [?? x 3]
    # Database: spark_connection
    # Groups:   col1
        col1 col2             col3
       <dbl> <chr>           <dbl>
     1     1 john.com/abcd       1
     2     5 rob.com/pqrs        1
     3     6 sam.com/tuvw        0
     4     9 bob.com/fghi        0
     5     3 paul.com/hijk       1
     6     4 george.com/lmno     0
     7     8 lenny.com/bcde      0
     8    10 tom.com/jklm        0
     9     2 ringo.com/defg      0
    10     7 matt.com/xyza       0
    # ... with more rows
    

    RLIKE 条件也可以做类似的事情:

    # Alternative: pair every URL with every target domain via a Spark
    # crossJoin (invoked directly on the underlying Java object), then
    # register the result back as a sparklyr table.
    candidates <- sdf_1 %>% spark_dataframe() %>% 
      sparklyr::invoke("crossJoin",
        sdf_2 %>% transmute(target = col2) %>% spark_dataframe()) %>% 
      sdf_register()
    
    # rlike is Spark SQL's regex-match operator (again, SQL translation,
    # not an R function); a URL matches if any of its paired targets do.
    candidates %>% 
        mutate(matched = as.numeric(rlike(col2, target))) %>% 
        group_by(col1, col2) %>% 
        summarise(col3 = as.numeric(sum(matched, na.rm=TRUE) > 0))
    
    # Source:   lazy query [?? x 3]
    # Database: spark_connection
    # Groups:   col1
        col1 col2             col3
       <dbl> <chr>           <dbl>
     1     1 john.com/abcd       1
     2     5 rob.com/pqrs        1
     3     6 sam.com/tuvw        0
     4     9 bob.com/fghi        0
     5     3 paul.com/hijk       1
     6     4 george.com/lmno     0
     7     8 lenny.com/bcde      0
     8    10 tom.com/jklm        0
     9     2 ringo.com/defg      0
    10     7 matt.com/xyza       0
    # ... with more rows
    

    最后你可以提取唯一值:

    targets <- unique(as.character(df_2$col2))
    

    并创建 SQL 表达式:

    library(glue)
    
    # Build one big SQL predicate, e.g.
    # "col2 rlike 'john.com' OR col2 rlike 'jake.com' OR ..."
    expr <- glue_collapse(glue("col2 rlike '{targets}'"), " OR ")
    
    # Pass the predicate straight to Spark's selectExpr: keep all columns
    # ("*") and add the boolean predicate as col3, then cast it to numeric
    # on the sparklyr side.
    sdf_1 %>% 
      spark_dataframe() %>%
      sparklyr::invoke(
        "selectExpr", 
        list("*", as.character(glue("{expr} as col3")))) %>% 
      sdf_register() %>%
      mutate(col3 = as.numeric(col3))
    
     # Source:   lazy query [?? x 3]
     # Database: spark_connection
         col1 col2             col3
        <dbl> <chr>           <dbl>
      1     1 john.com/abcd       1
      2     2 ringo.com/defg      0
      3     3 paul.com/hijk       1
      4     4 george.com/lmno     0
      5     5 rob.com/pqrs        1
      6     6 sam.com/tuvw        0
      7     7 matt.com/xyza       0
      8     8 lenny.com/bcde      0
      9     9 bob.com/fghi        0
     10    10 tom.com/jklm        0
     # ... with more rows
    

    或R表达式:

    library(rlang)
    
    # Build one R-side expression string, e.g.
    # "rlike(col2, 'john.com') | rlike(col2, 'jake.com') | ..."
    # (rlike is translated to Spark SQL by sparklyr, not evaluated in R).
    rexpr <- glue_collapse(glue("rlike(col2, '{targets}')"), " | ")
    
    # parse_expr() is the current rlang API (parse_quosure() was
    # deprecated in rlang 0.2.0); unquoting with !! splices the parsed
    # expression into mutate() so sparklyr can translate it to SQL.
    sdf_1 %>% mutate(col3 = !!parse_expr(glue("as.numeric({rexpr})")))
    
    # Source:   lazy query [?? x 3]
    # Database: spark_connection
        col1 col2             col3
       <dbl> <chr>           <dbl>
     1     1 john.com/abcd       1
     2     2 ringo.com/defg      0
     3     3 paul.com/hijk       1
     4     4 george.com/lmno     0
     5     5 rob.com/pqrs        1
     6     6 sam.com/tuvw        0
     7     7 matt.com/xyza       0
     8     8 lenny.com/bcde      0
     9     9 bob.com/fghi        0
    10    10 tom.com/jklm        0
    # ... with more rows
    

    【讨论】:

    • 非常感谢。这些解决方案对我来说效果很好。不过，我仍然不明白为什么 rlike 函数在这里就能被识别。
    • 您必须记住,所有这些都是由于 SQL 转换层而起作用的。不涉及 R 函数。如果您尝试在纯 R data.frame 上测试相同的代码,它将无法正常工作。
    猜你喜欢
    • 1970-01-01
    • 2016-01-11
    • 2011-06-25
    • 1970-01-01
    • 1970-01-01
    • 1970-01-01
    • 1970-01-01
    • 1970-01-01
    • 2021-12-19
    相关资源
    最近更新 更多