R: Downloading images using rvest


I am trying to download a png image from a secure site through R.

To access the secure site I used rvest, which works well.

So far I have extracted the URL of the png image.

How can I download the image at this link using rvest? Functions outside of rvest return an error because they have no permission to access the site.

Current attempt

library(rvest)
library(httr)   # user_agent() comes from httr

uastring <- "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
session <- html_session("https://url.png", user_agent(uastring))
form <- html_form(session)[[1]]
form <- set_values(form, username = "***", password = "***", cookie_checkbox = TRUE)
session <- submit_form(session, form)
session2 <- jump_to(session, "https://url.png")

## Status 200 using rvest; successfully accessed the page.
session 
<session> https://url.png
  Status: 200
  Type:   image/png
  Size:   438935

## Using download.file returns status 403; the page could not be opened.
download.file("https://url.png", destfile = "t.png")
    cannot open: HTTP status was '403 Forbidden'

I tried readPNG and download.file on the URL; both fail with a 403 error because they have no permission to download from the authenticated secure site, which is why I turned to rvest in the first place. (download.file opens a fresh connection that does not carry the session's authentication cookies, so the server refuses the request.)

r download rcurl rvest httr
3 Answers

12 votes

Here is an example that downloads the R logo to the current directory.

library(rvest)
url <- "https://www.r-project.org"
imgsrc <- read_html(url) %>%
  html_node(xpath = '//*/img') %>%
  html_attr('src')
imgsrc
# [1] "/Rlogo.png"

# side-effect!
download.file(paste0(url, imgsrc), destfile = basename(imgsrc))
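
A small caveat: paste0(url, imgsrc) only works because the src here is site-relative. If the attribute might already hold an absolute URL, xml2::url_absolute() resolves both cases; a minimal sketch of the same download with that helper and a binary-safe write mode:

library(rvest)
library(xml2)

url <- "https://www.r-project.org"
imgsrc <- read_html(url) %>%
  html_node(xpath = '//*/img') %>%
  html_attr('src')

# url_absolute() leaves absolute URLs untouched and resolves relative
# ones against the base, so both "/Rlogo.png" and "https://..." work
img_url <- url_absolute(imgsrc, url)
download.file(img_url, destfile = basename(imgsrc), mode = "wb")

mode = "wb" matters on Windows, where the default text mode can corrupt binary files.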

EDIT

Since authentication is involved, you will of course need the session, as Austin suggested. Try this:

library(rvest)
library(httr)
sess <- html_session(url)
imgsrc <- sess %>%
  read_html() %>%
  html_node(xpath = '//*/img') %>%
  html_attr('src')
img <- jump_to(sess, paste0(url, imgsrc))

# side-effect!
writeBin(img$response$content, basename(imgsrc))
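
For what it's worth, rvest 1.0 renamed these session helpers: html_session() became session() and jump_to() became session_jump_to() (the old names are deprecated but still work). A sketch of the same flow under the current API, assuming the session object still exposes the underlying httr response as $response:

library(rvest)

sess <- session(url)
imgsrc <- sess %>%
  read_html() %>%
  html_node(xpath = '//*/img') %>%
  html_attr('src')
img <- session_jump_to(sess, paste0(url, imgsrc))

# the session wraps an httr response; its raw body holds the image bytes
writeBin(img$response$content, basename(imgsrc))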

6 votes

Try the following example:

library(rvest); library(dplyr)

url <- "http://www.calacademy.org/explore-science/new-discoveries-an-alaskan-butterfly-a-spider-physicist-and-more"
webpage <- html_session(url)
link.titles <- webpage %>% html_nodes("img")

# the 13th <img> on this particular page is the photo of interest
img.url <- link.titles[13] %>% html_attr("src")

# mode = "wb" writes the file as binary, which matters on Windows
download.file(img.url, "test.jpg", mode = "wb")

You now have "test.jpg", which is the downloaded image.
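
Hard-coding index 13 is brittle; the same pattern extends naturally to every image on the page. A sketch, assuming relative src values should be resolved against the page URL and that one failed download should not abort the loop:

library(rvest)
library(xml2)

url <- "http://www.calacademy.org/explore-science/new-discoveries-an-alaskan-butterfly-a-spider-physicist-and-more"
srcs <- read_html(url) %>%
  html_nodes("img") %>%
  html_attr("src") %>%
  na.omit()

for (src in srcs) {
  img_url <- url_absolute(src, url)
  # strip any query string before taking the file name from the URL
  destfile <- basename(sub("\\?.*$", "", img_url))
  try(download.file(img_url, destfile, mode = "wb"), silent = TRUE)
}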


0 votes

Handles multiple queries, names each file, and records the link in a txt file. For queries containing spaces, put + between the words; see the example.

library(rvest)
library(magrittr)
library(httr)

search_and_download_images <- function(query, size, n_images = 2, output_directory = "downloaded_images") {
  # Prepare the query for Google Images search.
  # NOTE: `size` is currently unused; tbs=isz:m hard-codes medium-sized
  # results (a possible mapping from `size` is sketched after the example).
  search_url <- paste0("https://www.google.com/search?q=", query, "&tbm=isch&tbs=isz:m")
  
  # Scrape image URLs from Google Images
  image_links <- search_url %>%
    read_html() %>%
    html_nodes("img") %>%
    html_attr("src") %>%
    na.omit()
  
  # Drop inlined data: URIs, which GET() cannot fetch
  image_links <- image_links[startsWith(image_links, "http")]
  
  # Keep the desired number of image URLs
  image_links <- image_links[1:min(n_images, length(image_links))]
  
  # Create the output directory if it doesn't exist
  if (!dir.exists(output_directory)) {
    dir.create(output_directory)
  }
  
  # Download and save images (index 1 is skipped, as in the original code;
  # seq_along()[-1] stays empty-safe when fewer than two links remain)
  for (i in seq_along(image_links)[-1]) {
    img_url <- image_links[i]
    response <- GET(img_url)
    
    if (response$status_code == 200) {
      file_ext <- tools::file_ext(img_url)
      if (file_ext == "") {
        file_ext <- "jpg"
      }
      
      # Save the image
      img_filename <- file.path(output_directory, paste0(query, "_", i, ".", file_ext))
      writeBin(content(response, "raw"), img_filename)
      
      # Save image information (URL, local filename)
      info_filename <- file.path(output_directory, paste0(query, "_", i, "_info.txt"))
      cat(paste("URL:", img_url, "\n"), file = info_filename)
      cat(paste("Local file:", img_filename, "\n"), file = info_filename, append = TRUE)
    } else {
      cat(paste("Failed to download image", i, "for query", query, "\n"))
    }
  }
}


# research_terms <- c("Water erosion", "wind erosion")
research_terms <- c("Water+erosion", "wind+erosion")
desired_size <- "medium"
number_of_images <- 10

for (term in research_terms) {
  search_and_download_images(term, size = desired_size, n_images = number_of_images)
}
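
As noted in the comment inside the function, the size argument is accepted but never applied. One way to honor it would be to map it onto Google's tbs URL parameter; the isz:s/isz:m/isz:l values below are informally observed rather than documented, so treat this helper as an assumption:

# Hypothetical helper: map the size argument onto Google's tbs parameter.
# The isz:* values are observed behavior, not a documented API (assumption).
size_to_tbs <- function(size) {
  switch(size,
         small  = "isz:s",
         medium = "isz:m",
         large  = "isz:l",
         "isz:m")  # fall back to medium for unknown values
}

# Inside search_and_download_images(), the search URL would then become:
# search_url <- paste0("https://www.google.com/search?q=", query,
#                      "&tbm=isch&tbs=", size_to_tbs(size))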