from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.firefox.service import Service
from bs4 import BeautifulSoup
import urllib.parse
import urllib.request
import time


# taking user input
print("What do you want to download?")
download = input()
# URL-encode the query so searches containing spaces or special characters work
site = 'https://www.google.com/search?tbm=isch&q=' + urllib.parse.quote_plus(download)


# providing the driver path (Selenium 4+ API; change the path to wherever geckodriver lives on your machine)
driver = webdriver.Firefox(service=Service(r'C:\Drivers\geckodriver.exe'))
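# Note: the service= constructor shown above is the Selenium 4+ form. If you are
# still on Selenium 3.x, the original executable_path keyword works instead:
#   driver = webdriver.Firefox(executable_path=r'C:\Drivers\geckodriver.exe')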

# passing the site url
driver.get(site)


# if you only want to download 10-15 images, skip the while loop below and just run
# driver.execute_script("window.scrollBy(0,document.body.scrollHeight)")


# the while loop below scrolls the webpage 7 times (if more results are available)
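# Google Images loads thumbnails lazily, so each scroll (and each click on the
# "Show more results" button) exposes additional <img> tags in the page source.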

i = 0

while i < 7:
    # scroll to the bottom of the page
    driver.execute_script("window.scrollBy(0,document.body.scrollHeight)")

    try:
        # click the "Show more results" button; this XPath is tied to Google's
        # current page layout and may need updating if the layout changes
        driver.find_element(By.XPATH, "/html/body/div[2]/c-wiz/div[3]/div[1]/div/div/div/div/div[5]/input").click()
    except Exception:
        pass
    time.sleep(5)
    i += 1

# parsing
soup = BeautifulSoup(driver.page_source, 'html.parser')


# closing the web browser
driver.close()
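# driver.close() only closes the current window; use driver.quit() instead if
# you also want to shut down the geckodriver process completely.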


# scraping image urls with the help of the img tag and the class Google uses for result thumbnails
img_tags = soup.find_all("img", class_="rg_i")
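# Note: "rg_i" is the thumbnail class Google used when this script was written;
# if img_tags comes back empty, inspect the page and update the class name.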


count = 0
for img in img_tags:
    # print(img['src'])
    try:
        # passing image urls one by one and downloading each as <count>.jpg
        urllib.request.urlretrieve(img['src'], str(count) + ".jpg")
        count += 1
        print("Number of images downloaded = " + str(count), end='\r')
    except Exception:
        # skip images whose URL is missing or cannot be retrieved
        pass
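
# optionally, print a final newline so the "\r" progress line is not left
# half-overwritten, and report the total that was actually saved
print("\nTotal images downloaded = " + str(count))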