How to check whether the next-page button is available or not using Python
Question:
I want to scrape some event data from a page, then go to the next page and scrape its event data, and so on, until the next-page button is no longer available. The question is how to implement the if..else condition that detects when the button is not available.
Condition code
# Question code (pseudocode): the condition on the ``if`` line is a
# placeholder the asker wants filled in — this block does not run as-is.
while True:
    for i in range(len(links)):
        scrapy()
    if #Write Condition to Check if Next page button is available :
        driver.get('website_link_here')
        next_page= '//*[@class="pagination"]/span[7]/a'
        link = WebDriverWait(driver, timeout=160).until(lambda d: d.find_element(By.XPATH,next_page))
        driver.execute_script("arguments[0].click();", link)
        scrapy()
    else:
        break
driver.quit()
Scrapy() function code
def scrapy():
    """Open one event from the listing page, print its name, and go back.

    NOTE(review): relies on module-level ``driver`` and on the loop
    variable ``i`` from the caller's scope — confirm ``i`` is always in
    range of the freshly fetched ``links`` list.
    """
    selector = '.event-poster'
    event_name = '.col-md-12 h1'
    # Re-locate the posters on every call: driver.back() below reloads the
    # page, which would invalidate element handles held from a prior call.
    links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
    links[i].click()
    name_e = WebDriverWait(driver, timeout=500).until(lambda d: d.find_element(By.CSS_SELECTOR,event_name))
    print(name_e.text)
    driver.back()
Libraries and driver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
# NOTE(review): unused import — the name ``driver`` is immediately
# rebound by the webdriver.Chrome(...) assignment below; remove it.
from lib2to3.pgen2 import driver
from selenium import webdriver
from selenium.webdriver.common.by import By
#import time
#import csv
from selenium.webdriver.support.wait import WebDriverWait
# webdriver-manager downloads a chromedriver matching the local Chrome.
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
selector = '.event-poster'
driver.get('website_link_here')  # placeholder URL from the question
# Wait (up to 500 s) for the first page's event posters to load.
links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
Answers:
from selenium.common.exceptions import TimeoutException
try:
    # NOTE(review): ``EC`` (selenium.webdriver.support.expected_conditions)
    # and ``buttonxpath`` are not defined in this snippet — both must be
    # supplied by the surrounding script.
    nextpagebutton = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, buttonxpath)))
except TimeoutException:
    # Timing out means the next-page button never appeared.
    print("No button available")
Sorry, I made some mistakes in the previous code, but after two days I edited it and changed some of the logic, and this code now works as you want. If you would like an explanation of how this code works, let me know in the comments and I will add it to my post.
Updated code. Also add this import: from selenium.common.exceptions import NoSuchElementException
i = 0
while True:
    for i in range(len(links)):
        scrapy()
    try:
        # The "next" pagination link exists only while more pages remain.
        driver.find_element(By.CLASS_NAME,'next')
        i+=1
    except NoSuchElementException:
        # No "next" link — last page reached.
        break
    else:
        # NOTE(review): the links are re-fetched BEFORE navigating to the
        # next page, so this list comes from the page being left — the
        # driver.get call should probably come first; confirm.
        links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
        driver.get(f'https://shoobs.com/find-events?page={i}')
You can scroll down the page with this command: driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
Here is your full updated code.
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
# NOTE(review): unused import — the name ``driver`` is immediately
# rebound by the webdriver.Chrome(...) assignment below; remove it.
from lib2to3.pgen2 import driver
from selenium import webdriver
from selenium.webdriver.common.by import By
#import time
#import csv
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
# webdriver-manager downloads a chromedriver matching the local Chrome.
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
selector = '.event-poster'
driver.get('https://shoobs.com/find-events')
# Wait (up to 500 s) for the first page's event posters to load.
links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
def scrapy(index=None):
    """Open the event at position *index* on the current listing page,
    print its title, and navigate back to the listing.

    Generalization: *index* defaults to the module-level loop variable
    ``i`` so every existing ``scrapy()`` call site keeps working, while
    new callers can pass an explicit event index.
    """
    if index is None:
        index = i  # module-level counter set by the caller's for-loop
    # Scroll to the bottom so lazily-loaded posters are present in the DOM.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    selector = '.event-poster'
    event_name = '.col-md-12 h1'
    # Re-locate the posters on every call: driver.back() below reloads the
    # page, which would invalidate element handles held from a prior call.
    links = WebDriverWait(driver, timeout=500).until(
        lambda d: d.find_elements(By.CSS_SELECTOR, selector))
    links[index].click()
    name_e = WebDriverWait(driver, timeout=500).until(
        lambda d: d.find_element(By.CSS_SELECTOR, event_name))
    print(name_e.text)
    driver.back()
# Paginate through the event listing: scrape every event on the current
# page, then follow the "next" link until it disappears.
i = 0
page = 1  # fix: dedicated page counter (see note below)
while True:
    for i in range(len(links)):
        scrapy()
    try:
        # The "next" pagination link is only rendered while more pages remain.
        driver.find_element(By.CLASS_NAME, 'next')
        # Fix: the original reused the event index ``i`` as the page
        # number, so after N events it requested page=N regardless of the
        # actual page position.
        page += 1
    except NoSuchElementException:
        # No "next" link: this was the last page.
        break
    else:
        # Fix: navigate FIRST, then collect the new page's posters. The
        # original fetched ``links`` before navigating, so the next loop
        # iteration used the previous page's (stale) list.
        driver.get(f'https://shoobs.com/find-events?page={page}')
        links = WebDriverWait(driver, timeout=500).until(
            lambda d: d.find_elements(By.CSS_SELECTOR, selector))
driver.quit()
I want to scrape some event data from a page, then go to the next page and scrape its event data, and so on, until the next-page button is no longer available. The question is how to implement the if..else condition that detects when the button is not available.
Condition code
# Question code (pseudocode): the condition on the ``if`` line is a
# placeholder the asker wants filled in — this block does not run as-is.
while True:
    for i in range(len(links)):
        scrapy()
    if #Write Condition to Check if Next page button is available :
        driver.get('website_link_here')
        next_page= '//*[@class="pagination"]/span[7]/a'
        link = WebDriverWait(driver, timeout=160).until(lambda d: d.find_element(By.XPATH,next_page))
        driver.execute_script("arguments[0].click();", link)
        scrapy()
    else:
        break
driver.quit()
Scrapy() function code
def scrapy():
    """Open one event from the listing page, print its name, and go back.

    NOTE(review): relies on module-level ``driver`` and on the loop
    variable ``i`` from the caller's scope — confirm ``i`` is always in
    range of the freshly fetched ``links`` list.
    """
    selector = '.event-poster'
    event_name = '.col-md-12 h1'
    # Re-locate the posters on every call: driver.back() below reloads the
    # page, which would invalidate element handles held from a prior call.
    links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
    links[i].click()
    name_e = WebDriverWait(driver, timeout=500).until(lambda d: d.find_element(By.CSS_SELECTOR,event_name))
    print(name_e.text)
    driver.back()
Libraries and driver
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
# NOTE(review): unused import — the name ``driver`` is immediately
# rebound by the webdriver.Chrome(...) assignment below; remove it.
from lib2to3.pgen2 import driver
from selenium import webdriver
from selenium.webdriver.common.by import By
#import time
#import csv
from selenium.webdriver.support.wait import WebDriverWait
# webdriver-manager downloads a chromedriver matching the local Chrome.
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
selector = '.event-poster'
driver.get('website_link_here')  # placeholder URL from the question
# Wait (up to 500 s) for the first page's event posters to load.
links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
from selenium.common.exceptions import TimeoutException
try:
    # NOTE(review): ``EC`` (selenium.webdriver.support.expected_conditions)
    # and ``buttonxpath`` are not defined in this snippet — both must be
    # supplied by the surrounding script.
    nextpagebutton = WebDriverWait(driver, 20).until(EC.presence_of_element_located((By.XPATH, buttonxpath)))
except TimeoutException:
    # Timing out means the next-page button never appeared.
    print("No button available")
Sorry, I made some mistakes in the previous code, but after two days I edited it and changed some of the logic, and this code now works as you want. If you would like an explanation of how this code works, let me know in the comments and I will add it to my post.
Updated code. Also add this import: from selenium.common.exceptions import NoSuchElementException
i = 0
while True:
    for i in range(len(links)):
        scrapy()
    try:
        # The "next" pagination link exists only while more pages remain.
        driver.find_element(By.CLASS_NAME,'next')
        i+=1
    except NoSuchElementException:
        # No "next" link — last page reached.
        break
    else:
        # NOTE(review): the links are re-fetched BEFORE navigating to the
        # next page, so this list comes from the page being left — the
        # driver.get call should probably come first; confirm.
        links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
        driver.get(f'https://shoobs.com/find-events?page={i}')
You can scroll down the page with this command: driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
Here is your full updated code.
from webdriver_manager.chrome import ChromeDriverManager
from selenium.webdriver.chrome.service import Service
# NOTE(review): unused import — the name ``driver`` is immediately
# rebound by the webdriver.Chrome(...) assignment below; remove it.
from lib2to3.pgen2 import driver
from selenium import webdriver
from selenium.webdriver.common.by import By
#import time
#import csv
from selenium.webdriver.support.wait import WebDriverWait
from selenium.common.exceptions import NoSuchElementException
# webdriver-manager downloads a chromedriver matching the local Chrome.
driver = webdriver.Chrome(service=Service(ChromeDriverManager().install()))
selector = '.event-poster'
driver.get('https://shoobs.com/find-events')
# Wait (up to 500 s) for the first page's event posters to load.
links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
def scrapy():
    """Click the i-th event poster on the listing page, print the event
    title, then return to the listing.

    Reads the module-level ``driver`` and loop counter ``i``.
    """
    # Bring lazily-loaded content into the DOM before querying it.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
    poster_css = '.event-poster'
    title_css = '.col-md-12 h1'
    # Posters must be re-queried on every call because driver.back()
    # reloads the page and invalidates older element handles.
    posters = WebDriverWait(driver, timeout=500).until(
        lambda d: d.find_elements(By.CSS_SELECTOR, poster_css))
    posters[i].click()
    title_el = WebDriverWait(driver, timeout=500).until(
        lambda d: d.find_element(By.CSS_SELECTOR, title_css))
    print(title_el.text)
    driver.back()
i = 0
while True:
    for i in range(len(links)):
        scrapy()
    try:
        # The "next" pagination link exists only while more pages remain.
        driver.find_element(By.CLASS_NAME,'next')
        # NOTE(review): after the for-loop ``i`` equals the number of
        # events on the page minus one; using i+1 as the next page number
        # only works if that matches the site's pagination — confirm.
        i+=1
    except NoSuchElementException:
        # No "next" link — last page reached.
        break
    else:
        # NOTE(review): the links are re-fetched BEFORE navigating to the
        # next page, so this list comes from the page being left — the
        # driver.get call should probably come first; confirm.
        links = WebDriverWait(driver, timeout=500).until(lambda d: d.find_elements(By.CSS_SELECTOR,selector))
        driver.get(f'https://shoobs.com/find-events?page={i}')
driver.quit()