Selenium ElementClickInterceptedException when headless = True

Question:

I am running a Selenium script against the DNCA website to scrape some document links. I am trying to get the link for each value in the drop-down of every section shown on this page. The code works fine, but when I run it with the option headless = True, I get the following error:

ElementClickInterceptedException: element click intercepted: Element <li data-original-index="0">...</li> is not clickable at point (226, 250). Other element would receive the click: <div class="col-md-12">...</div>
  (Session info: headless chrome=104.0.5112.81)

Code:

import sys

import pandas as pd
from bs4 import BeautifulSoup
from selenium.webdriver.common.by import By


def get_active_row(active_tab, fund_id):
    
    active_row = active_tab.find_elements(By.XPATH, ".//tr[@style='' or @style='display: table-row;'][@fund-id = '{}']".format(fund_id)) 
    
    try:
        assert len(active_row) == 1
        active_row = active_row[0]
        return active_row
        
    except AssertionError as asserr:
        print(asserr, ' -- Expected exactly one active row for fund id: ', fund_id)
        sys.exit(1)
        
    except Exception as err:
        print(err, ' -- fund id:', fund_id)
        sys.exit(1)



def scrap(driver):
    
    tab_list = driver.find_element(By.XPATH, "//ul[contains(@role, 'tablist')]") 
    tab_list_names = tab_list.find_elements(By.XPATH, './/li') 
    
    data_list = [] 
    
    for loc, tab_name in enumerate(tab_list_names): 
        
        if loc < 20:
            tab_name.click() 
 
            
            html = driver.page_source 
            soup = BeautifulSoup(html, 'html.parser') 
            
            bs_active_tab = soup.find('div', {'class': 'tab-pane table-datas active'}) 
            
            bs_headers = bs_active_tab.find('thead') 
            headers = [i.text for i in bs_headers.find_all('td')] 
        
            active_tab = driver.find_element(By.XPATH, "//div[contains(@class, 'tab-pane table-datas active')]") 
            
            # Collect the fund ids in this tab, skipping the 'Toute la gamme' row ('-')
            unique_fund_ids = [i_fund.get_attribute('fund-id') for i_fund in active_tab.find_elements(By.XPATH, ".//tr[@style]") if i_fund.get_attribute('fund-id') != '-']
            # De-duplicate the fund ids while preserving their order
            lookup = set() 
            unique_fund_ids = [x for x in unique_fund_ids if x not in lookup and lookup.add(x) is None] 
            
            for fund_id in unique_fund_ids: #Iterate over each fund
                
                active_row = get_active_row(active_tab, fund_id) 
    
                active_row.find_element(By.XPATH, './/button').click() 
                isin_list = [i.text for i in active_row.find_elements(By.XPATH, './/li')] 
                
                for pos, isin_val in enumerate(isin_list): 
                    
                    isin_selected = active_row.find_elements(By.XPATH, './/li')[pos] 
                    isin_selected.click() 
                    
                    active_row = get_active_row(active_tab, fund_id) 
                    
                    fund_name = '' 
                    
                    for pos_inner, td in enumerate(active_row.find_elements(By.XPATH, ".//td")): 
                        a_tag = td.find_elements(By.XPATH, ".//a") 
                        
                        if len(a_tag) == 1: 
                            a_tag = a_tag[0]
                            
                            if pos_inner == 0: 
                                fund_name = a_tag.text 
                              
                            link = a_tag.get_attribute('href')
                            data_list.append([tab_name.text, fund_name, isin_val, headers[pos_inner], link])
    
                        else:
                            data_list.append([tab_name.text, fund_name, isin_val, headers[pos_inner], ''])
                
                    active_row = get_active_row(active_tab, fund_id) 
                    active_row.find_element(By.XPATH, './/button').click() 
                
                isin_selected_to_close = active_row.find_elements(By.XPATH, './/li')[0] 
                isin_selected_to_close.click()    

        tlg_tr_tab = active_tab.find_element(By.XPATH, ".//tr[@fund-id='-']")
        
        for tlg_pos_inner, tlg_td in enumerate(tlg_tr_tab.find_elements(By.XPATH, ".//td")): 
            tlg_a_tag = tlg_td.find_elements(By.XPATH, ".//a") 
            if len(tlg_a_tag) == 1:
                tlg_a_tag = tlg_a_tag[0]
                
                tlg_link = tlg_a_tag.get_attribute('href') #Get document link
                data_list.append([tab_name.text, 'Toute la gamme', '',  headers[tlg_pos_inner], tlg_link])
                
            else:
                data_list.append([tab_name.text, 'Toute la gamme', '', headers[tlg_pos_inner], ''])
    
    dataset_links = pd.DataFrame(data_list, columns=['Tab', 'Fund Name', 'ISIN', 'Type', 'Link']) 
    
    driver.quit()
    
    return dataset_links

Can someone please explain why this works fine with headless = False but not with headless = True?

Asked By: DumbCoder


Answers:

In headless mode the default window size is much smaller than in regular mode (headless Chrome typically starts at 800x600), so the page is laid out differently and the element you want to click can end up covered by another element.
To overcome this problem you need to set the window size explicitly.
It can be done in either of the following ways:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

options = Options()
options.add_argument("--headless")
options.add_argument("--window-size=1920,1080")
webdriver_service = Service(r'C:\webdrivers\chromedriver.exe')
driver = webdriver.Chrome(service=webdriver_service, options=options)

Or just

driver.set_window_size(1920, 1080)
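
If you use this second approach, call set_window_size right after creating the driver, before any elements are located or clicked.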

Both approaches should work.
I prefer the first way 🙂
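
As a quick sanity check, the sketch below (not from the original answer; the chromedriver path is a placeholder) compares the window size headless Chrome reports with and without the flag:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.service import Service

def make_headless_driver(set_size):
    # Build a headless Chrome driver, optionally forcing a large window.
    options = Options()
    options.add_argument("--headless")
    if set_size:
        options.add_argument("--window-size=1920,1080")
    # Placeholder path; point it at your local chromedriver.
    service = Service(r'C:\webdrivers\chromedriver.exe')
    return webdriver.Chrome(service=service, options=options)

for set_size in (False, True):
    driver = make_headless_driver(set_size)
    # Without the flag headless Chrome reports its small default size
    # (around 800x600); with the flag it reports 1920x1080.
    print(set_size, driver.get_window_size())
    driver.quit()

At the small default size the drop-down's <li> options can be rendered underneath another element, which is exactly what the ElementClickInterceptedException message in the question reports.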

Answered By: Prophet