Cara scrape halaman detail-nya bagaimana ya gan? Sejauh ini saya baru bisa berpindah ke next page saja.

"""Scrape drug listings (and their detail pages) from Halodoc's cough-and-flu category.

Walks every listing page of the category, collects each product's
detail-page href from the listing soup, visits the detail page, and
parses it so the desired fields can be extracted there.
"""

from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from bs4 import BeautifulSoup
import re
import time
import pandas as pd
from tabulate import tabulate
import os
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as ec

# Category listing URL; pages are addressed as .../page/<n>.
url = "https://www.halodoc.com/obat-dan-vitamin/batuk-dan-flu/page/1"

# Create a new Firefox session and open the first listing page.
driver = webdriver.Firefox()
driver.implicitly_wait(100)
driver.get(url)

# Wait until at least one product card has been rendered before parsing.
ITEM_XPATH = "//li[@class='list__item-container item']"
WebDriverWait(driver, 10).until(
    ec.presence_of_element_located((By.XPATH, ITEM_XPATH))
)
soup_level1 = BeautifulSoup(driver.page_source, 'lxml')

# The pagination widget's "last page" link tells us how many pages exist.
test = soup_level1.find("a", {"class": "pagination__last-page"})
test = test.find("span", {"class": "pagination__number"})
lastpage = int(test.contents[0])

# NOTE: range end is lastpage + 1 — the original `range(1, lastpage)`
# silently skipped the final listing page.
for i in range(1, lastpage + 1):
    # Collect every detail href from the CURRENT listing soup *before*
    # navigating away; navigating inside the anchor loop (as the original
    # did) iterates a soup that no longer matches the page in the browser.
    hrefs = [
        part.find('a').attrs['href']
        for part in soup_level1.find_all('li', class_='list__item-container item')
    ]

    for href in hrefs:
        print(href)
        # Original joined against the trailing-slash domain; kept as-is.
        # NOTE(review): if href already starts with '/', this produces a
        # double slash — confirm against the live markup.
        driver.get("https://www.halodoc.com/" + href)
        # Parse the detail page so its fields can actually be extracted —
        # this parse step was missing in the original.
        detail_soup = BeautifulSoup(driver.page_source, 'lxml')
        # TODO: pull the desired drug-detail fields out of detail_soup here.

    if i == lastpage:
        break  # no next page to load; avoid requesting a nonexistent page

    # Load the next listing page, wait for its cards, and re-parse.
    driver.get(
        "https://www.halodoc.com/obat-dan-vitamin/batuk-dan-flu/page/" + str(i + 1)
    )
    WebDriverWait(driver, 10).until(
        ec.presence_of_element_located((By.XPATH, ITEM_XPATH))
    )
    soup_level1 = BeautifulSoup(driver.page_source, 'lxml')

avatar BlitzKrieg99
@BlitzKrieg99

1 Kontribusi 0 Poin

Dipost 5 tahun yang lalu

Tanggapan

Coba bertanya dengan lebih jelas, gan — jelaskan bagian mana yang bermasalah, jangan cuma paste code saja.

Belum ada jawaban. Jadilah yang pertama menjawab.

Login untuk ikut Jawaban