Thanks to those who responded to help. I did get it to open the table I wanted. For anyone who wants to do something similar (open a JMP table, send each column entry to a third-party web site, and extract data from the page related to that entry), here is a general form of the Python code I worked out. I am still working on getting the output to load directly back into the JMP table rather than extracting it from the log, although log extraction does allow for editing.
import jmp
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import TimeoutException
# Set up the Selenium WebDriver (Safari; it runs in a visible window, which helps with debugging,
# and does not support headless mode, so no browser options are needed)
driver = webdriver.Safari()
# Path to the JMP file (adjust to your own path)
file_path = "/YOUR JMP PATH.jmp"
# Open the specific table by file path
dt = jmp.open(file_path) # Open the table directly from the file
# Access the 'YOUR_COLUMN' column from the open table
YOUR_COLUMN_column = dt["YOUR_COLUMN"]  # Access a column by name
# Initialize a list to store the results (YOUR NEW COLUMNS)
results = []
# Function to extract YOUR DATA for a single column entry
def get_YOUR_OUTPUT(YOUR_COLUMN_value):
    # Build the page URL from the column entry
    url = f'https://www.YOUR WEB PAGE TO EXTRACT FROM/{YOUR_COLUMN_value}'
    try:
        # Open the URL
        driver.get(url)
        # Set a short timeout (e.g., 5 seconds) to wait for the page to load
        WebDriverWait(driver, 5).until(
            EC.presence_of_element_located((By.XPATH, "//a[contains(@href, 'YOUR_KEY')]"))
        )
        # Adjust the XPath to grab the correct link (e.g., the first match); exclude 'access'-related links
        YOUR_tag = driver.find_element(
            By.XPATH, "//a[contains(@href, 'YOUR_KEY') and not(contains(@href, 'access'))]"
        )
        return YOUR_tag.text.strip()  # Extract and return the text of YOUR_KEY
    except TimeoutException:
        print(f"Timeout error for {YOUR_COLUMN_value}: URL did not load within the specified time.")
        return None  # If the URL doesn't load, return None
    except Exception as e:
        print(f"Error extracting YOUR TAGS for {YOUR_COLUMN_value}: {e}")
        return None
# Iterate over each row (assumes the jmp column object supports len() and indexing; adjust for your JMP version)
for i in range(len(YOUR_COLUMN_column)):
    YOUR_COLUMN_value = YOUR_COLUMN_column[i]
    YOUR_OUTPUT = get_YOUR_OUTPUT(YOUR_COLUMN_value)
    results.append((YOUR_COLUMN_value, YOUR_OUTPUT))  # Store the result as a tuple
    # Print the result to the log
    print(f"YOUR_COLUMN: {YOUR_COLUMN_value} -> YOUR_TAG: {YOUR_OUTPUT}")
# Close the WebDriver after processing all the entries
driver.quit()
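For the remaining piece, loading the output back into the table instead of pulling it from the log, something along these lines might work. This is only a rough sketch: I have not verified these calls, so treat new_column, jmp.DataType.Character, and cell assignment by index as assumptions about the JMP Python API and check the JMP 18 Python documentation for the exact names.
# Rough sketch: write the scraped values into a new character column in the open table.
# ASSUMPTION: dt.new_column() and jmp.DataType.Character exist in your JMP version,
# and column cells can be assigned by row index; verify before relying on this.
dt.new_column("YOUR_TAG", jmp.DataType.Character)
tag_col = dt["YOUR_TAG"]
for i, (YOUR_COLUMN_value, YOUR_OUTPUT) in enumerate(results):
    # Leave the cell empty when the scrape returned nothing
    tag_col[i] = YOUR_OUTPUT if YOUR_OUTPUT is not None else ""
If those calls are not available in your version, the fallback I am using for now is the log extraction described above.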