Some time ago I blogged about wpsauron, a simple script I made to help me pentest secured WordPress websites (secured = patched, behind a WAF, no public vulns, etc.).

Basically, this script downloads WordPress plugins from the official wordpress.org plugin directory, then loads them into a docker-compose stack “composed” of a WordPress image, MySQL and a web server, so everything is set up to run SAST / DAST tools and perform code analysis to find some unknown vuln.

So I had an idea: what if I reused that code to download N WordPress plugins and then ran some grep queries looking for unsanitized input?

I quickly spaghetti-coded some methods to scrape and download plugins:

import io
import zipfile

import requests
from bs4 import BeautifulSoup


# credits to tomnomnom for the idea.
def waybackurls(target, include_subs=False):
    wildcard = "*"
    if include_subs:
        url = f'http://web.archive.org/cdx/search/cdx?url={wildcard}.{target}/*&output=json&fl=original&collapse=urlkey'
    else:
        url = f'http://web.archive.org/cdx/search/cdx?url={target}/*&output=json&fl=original&collapse=urlkey'

    r = requests.get(url)
    results = r.json()
    # the first row of the CDX JSON output is the header row, skip it
    results = [item[0] for item in results][1:]
    return results


def extract_plugin_name_from_url(target):
    # pull the plugin slug out of a .../wp-content/plugins/<slug>/... URL
    separator = "/wp-content/plugins/"
    if separator in target:
        return target.split(separator)[1].split("/")[0].strip().lower()


def build_plugin_url(plugin_name):
    url = f"https://wordpress.org/plugins/{plugin_name}/"
    return url


def check_plugin_exists(plugin_name):
    """
    check if plugin exists in the wordpress plugin database;
    a missing plugin redirects to the plugin search page
    """
    r = requests.get(build_plugin_url(plugin_name))
    if "/plugins/search/" in r.url:
        return False
    return True


def download_plugin_and_extract(zip_file_url, extraction_path):
    r = requests.get(zip_file_url)
    z = zipfile.ZipFile(io.BytesIO(r.content))
    z.extractall(extraction_path)


def get_plugin_stats(plugin_url):
    """
    bad scraping here :)
    """
    data = {}
    r = requests.get(plugin_url)
    soup = BeautifulSoup(r.text, 'html.parser')

    try:
        els = soup.find('div', {"class": ["widget plugin-meta", "widget"]}).find("ul").find_all("li")
    except AttributeError:
        # the page has no plugin-meta widget, nothing to scrape
        return data

    data["plugin_url"] = plugin_url
    for el in els:
        el = el.text.replace("\n", "").replace("\t", "").strip()
        try:
            if "Version" in el and "WordPress" not in el and "PHP" not in el:
                data["plugin_version"] = el.split("Version:")[1].strip()
            elif "Last updated:" in el:
                data["last_updated"] = el.split("Last updated:")[1].strip()
            elif "Active installations:" in el:
                data["active_installations"] = el.split("Active installations:")[1].strip()
            elif "WordPress" in el:
                data["wordpress_version"] = el.split("WordPress Version:")[1].strip()
            elif "PHP" in el:
                data["php_version"] = el.split("PHP Version:")[1].strip()
        except IndexError:
            # label was there but the split failed, skip the field
            pass

    try:
        data["download_url"] = soup.find('a', {"class": ["plugin-download", "button", "download-button", "button-large"]})["href"]
    except (TypeError, KeyError):
        # no download button on the page
        return data

    return data


def get_plugins_set_from_domain(domain):
    urls = waybackurls(domain)
    plugins = set()
    for u in urls:
        name = extract_plugin_name_from_url(u)
        if name:
            plugins.add(name)
    return plugins


def get_plugins_and_info_from_latest(number_of_pages=1):
    number_of_pages = int(number_of_pages)
    plugins_url_set = set()
    for i in range(1, number_of_pages + 1):
        base_url = "https://wordpress.org/plugins/browse/new/"
        if i != 1:
            base_url = f"https://wordpress.org/plugins/browse/new/page/{i}/"

        r = requests.get(base_url)
        soup = BeautifulSoup(r.text, 'html.parser')
        for link in soup.find_all('a'):
            if link.has_attr('href') and "https://wordpress.org/plugins/" in link['href']:
                plugins_url_set.add(link['href'])

    result = {}
    for plugin in plugins_url_set:
        result[plugin] = get_plugin_stats(plugin)
    return result


def scrape_domain_and_get_plugins_info(domain):
    plugins = get_plugins_set_from_domain(domain)
    data = {}
    if plugins:
        for plugin_name in plugins:
            data[plugin_name] = {}
            plugin_url = build_plugin_url(plugin_name)
            results = get_plugin_stats(plugin_url)
            if results:
                data[plugin_name] = results
    return data
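
With all the pieces in place, pulling plugin info for a single target is a one-liner. A minimal usage sketch (example.com is just a placeholder):

info = scrape_domain_and_get_plugins_info("example.com")
for name, stats in info.items():
    print(name, stats.get("download_url"))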

Then I downloaded about 100-200 plugins.
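
The download step itself is just a loop over the scraped data. Roughly, and hedging that the page count and extraction path here are illustrative, it looked something like:

plugins = get_plugins_and_info_from_latest(number_of_pages=10)
for url, stats in plugins.items():
    if stats.get("download_url"):
        download_plugin_and_extract(stats["download_url"], "./plugins")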

Then I ran a huge grep query, looking for unsanitized $_GET parameters.
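
The exact query isn't the interesting part; a rough Python equivalent of that grep looks like this (the pattern and the ./plugins path are illustrative, and it is deliberately noisy):

import os
import re

# naive heuristic: $_GET[...] echoed or printed with no obvious sanitization
pattern = re.compile(r"(echo|print)\s+[^;]*\$_GET\[", re.IGNORECASE)

for root, _, files in os.walk("./plugins"):
    for filename in files:
        if filename.endswith(".php"):
            path = os.path.join(root, filename)
            with open(path, errors="ignore") as fh:
                for lineno, line in enumerate(fh, 1):
                    if pattern.search(line):
                        print(f"{path}:{lineno}: {line.strip()}")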

Five minutes later, I was on the WPScan submission form reporting a vuln I had found in a payment processor plugin, and I got a CVE assigned:

CVE-2022-1673

https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2022-1673 (more details can be disclosed after the 25th of May)
