141 changes: 54 additions & 87 deletions app.py
@@ -1,89 +1,56 @@
import os
import re
import requests
from PIL import Image
from bs4 import BeautifulSoup
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl

# --- Variable ranges (universes of discourse) ---
weight = ctrl.Antecedent(np.arange(0, 1001, 1), 'weight')
dryer = ctrl.Antecedent(np.arange(0, 101, 1), 'dryer')
time = ctrl.Consequent(np.arange(0, 241, 1), 'time')

# --- Membership functions ---
weight['low'] = fuzz.trimf(weight.universe, [0, 0, 200])
weight['medium'] = fuzz.trimf(weight.universe, [150, 325, 500])
weight['high'] = fuzz.trimf(weight.universe, [400, 700, 1000])

dryer['low'] = fuzz.trimf(dryer.universe, [0, 0, 40])
dryer['medium'] = fuzz.trimf(dryer.universe, [30, 50, 70])
dryer['high'] = fuzz.trimf(dryer.universe, [60, 100, 100])

time['low'] = fuzz.trimf(time.universe, [0, 0, 60])
time['medium'] = fuzz.trimf(time.universe, [50, 85, 120])
time['high'] = fuzz.trimf(time.universe, [100, 170, 240])
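# Each trimf takes [a, b, c]: the triangle's left foot, peak, and right foot.
# For example, a 325 g load has membership 1.0 in weight['medium'] (the peak
# of [150, 325, 500]) and 0 in the other two weight sets.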

# --- Visualisation of the membership functions ---
weight.view()
dryer.view()
time.view()
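# Note: .view() draws through matplotlib; when running this as a plain script
# you may need matplotlib.pyplot.show() afterwards for the figures to appear.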


# --- Rules ---
rule1 = ctrl.Rule(weight['low'] & dryer['high'], time['low'])
rule2 = ctrl.Rule(weight['low'] & dryer['medium'], time['medium'])
rule3 = ctrl.Rule(weight['low'] & dryer['low'], time['medium'])
rule4 = ctrl.Rule(weight['medium'] & dryer['high'], time['medium'])
rule5 = ctrl.Rule(weight['medium'] & dryer['medium'], time['medium'])
rule6 = ctrl.Rule(weight['medium'] & dryer['low'], time['high'])
rule7 = ctrl.Rule(weight['high'] & dryer['high'], time['medium'])
rule8 = ctrl.Rule(weight['high'] & dryer['medium'], time['high'])
rule9 = ctrl.Rule(weight['high'] & dryer['low'], time['high'])

# --- Creating controller ---
drying_ctrl = ctrl.ControlSystem([rule1, rule2, rule3, rule4, rule5, rule6, rule7, rule8, rule9])
drying = ctrl.ControlSystemSimulation(drying_ctrl)

# --- Example 1 ---
drying.input['weight'] = 150
drying.input['dryer'] = 80
drying.compute()
print("Пример 1: время сушки =", drying.output['time'], "мин") # ожидаем короткое время

# --- Example 2 ---
drying.input['weight'] = 600
drying.input['dryer'] = 30
drying.compute()
print("Пример 2: время сушки =", drying.output['time'], "мин") # ожидаем длительное время

class ImageSpider:
    def __init__(self):
        # Remember the starting directory so we can return to it after scraping.
        self.home = os.getcwd()

    def grab_all_image_links(self, URL):
        try:
            valid_links = []
            url_protocol = URL.split('/')[0]  # e.g. 'http:' or 'https:'
            url_html = requests.get(URL).text
            # findall returns tuples (full match, scheme group, extension group);
            # the complete URL is the first element of each tuple.
            image_urls = re.findall(r'((http:|https:)?//[^"\' ]*?\.(png|jpg))', url_html, flags=re.IGNORECASE | re.MULTILINE | re.UNICODE)
            for image in image_urls:
                image_url = image[0]
                # Prepend the page's scheme to protocol-relative links ('//host/img.png').
                if not image_url.startswith(url_protocol):
                    image_url = url_protocol + image_url
                valid_links.append(image_url)
            print('Done')
        except Exception as graberror:
            print('Error occurred while grabbing image links')
            print(graberror)
            return []
        return valid_links

    @staticmethod
    def extract_image_name(url):
        # The file name is the last '/'-separated segment of the URL.
        image_name = str(url).split('/')[-1]
        return image_name

    @staticmethod
    def extract_site_name(url):
        # For 'https://host/path', the host is the third '/'-separated part.
        sitename = str(url).split('/')[2]
        return sitename

    def saving_images(self, url):
        image_links = self.grab_all_image_links(url)
        for link in image_links:
            # Stream the response and let Pillow decode and re-save it locally.
            raw_image = requests.get(link, stream=True).raw
            img = Image.open(raw_image)
            image_name = self.extract_image_name(link)
            img.save(image_name)

    def grab_all_links(self, url):
        links = [url]
        link_html = requests.get(url).text
        all_links = BeautifulSoup(link_html, 'html.parser').findAll('a')
        for link in all_links:
            href = link.get('href')
            # startswith('http') already matches 'https' URLs as well.
            if href and href.startswith('http'):
                links.append(href)
        return links

    def download_images(self):
        url = input('Enter URL with images : ')
        try:
            sitename = self.extract_site_name(url)
            print('Extracting from {} ...'.format(sitename))
            os.mkdir(sitename)
            os.chdir(sitename)
            print('\nShould we scan the entire site or just this page?')
            option = int(input('1. Entire site\n2. Just this page\nOption : '))
            if option == 1:
                all_available_links = set(self.grab_all_links(url))
            else:
                all_available_links = [url]
            for link in all_available_links:
                try:
                    print(link)
                    self.saving_images(link)
                except Exception:
                    # Skip pages whose images fail to download or decode.
                    continue

        except Exception as Error:
            print('Error occurred while grabbing site links')
            print(Error)

        finally:
            print('Scraping finished')
            os.chdir(self.home)


if __name__ == '__main__':
    spider = ImageSpider()
    spider.download_images()
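# A non-interactive usage sketch (the URL below is hypothetical, shown only
# for illustration; grab_all_image_links fetches one page and returns the
# image URLs it finds without downloading anything):
#
#     spider = ImageSpider()
#     for link in spider.grab_all_image_links('https://example.com'):
#         print(link)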