from fastai.learner import load_learner
from fastai.vision.all import *
import os
import contextlib
import warnings
import panel as pn
from huggingface_hub import push_to_hub_fastai, notebook_login
from datasets import load_dataset
# from datasets import ImageFolder
pn.extension()
warnings.filterwarnings("ignore")
Compatibility Block
Check Platform
Platform & Environment Configuration
Imports
Public Imports
Private Imports
from aiking.core import aiking_settings
from aiking.data.external import *
Loading Learner
We have already trained our model in a previous notebook. We now quickly review the steps to load it and verify that it works.
dsname = 'PandemicSafety'
model_fname = "pandemic_v2.pkl"
learn = load_learner(aiking_path('model')/model_fname); learn
<fastai.learner.Learner>
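As a quick sanity check after loading, we can confirm the learner carries the expected class labels. A minimal sketch, assuming the two-class Mask/No_Mask setup from the training notebook:

learn.dls.vocab  # expected: ['Mask', 'No_Mask'] for this dataset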
Testing model locally
uploader = pn.widgets.FileInput(); uploader
uploader.filename
'Asian-man-wearing-a-mask-is-sick-pointing-finger-PNG[2].png'
im = pn.pane.Image(uploader.value); im
learn.predict(uploader.value)
('No_Mask', tensor(1), tensor([0.4785, 0.5215]))
learn.predict(PILImage.create(uploader.value))
('No_Mask', tensor(1), tensor([0.4785, 0.5215]))
uploader = pn.widgets.FileInput(); uploader
PILImage.create(uploader.value)
learn.predict(PILImage.create(uploader.value))
('Mask', tensor(0), tensor([9.9937e-01, 6.2690e-04]))
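The raw predict tuple is a bit opaque. A small helper (hypothetical, not part of the notebook's codebase) turns it into a label plus a confidence score:

def classify(learn, img_bytes):
    "Run the learner on raw image bytes and return (label, confidence)."
    label, idx, probs = learn.predict(PILImage.create(img_bytes))
    return label, float(probs[idx])

classify(learn, uploader.value)  # e.g. ('Mask', 0.9994)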
Warning
The model is not perfect. It fails to recognize the first image (a white dress, the same colour as the mask) with the background removed, but it correctly recognizes the second image, which keeps its background.
One idea to improve the model would be to show it more examples of similar types, if we can collect them. We can also try more epochs, RandomResizedCrop, and aug_transforms, as discussed in the lecture. A sketch of that idea follows below.
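A minimal retraining sketch with those augmentations, assuming the images live under aiking_path('data')/dsname in per-label folders; the resnet18 backbone, batch size, and epoch count here are illustrative, not necessarily what the original training notebook used:

dblock = DataBlock(
    blocks=(ImageBlock, CategoryBlock),
    get_items=get_image_files,
    splitter=RandomSplitter(valid_pct=0.2, seed=42),
    get_y=parent_label,
    item_tfms=RandomResizedCrop(224, min_scale=0.5),  # crop-based augmentation
    batch_tfms=aug_transforms())                      # default fastai augmentations
dls = dblock.dataloaders(aiking_path('data')/dsname, bs=32)
learn = vision_learner(dls, resnet18, metrics=error_rate)
learn.fine_tune(8)  # more epochs than the first run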
Uploading Learner to Hugging Face Hub
@contextlib.contextmanager
def preserve_cwd(new_dir):
    "Temporarily switch to `new_dir`, restoring the original working directory on exit."
    curdir = os.getcwd()
    os.chdir(new_dir)
    try: yield
    finally: os.chdir(curdir)
notebook_login()
repo_name = f"Model-{dsname}"; repo_name
uname = os.getenv('HUGGINGFACE_HUB_USERNAME')
with preserve_cwd(os.getenv('HUGGINGFACE_HUB_DIR')):
    push_to_hub_fastai(learner=learn, repo_id=f"{uname}/{repo_name}")
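To confirm the upload worked, the model can be pulled back from the Hub. A minimal sketch using huggingface_hub's fastai integration:

from huggingface_hub import from_pretrained_fastai
learn_remote = from_pretrained_fastai(f"{uname}/{repo_name}")
learn_remote.predict(PILImage.create(uploader.value))  # should match the local prediction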
repo_name = f"Dataset-{dsname}"; repo_name
uname = os.getenv('HUGGINGFACE_HUB_USERNAME')
ds_dir = aiking_path('data')/f"{dsname}"; ds_dir.exists()
True
ds = load_dataset('imagefolder', data_dir=ds_dir); ds
DatasetDict({
    train: Dataset({
        features: ['image', 'label'],
        num_rows: 309
    })
})
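Before pushing, we can spot-check that the imagefolder loader inferred the labels correctly; the expected names below assume the Mask/No_Mask folder layout:

ds['train'].features['label'].names  # expected: ['Mask', 'No_Mask']
ds['train'][0]                       # {'image': <PIL.Image ...>, 'label': 0}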
ds.push_to_hub(f"{uname}/{repo_name}", private=True)
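Since the repo is private, reading it back requires authentication (the notebook_login() call above, or an HF token). A quick round-trip check:

ds_remote = load_dataset(f"{uname}/{repo_name}")  # uses the cached credentials from notebook_login()
ds_remote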
Warning
Datasets have a specific format for image files. Additionally, I have not yet found a way to maintain versions, data-cleaning steps, etc. without copying the data multiple times. For now, I intend to use this as a bin for saving downloaded images in Parquet format.