import pandas as pd
import plotly.express as px
from fastcore.all import *
import streamlit as st
Compatibility Block
Check Platform
Platform & Environment Configuration
Imports
="Which Image Model is best?",layout="wide") st.set_page_config(page_title
from streamlit_jupyter import StreamlitPatcher, tqdm
def get_results(result_option='original'):
    """Load the timm ImageNet validation-results table from GitHub.

    Args:
        result_option: 'original' loads results-imagenet.csv; any other
            value (e.g. 'real') loads the ImageNet-Real variant
            (results-imagenet-real.csv).

    Returns:
        DataFrame with two derived columns: 'model_org' keeps the full
        model name (used for hover labels), while 'model' is truncated at
        the first '.' so it can be merged with the benchmark tables.
    """
    suffix = "" if result_option == 'original' else "-real"
    url_results = f"https://github.com/huggingface/pytorch-image-models/raw/main/results/results-imagenet{suffix}.csv"
    # NOTE: removed a dangling `df_results.head()` — its result was discarded.
    df_results = pd.read_csv(url_results)
    df_results['model_org'] = df_results['model']
    df_results['model'] = df_results['model'].str.split('.').str[0]
    return df_results
# url_results = f"https://github.com/huggingface/pytorch-image-models/raw/main/results/results-imagenet.csv"
# url_results = f"https://raw.githubusercontent.com/huggingface/pytorch-image-models/main/results/results-imagenet.csv"
# pd.read_csv(url_results)
def get_integrated_data(activity_option, result_option):
    """Merge accuracy results with the RTX3090 benchmark table for one activity.

    Args:
        activity_option: 'train' or 'infer' — selects the benchmark CSV.
        result_option: forwarded to get_results ('original' or 'real').

    Returns:
        Merged DataFrame with added 'is_tensorflow_model' and 'family'
        columns; group-norm ('gn') models are excluded.
    """
    df_results = get_results(result_option)
    url_benchmark = f"https://github.com/huggingface/pytorch-image-models/raw/main/results/benchmark-{activity_option}-amp-nhwc-pt112-cu113-rtx3090.csv"
    df_benchmark = pd.read_csv(url_benchmark)
    df_integrated = df_results.merge(df_benchmark, on='model')
    # TF-ported weights are prefixed 'tf_' in timm.
    df_integrated['is_tensorflow_model'] = df_integrated.model.str.split('_').str[0] == 'tf'
    # Architecture "family": strip framework/legacy prefixes, then take the
    # leading lowercase run (plus an optional v2/v3 suffix) up to a digit,
    # underscore or end of name. Regex is now a raw string (was '\d' in a
    # plain string — a DeprecationWarning); the duplicated
    # .str.removeprefix("nf_") call was collapsed to one.
    df_integrated['family'] = (df_integrated.model
                               .str.removeprefix("tf_")
                               .str.removeprefix("legacy_")
                               .str.removeprefix("nf_")
                               .str.extract(r'^([a-z]+?(?:v2|v3)?)(?:\d|_|$)')[0].values)
    # Tag ImageNet-22k pre-trained models and ResNet-D variants as distinct families.
    in22 = df_integrated.model.str.contains('in22')
    df_integrated.loc[in22, 'family'] = df_integrated.loc[in22, 'family'] + "_in22"
    resnet_d = df_integrated.model.str.contains('resnet.*d')
    df_integrated.loc[resnet_d, 'family'] = df_integrated.loc[resnet_d, 'family'] + "d"
    return df_integrated[~df_integrated.model.str.endswith('gn')]  # Group norm models. Why Jeremy eliminated them from analysis?
# df_integrated= get_integrated_data('train', 'original')
# models = L(df_integrated.model.values.tolist()); models
# families = L(df_integrated.family.values.tolist()); families.unique()
# df_integrated[df_integrated.model.str.endswith('gn')] # Group norm models. Why Jeremy eliminated them from analysis?
# df_integrated[df_integrated.model.str.contains('rs')]
# df_integrated.model.str.lstrip("tf_").str.extract('^([a-z]+?(?:v2|v3)?)(?:\d|_|$0)')[0].values
# # "cspresnet50".removeprefix("legacy_")
# df_integrated= get_integrated_data('train', 'original')
# df_integrated
@st.cache_data
def get_filtered_data(df_integrated, subs, is_fullmatch=False, drop_tf=True):
    """Filter the integrated table.

    Optionally drops TensorFlow-ported models, then restricts rows by the
    `subs` regex — full-matched against 'family' when is_fullmatch, else
    substring-matched against 'model'. An empty `subs` means no filtering.
    """
    df = df_integrated
    if drop_tf:
        df = df[~df.is_tensorflow_model]
    if not subs:
        return df
    if is_fullmatch:
        return df[df.family.str.fullmatch(subs)]
    return df[df.model.str.contains(subs)]
def get_data(col_option, activity_option, result_option, subs, is_fullmatch=False, drop_tf=True):
    """Build the plotting frame.

    Integrates results + benchmark, applies filtering, and derives a
    'secs' column as the reciprocal of the '<activity>_<col>' column
    (e.g. seconds per sample from samples_per_sec).
    """
    col = "_".join([activity_option, col_option])
    df = get_integrated_data(activity_option, result_option)
    df = get_filtered_data(df, subs, is_fullmatch=is_fullmatch, drop_tf=drop_tf)
    df['secs'] = 1. / df[col]
    return df
def plot_selection(df, title, col_option, activity_option, w=1000, h=800):
    """Log-log scatter of seconds ('secs') vs top-1 accuracy.

    Points are colored by family, sized by the squared
    '<activity>_<col>' column, and fitted with an OLS trendline
    (computed in log-x space). Hover shows the original model name.
    """
    size_col = "_".join([activity_option, col_option])
    return px.scatter(
        df, width=w, height=h,
        size=df[size_col]**2,
        trendline="ols", trendline_options={'log_x': True},
        title=title,
        x="secs", log_x=True,
        y='top1', log_y=True,
        color="family",
        hover_name='model_org',
        hover_data=[size_col],
    )
# --- Notebook exploration: option lists and a sample plot ------------------
result_options = ['original', 'real']  # result = 'real'
activity_options = ['train', 'infer']
col_options = ['samples_per_sec', 'step_time', 'batch_size', 'img_size', 'gmacs', 'macts']
# NOTE: removed dead first assignments of `subs`
# ('^re[sg]netd?|beit|convnext|levit|efficient|vit|vgg|swin') and
# `is_fullmatch` (False) — both were immediately overwritten below.
subs = 'levit|resnetd?|regnetx|vgg|convnext.*|efficientnetv2|beit|swin'
is_fullmatch = True
drop_tf = False
result_option = result_options[0]      # 'original'
activity_option = activity_options[1]  # 'infer'
col_option = col_options[0]            # 'samples_per_sec'
size_col_option = col_options[3]       # 'img_size'
title_dict = dict(zip(activity_options, ['Training', "Inference"]))
df = get_data(col_option, activity_option, result_option, subs, is_fullmatch=is_fullmatch, drop_tf=drop_tf)
fig = plot_selection(df, title_dict[activity_option], size_col_option, activity_option)
from IPython.display import HTML
HTML(fig.to_html())
Streamlit App
# --- Streamlit App ---------------------------------------------------------
# Patch Streamlit so the same code also renders inside Jupyter.
sp = StreamlitPatcher()
sp.jupyter()

st.title("Which Image Model is best?")
col1, col2 = st.columns([1, 3])

with col1:
    # Sidebar-style settings panel.
    st.header("Settings")
    result_option = st.selectbox("Please choose dataset", result_options)
    activity_option = st.selectbox("Please choose activity", activity_options)
    subs = st.text_input("Subs", value='levit|resnetd?|regnetx|vgg|convnext.*|efficientnetv2|beit|swin')
    is_fullmatch = st.checkbox("Is fullmatch", value=True)
    drop_tf = st.checkbox("Drop Tensorflow Models", value=False)
    col_option = st.selectbox("Please choose col_option", col_options)
    size_col_option = st.selectbox("Please choose sizing col_option", col_options, index=3)

with col2:
    title_dict = dict(zip(activity_options, ['Training', "Inference"]))
    df = get_data(col_option, activity_option, result_option, subs, is_fullmatch=is_fullmatch, drop_tf=drop_tf)
    fig = plot_selection(df, None, size_col_option, activity_option, h=500, w=1000)
    # Plot!
    st.header(title_dict[activity_option])
    st.plotly_chart(fig, use_container_width=True, height=500)
Huggingface
huggingface_hub[fastai]
streamlit
plotly
pandas
statsmodels
# --- Export the notebook and publish it as a Hugging Face Space ------------
import nbdev; nbdev.export.nb_export("03a_image_archs.ipynb", lib_path=".")
from huggingface_hub import create_repo, HfApi, notebook_login

notebook_login()
space_repo = "rahuketu86/Space-ImageArchs"
create_repo(space_repo, repo_type='space', space_sdk='streamlit', exist_ok=True)

files_to_upload = ['requirements.txt', 'app.py']; files_to_upload
api = HfApi()
for fname in files_to_upload:
    api.upload_file(path_or_fileobj=fname, path_in_repo=fname, repo_id=space_repo, repo_type='space')

# Clean up the local copies once uploaded.
L(files_to_upload).map(lambda e: Path(e).unlink())