merve (HF staff) committed on
Commit 9e3ec65 · 1 Parent(s): f1346cf

Update app.py

Files changed (1)
  1. app.py +9 -12
app.py CHANGED
@@ -81,21 +81,21 @@ with gr.Blocks(title=title) as demo:
# out = gr.Textbox(label="explaination of the loss function")
gr.Markdown(
"""
- # Data Generation
+ ## Data Generation
We generate 2 clusters one spherical and the other slightly deformed, from Standard Normal distribution
For the sake of consistency inliers are assigned a ground label of 1 and outliers are assigned a label -1.
The plot is a visualization of the clusters of the input dataset.

""")

- with gr.Tab("**Plot Decision Boundary**"):
+ with gr.Tab("Plot Decision Boundary"):
# btn_decision = gr.Button(value="Plot decision boundary")
# btn_decision.click(plot_decision_boundary, outputs= gr.Plot(label='Plot decision boundary') )
with gr.Row():
image_decision = gr.Image('./downloaded-model/decision_boundary.png')
gr.Markdown(
"""
- # Plot the Discrete Decision Boundary
+ ## Plot the Discrete Decision Boundary
We plot the discrete decision boundary.
The background colour represents whether a sample in that given area is predicted to be an outlier or not.
The scatter plot displays the true labels
@@ -107,24 +107,21 @@ with gr.Blocks(title=title) as demo:
image_path = gr.Image('./downloaded-model/plot_path.png')
gr.Markdown(
"""
- # Plot the path length of the decision boundary
- By setting the response_method="decision_function", the background of the DecisionBoundaryDisplay represents
+ ## Plot the path length of the decision boundary
+ By setting `response_method="decision_function"`, the background of the `DecisionBoundaryDisplay` represents
the measure of the normality of an observation.

- Normality of Observation = path length/(Number_of_forests_of_random trees) - Eqn.1
+ Normality of Observation = Path Length / Number of Random Trees in the Forest


- The RHS of the above equation Eqn.1 is given by the number of splits required to isolate a given sample
+ The numerator of the above equation is the number of splits required to isolate a given sample.
Such score is given by the path length averaged over a forest of random trees, which itself is given by the depth of
- the leaf (or equivalently the number of splits)
- required to isolate a given sample.
+ the leaf (or equivalently the number of splits) required to isolate a given sample.

When a forest of random trees collectively produces short path lengths for isolating some particular samples,
- they are highly likely to be anomalies and the measure of normality is close to 0.
+ they are highly likely to be anomalies, and the measure of normality is close to 0.
Similarly, large paths correspond to values close to 1 and are more likely to be inliers.

""")

-
- gr.Markdown( f"## Success")
demo.launch()
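
The data-generation text in the first hunk mirrors scikit-learn's IsolationForest example. A minimal, hedged sketch of what such a setup could look like (the cluster centres, the deformation matrix, the uniform outliers, and the sample counts are illustrative assumptions, not values read from this app):

```python
import numpy as np

rng = np.random.RandomState(42)
n_inliers, n_outliers = 100, 20

# Spherical cluster: standard-normal draws, scaled and shifted to (2, 2).
cluster_1 = 0.5 * rng.randn(n_inliers // 2, 2) + np.array([2, 2])

# Slightly deformed cluster: the same standard-normal draws, stretched by a
# shear-like transform and shifted to (-2, -2).
deform = np.array([[1.0, 0.0], [0.6, 0.4]])
cluster_2 = 0.5 * rng.randn(n_inliers // 2, 2) @ deform + np.array([-2, -2])

# A handful of uniformly scattered outliers (an assumption; the app's Markdown
# only states that outliers receive the label -1).
outliers = rng.uniform(low=-4, high=4, size=(n_outliers, 2))

X = np.concatenate([cluster_1, cluster_2, outliers])
# Ground-truth labels as described: 1 for inliers, -1 for outliers.
y = np.concatenate([np.ones(n_inliers), -np.ones(n_outliers)])
```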
 
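The two Markdown blocks in the diff describe the saved figures the tabs display: a discrete inlier/outlier boundary and a path-length (decision_function) view. A sketch of how such figures might be produced with scikit-learn's DecisionBoundaryDisplay, continuing from the hypothetical X and y above (the IsolationForest parameters, output filenames, and plot styling are assumptions):

```python
import matplotlib.pyplot as plt
from sklearn.ensemble import IsolationForest
from sklearn.inspection import DecisionBoundaryDisplay

clf = IsolationForest(max_samples=100, random_state=0).fit(X)

# Discrete boundary: response_method="predict" colours the background by the
# predicted inlier/outlier label.
disp = DecisionBoundaryDisplay.from_estimator(
    clf, X, response_method="predict", alpha=0.5
)
disp.ax_.scatter(X[:, 0], X[:, 1], c=y, s=20, edgecolor="k")
disp.ax_.set_title("Binary decision boundary of IsolationForest")
plt.savefig("decision_boundary.png")

# Path-length view: response_method="decision_function" colours the background
# by the averaged path-length measure of normality instead of the hard label.
disp = DecisionBoundaryDisplay.from_estimator(
    clf, X, response_method="decision_function", alpha=0.5
)
disp.ax_.scatter(X[:, 0], X[:, 1], c=y, s=20, edgecolor="k")
disp.ax_.set_title("Path length decision boundary of IsolationForest")
plt.savefig("plot_path.png")
```

In the decision_function view, regions that the forest isolates with only a few splits are shaded toward the anomalous end of the scale, which is the behaviour the Markdown text describes.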