
Commit

Sarah Segel: Merge pull request #98 from automl/enhancement/dependency_versions
Github Actions committed Dec 18, 2023
1 parent 762dffd commit 6b82447
Showing 10 changed files with 10 additions and 16 deletions.
2 changes: 1 addition & 1 deletion development/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 5eac73f91509e54461eda47c063a78e4
+config: 1a3e2d12d88382c2eaa1d36d686cb541
tags: 645f666f9bcd5a90fca523b33c5a78b7
Binary file modified development/.doctrees/environment.pickle
Binary file not shown.
Binary file modified development/.doctrees/examples/record/mnist_pytorch.doctree
Binary file not shown.
@@ -63,15 +63,14 @@ def __init__(self, activation="relu", learning_rate=1e-4, dropout_rate=0.1, batc
]
)

-self.accuracy = Accuracy()
+self.accuracy = Accuracy(task="multiclass", num_classes=self.num_classes)

def prepare_data(self):
# download
MNIST(self.data_dir, train=True, download=True)
MNIST(self.data_dir, train=False, download=True)

def setup(self, stage=None):
-
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
mnist_full = MNIST(self.data_dir, train=True, transform=self.transform)
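The change above tracks the newer torchmetrics API: from roughly version 0.11 on, Accuracy expects the task (and, for multiclass metrics, the number of classes) to be given explicitly, so the bare Accuracy() call from the old example no longer works. A minimal sketch of the updated call, assuming torchmetrics >= 0.11 and the 10 MNIST classes; the variable names below are illustrative, not taken from the example:

    import torch
    from torchmetrics import Accuracy

    # Newer torchmetrics releases require an explicit task argument.
    accuracy = Accuracy(task="multiclass", num_classes=10)

    preds = torch.randn(8, 10)             # unnormalized logits for a batch of 8
    targets = torch.randint(0, 10, (8,))   # ground-truth class indices
    print(accuracy(preds, targets))        # fraction of correct predictions as a tensor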
@@ -257,10 +256,9 @@ def get_configspace(seed):

# The model weights are trained
trainer = pl.Trainer(
accelerator="gpu",
accelerator="cpu",
devices=1,
num_sanity_val_steps=0, # No validation sanity
-auto_scale_batch_size="power",
deterministic=True,
min_epochs=epochs,
max_epochs=epochs,
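Dropping auto_scale_batch_size and switching the accelerator to "cpu" is consistent with PyTorch Lightning 2.x, where auto_scale_batch_size is no longer a Trainer argument (batch-size scaling moved to the Tuner utility), and it lets the example run on machines without a GPU. A minimal sketch of the updated Trainer construction, assuming Lightning 2.x; the epochs value and module name are placeholders, not taken from the example:

    import pytorch_lightning as pl

    epochs = 5  # placeholder; the example computes its own value

    trainer = pl.Trainer(
        accelerator="cpu",       # run on CPU so no GPU is required
        devices=1,
        num_sanity_val_steps=0,  # skip the validation sanity check
        deterministic=True,
        min_epochs=epochs,
        max_epochs=epochs,
    )

    # If batch-size scaling is still wanted, Lightning 2.x exposes it via the Tuner
    # instead of the removed Trainer argument (lightning_module is illustrative):
    # from pytorch_lightning.tuner import Tuner
    # Tuner(trainer).scale_batch_size(lightning_module, mode="power")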
Binary file not shown.

Large diffs are not rendered by default.

Binary file not shown.
8 changes: 3 additions & 5 deletions development/_sources/examples/record/mnist_pytorch.rst.txt
@@ -24,7 +24,7 @@ Multi-Layer Perceptron via PyTorch
This more advanced example incorporates multiple objectives, budgets and statuses to
show the strengths of DeepCAVE's recorder.

-.. GENERATED FROM PYTHON SOURCE LINES 8-288
+.. GENERATED FROM PYTHON SOURCE LINES 8-286
.. code-block:: Python
@@ -86,15 +86,14 @@ show the strengths of DeepCAVE's recorder.
]
)
-self.accuracy = Accuracy()
+self.accuracy = Accuracy(task="multiclass", num_classes=self.num_classes)
def prepare_data(self):
# download
MNIST(self.data_dir, train=True, download=True)
MNIST(self.data_dir, train=False, download=True)
def setup(self, stage=None):
# Assign train/val datasets for use in dataloaders
if stage == "fit" or stage is None:
mnist_full = MNIST(self.data_dir, train=True, transform=self.transform)
@@ -280,10 +279,9 @@ show the strengths of DeepCAVE's recorder.
# The model weights are trained
trainer = pl.Trainer(
accelerator="gpu",
accelerator="cpu",
devices=1,
num_sanity_val_steps=0, # No validation sanity
-auto_scale_batch_size="power",
deterministic=True,
min_epochs=epochs,
max_epochs=epochs,
6 changes: 2 additions & 4 deletions development/examples/record/mnist_pytorch.html
@@ -758,15 +758,14 @@
<span class="p">]</span>
<span class="p">)</span>

<span class="bp">self</span><span class="o">.</span><span class="n">accuracy</span> <span class="o">=</span> <span class="n">Accuracy</span><span class="p">()</span>
<span class="bp">self</span><span class="o">.</span><span class="n">accuracy</span> <span class="o">=</span> <span class="n">Accuracy</span><span class="p">(</span><span class="n">task</span><span class="o">=</span><span class="s2">&quot;multiclass&quot;</span><span class="p">,</span> <span class="n">num_classes</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">num_classes</span><span class="p">)</span>

<span class="k">def</span> <span class="nf">prepare_data</span><span class="p">(</span><span class="bp">self</span><span class="p">):</span>
<span class="c1"># download</span>
<span class="n">MNIST</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">data_dir</span><span class="p">,</span> <span class="n">train</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">download</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">MNIST</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">data_dir</span><span class="p">,</span> <span class="n">train</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">download</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>

<span class="k">def</span> <span class="nf">setup</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">stage</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>

<span class="c1"># Assign train/val datasets for use in dataloaders</span>
<span class="k">if</span> <span class="n">stage</span> <span class="o">==</span> <span class="s2">&quot;fit&quot;</span> <span class="ow">or</span> <span class="n">stage</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
<span class="n">mnist_full</span> <span class="o">=</span> <span class="n">MNIST</span><span class="p">(</span><span class="bp">self</span><span class="o">.</span><span class="n">data_dir</span><span class="p">,</span> <span class="n">train</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">transform</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">transform</span><span class="p">)</span>
@@ -952,10 +951,9 @@

<span class="c1"># The model weights are trained</span>
<span class="n">trainer</span> <span class="o">=</span> <span class="n">pl</span><span class="o">.</span><span class="n">Trainer</span><span class="p">(</span>
<span class="n">accelerator</span><span class="o">=</span><span class="s2">&quot;gpu&quot;</span><span class="p">,</span>
<span class="n">accelerator</span><span class="o">=</span><span class="s2">&quot;cpu&quot;</span><span class="p">,</span>
<span class="n">devices</span><span class="o">=</span><span class="mi">1</span><span class="p">,</span>
<span class="n">num_sanity_val_steps</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="c1"># No validation sanity</span>
<span class="n">auto_scale_batch_size</span><span class="o">=</span><span class="s2">&quot;power&quot;</span><span class="p">,</span>
<span class="n">deterministic</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span>
<span class="n">min_epochs</span><span class="o">=</span><span class="n">epochs</span><span class="p">,</span>
<span class="n">max_epochs</span><span class="o">=</span><span class="n">epochs</span><span class="p">,</span>
2 changes: 1 addition & 1 deletion development/searchindex.js

Large diffs are not rendered by default.
