\documentclass{article}
\usepackage{iclr2018_conference,times}
\usepackage{amsmath, amssymb}
\usepackage{hyperref}
\usepackage{url}
\usepackage{mathrsfs}
\usepackage[ruled,vlined]{algorithm2e}
\usepackage{graphicx}
\usepackage{subcaption}
\usepackage{booktabs}
\usepackage{xcolor}
\definecolor{darkblue}{rgb}{0.0, 0.0, 0.55}
\hypersetup{
colorlinks=true,
citecolor=darkblue
}
\usepackage{tikz}
\usetikzlibrary{arrows,automata}
\usepackage{verbatim}
\newcommand{\+}[1]{\boldsymbol{#1}}
\title{Hierarchical Representations for \\ Efficient Architecture Search}
\author{Hanxiao Liu\thanks{Work completed at DeepMind.} \\
Carnegie Mellon University \\
\texttt{hanxiaol@cs.cmu.edu}
\AND
Karen Simonyan, Oriol Vinyals, Chrisantha Fernando, Koray Kavukcuoglu \\
DeepMind \\
\texttt{\{simonyan,vinyals,chrisantha,korayk\}@google.com}
}
\iclrfinalcopy
\begin{document}
\maketitle
\begin{abstract}
We explore efficient neural architecture search methods and
show that a simple yet powerful evolutionary algorithm can discover new architectures with excellent performance.
Our approach combines a novel hierarchical genetic representation scheme that imitates the modularized design pattern commonly adopted by human experts,
and an expressive search space that supports complex topologies.
Our algorithm efficiently discovers architectures
that outperform a large number of manually designed models for image classification,
obtaining a top-1 error of 3.6\% on CIFAR-10 and 20.3\% when transferred to ImageNet,
which is competitive with the best existing neural architecture search approaches.
We also present results using random search, achieving 0.3\% lower top-1 accuracy on CIFAR-10 and 0.1\% lower on ImageNet, whilst reducing the search time from 36 hours down to 1 hour.
\end{abstract}
\section{Introduction}
Discovering high-performance neural network architectures has required years of extensive research by human experts through trial and error.
As far as the image classification task is concerned,
state-of-the-art convolutional neural networks are moving beyond deep, chain-structured layouts \citep{simonyan2014very, he2016deep}
towards increasingly complex, graph-structured topologies \citep{szegedy2015going, szegedy2016rethinking, szegedy2017inception, larsson2016fractalnet, Xie2016, huang2016densely}.
The combinatorial explosion in the design space
makes handcrafted architectures not only expensive to obtain,
but also likely to be suboptimal in performance.
Recently,
there has been a surge of interest in using algorithms to automate the manual process of architecture design.
Their goal can be described as
finding the optimal architecture in a given search space such that the validation accuracy
is maximized on the given task.
Representative architecture search algorithms can be categorized as
random with weights prediction \citep{brock2017smash}, Monte Carlo Tree Search \citep{negrinho2017deeparchitect},
evolution \citep{stanley2002evolving, xie2017genetic, miikkulainen2017evolving, real2017large},
and reinforcement learning \citep{baker2016designing, zoph2016neural, zoph2017learning, zhong2017practical},
among which reinforcement learning approaches have demonstrated the strongest empirical performance so far.
Architecture search can be computationally very intensive as each evaluation typically requires training a neural network. Therefore, it is common to restrict the search space to reduce complexity and increase efficiency of architecture search. Various constraints that have been used
include: growing a convolutional ``backbone'' with skip connections \citep{real2017large},
a linear sequence of filter banks \citep{brock2017smash},
or a directed graph where every node has exactly two predecessors \citep{zoph2017learning}.
In this work we constrain the search space by imposing a
hierarchical network structure, while allowing flexible network topologies (directed acyclic graphs) at each level of the hierarchy.
Starting from a small set of primitives such as convolutional and pooling operations at the bottom level of the hierarchy,
higher-level computation graphs, or motifs, are formed by using lower-level motifs as their building blocks.
The motifs at the top of the hierarchy are stacked multiple times to form the final neural network. This approach enables search algorithms to implement powerful hierarchical modules where any change in the motifs is propagated across the whole network immediately.
This is analogous to the modularized design patterns used in many hand-crafted architectures, e.g.\ VGGNet~\citep{simonyan2014very}, ResNet~\citep{he2016deep}, and Inception~\citep{szegedy2016rethinking} are all comprised of building blocks. In our case, a hierarchical architecture is discovered through evolutionary or random search.
The evolution of neural architectures was studied as a sub-task of \emph{neuroevolution} \citep{holland1975adaptation, miller1989designing, yao1999evolving, stanley2002evolving, floreano2008neuroevolution}, where the topology of a neural network is simultaneously evolved along with its weights and hyperparameters.
The benefits of indirect encoding schemes,
such as multi-scale representations,
have historically been discussed in \cite{Gruau94neuralnetwork, kitano1990designing, stanley2007compositional, stanley2009hypercube}.
Despite these pioneering studies,
evolutionary or random architecture search has not been investigated at larger scale on image classification benchmarks until recently \citep{real2017large, miikkulainen2017evolving, xie2017genetic, brock2017smash, negrinho2017deeparchitect}.
Our work shows that the power of simple search methods can be substantially enhanced using well-designed search spaces.
Our experimental setup resembles \cite{zoph2017learning},
where an architecture found using reinforcement learning obtained state-of-the-art performance on ImageNet.
Our work reveals that random or evolutionary methods, which so far have been seen as less efficient, can scale and achieve competitive performance on this task if combined with a powerful architecture representation, whilst utilizing significantly fewer computational resources.
To summarize, our main contributions are:
\begin{enumerate}
\item We introduce hierarchical representations for describing neural network architectures.
\item We show that competitive architectures for image classification can be obtained even with simplistic random search, which demonstrates the importance of search space construction.
\item We present a
scalable variant of evolutionary search which further improves the results and achieves the best published results\footnote{at the moment of paper submission; see~\cite{real2018regularized} for a more recent study of evolutionary methods for architecture search.} among evolutionary architecture search techniques.
\end{enumerate}
\section{Architecture Representations}
\label{sec:representations}
We first describe flat representations of neural architectures (Sect.~\ref{subsec:flat}), where each architecture is represented as a single directed acyclic graph of primitive operations.
Then we move on to hierarchical representations (Sect.~\ref{subsec:hier})
where smaller graph motifs are used as building blocks to form larger motifs.
Primitive operations are discussed in Sect.~\ref{subsec:config}.
\subsection{Flat Architecture Representation}
\label{subsec:flat}
We consider a family of neural network architectures represented by a single-source, single-sink computation graph
that transforms the input at the source to the output at the sink.
Each node of the graph corresponds to a feature map, and each directed edge
is associated with some primitive operation (e.g.\ convolution, pooling, etc.) that transforms the feature map in the input node and passes it to the output node.
Formally, an architecture is defined by the representation $(G, \+o)$, consisting of two ingredients:
\begin{enumerate}
\item A set of available operations $\+o = \{o_1, o_2, \dots \}$.
\item An adjacency matrix $G$ specifying the neural network graph of operations,
where $G_{ij}=k$ means that the $k$-th operation $o_k$ is to be placed between nodes $i$ and $j$.
\end{enumerate}
The architecture is obtained by assembling operations $\+o$ according to the adjacency matrix $G$:
\begin{equation}
arch = assemble(G, \+o)
\end{equation}
in a way that the resulting neural network sequentially computes the feature map $x_i$ of each node $i$
from the feature maps $x_j$ of its predecessor nodes $j$ following the topological ordering:
\begin{equation}
x_i = merge\left[\{o_{G_{ij}}(x_j)\}_{j<i}\right], \quad i=2, \dots, |G|
\end{equation}
Here, $|G|$ is the number of nodes in a graph, and $merge$ is an operation combining multiple feature maps into one, which in
our experiments was implemented as depthwise concatenation.
An alternative option of element-wise addition is less flexible as it requires the incoming feature maps to contain the same number of channels, and is strictly subsumed by concatenation if the resulting $x_i$ is immediately followed by a $1\times 1$ convolution.
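For illustration, the assembly rule above can be written as a short Python sketch (this is not the implementation used in our experiments): feature maps are NumPy arrays of shape $(C, H, W)$, $merge$ is depthwise concatenation, and we adopt the convention that operation index $0$ encodes an absent edge (the $none$ op of Sect.~\ref{subsec:config}); the helper name \texttt{assemble\_flat} is introduced here for illustration only.
\begin{verbatim}
import numpy as np

def assemble_flat(G, ops, x_input):
    """Assemble a flat architecture: node i's feature map is the depthwise
    concatenation of op-transformed feature maps of all predecessors j < i.

    G   : |G| x |G| matrix; G[i][j] = k places operation o_k on edge j -> i,
          and k = 0 stands for the `none' op, i.e. no edge (our convention).
    ops : dict mapping operation index k >= 1 to a callable on feature maps.
    """
    n = len(G)
    x = [None] * n
    x[0] = x_input                      # node 1: the single source
    for i in range(1, n):               # node indices follow topological order
        inputs = [ops[G[i][j]](x[j]) for j in range(i) if G[i][j] != 0]
        x[i] = np.concatenate(inputs, axis=0)   # merge = depthwise concat
    return x[-1]                        # node |G|: the single sink

# Toy usage: identity and ReLU as stand-in operations on a (C, H, W) map.
ops = {1: lambda f: f, 2: lambda f: np.maximum(f, 0.0)}
G = [[0, 0, 0],
     [1, 0, 0],
     [2, 1, 0]]
y = assemble_flat(G, ops, np.random.randn(4, 8, 8))   # y has shape (8, 8, 8)
\end{verbatim}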
\subsection{Hierarchical Architecture Representation}
\label{subsec:hier}
\begin{figure}
\centering
\includegraphics[width=0.95\linewidth]{hier.png}
\caption{An example of a three-level hierarchical architecture representation.
The bottom row shows how level-1 primitive operations $o_1^{(1)}, o_2^{(1)}, o_3^{(1)}$ are assembled into a level-2 motif $o_1^{(2)}$.
The top row shows how level-2 motifs $o_1^{(2)}, o_2^{(2)}, o_3^{(2)}$ are then assembled into a level-3 motif $o_1^{(3)}$.
}
\label{fig:assembly}
\end{figure}
The key idea of the hierarchical architecture representation is to have several motifs at different levels of hierarchy,
where lower-level motifs are used as building blocks (operations) during the construction of higher-level motifs.
Consider a hierarchy of $L$ levels where the $\ell$-th level contains $M_\ell$ motifs.
The highest-level $\ell = L$ contains only a single motif corresponding to the full architecture,
and the lowest level $\ell = 1$ is the set of primitive operations.
We recursively define $o_m^{(\ell)}$,
the $m$-th motif in level $\ell$,
as the composition of lower-level motifs $\+o^{(\ell-1)} = \big\{o^{(\ell-1)}_1, o^{(\ell-1)}_2, ..., o^{(\ell-1)}_{M_{(\ell-1)}}\big\}$ according to
its network structure $G_m^{(\ell)}$:
\begin{align}
o_m^{(\ell)} &= assemble\left(G_m^{(\ell)}, \+o^{(\ell-1)} \right),
\quad \forall \ell = 2,\dots,L
\end{align}
A hierarchical architecture representation is therefore defined by
$\left(\big\{ \{G^{(\ell)}_m\}^{M_\ell}_{m=1} \big\}_{\ell=2}^L, \+o^{(1)}\right)$,
as it is determined by network structures of motifs at all levels and the set of bottom-level primitives.
The assembly process is illustrated in Fig.~\ref{fig:assembly}.
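Continuing the illustrative Python sketch of Sect.~\ref{subsec:flat}, the recursive definition above can be expressed by turning every motif into a callable assembled from the callables one level below (indices are zero-based in this sketch, and \texttt{motif\_op} is an illustrative name only):
\begin{verbatim}
def motif_op(level, m, structures, primitives):
    """Callable implementing motif m at `level'.

    structures : dict mapping level l (2 <= l <= L) to the list of adjacency
                 matrices of that level's motifs.
    primitives : list of level-1 operations (callables on feature maps).
    """
    if level == 1:
        return primitives[m]
    # Size of the lower-level pool o^(l-1).
    n_lower = len(primitives) if level == 2 else len(structures[level - 1])
    # Keys start at 1; key 0 remains reserved for the `none' op.
    lower = {k + 1: motif_op(level - 1, k, structures, primitives)
             for k in range(n_lower)}
    return lambda x: assemble_flat(structures[level][m], lower, x)

# The full architecture is the single motif at the top level L:
# cell = motif_op(L, 0, structures, primitives); y = cell(x_input)
\end{verbatim}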
\subsection{Primitive Operations}
\label{subsec:config}
We consider the following six primitives at the bottom level of the hierarchy ($\ell=1$, $M_1=6$):
\begin{itemize} \item $1 \times 1$ convolution of $C$ channels
\item $3 \times 3$ depthwise convolution
\item $3 \times 3$ separable convolution of $C$ channels
\item $3 \times 3$ max-pooling
\item $3 \times 3$ average-pooling
\item identity
\end{itemize}
If applicable, all primitives are of stride one and the convolved feature maps are padded to preserve their spatial resolution.
All convolutional operations are followed by batch normalization and ReLU activation \citep{ioffe2015batch};
their number of channels is fixed to a constant $C$.
We note that convolutions with larger receptive fields and more channels can be expressed as motifs of such primitives.
Indeed, large receptive fields can be obtained by stacking $3 \times 3$ convolutions in a chain structure \citep{simonyan2014very},
and wider convolutions with more channels can be obtained by merging the outputs of multiple convolutions through depthwise concatenation.
We also introduce a special $none$ op, which indicates that there is no edge between nodes $i$ and $j$.
It is added to the pool of operations at each level.
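In the encoding used by the illustrative sketches of Sect.~\ref{sec:representations}, the bottom-level pool, including the $none$ op, could be written down as follows; the strings are descriptors only, since concrete layer constructors depend on the deep learning framework:
\begin{verbatim}
# Level-1 operation pool o^(1); key 0 is the special `none' op (absent edge).
PRIMITIVES = {
    0: "none",
    1: "1x1 convolution of C channels",
    2: "3x3 depthwise convolution",
    3: "3x3 separable convolution of C channels",
    4: "3x3 max-pooling",
    5: "3x3 average-pooling",
    6: "identity",
}
\end{verbatim}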
\section{Evolutionary Architecture Search}
\label{sec:evolution}
Evolutionary search over neural network architectures can be performed by treating the representations of Sect.~\ref{sec:representations} as genotypes.
We first introduce an action space for mutating hierarchical genotypes (Sect.~\ref{subsec:mutation}), as well as a diversification-based scheme to obtain the initial population (Sect.~\ref{subsec:initialization}).
We then describe tournament selection and random search in Sect.~\ref{subsec:tournament},
and our distributed implementation in Sect.~\ref{subsec:implementation}.
\subsection{Mutation}
\label{subsec:mutation}
A single mutation of a hierarchical genotype consists of the following sequence of actions:
\begin{enumerate}
\item Sample a target non-primitive level $\ell \ge 2$.
\item Sample a target motif $m$ in the target level.
\item Sample a random successor node $i$ in the target motif.
\item Sample a random predecessor node $j$ in the target motif.
\item Replace the current operation $o^{(\ell-1)}_k$ between $j$ and $i$ with a randomly sampled operation $o^{(\ell-1)}_{k'}$.
\end{enumerate}
In the case of flat genotypes which consist of two levels (one of which is the fixed level of primitives), the first step is omitted and $\ell$ is set to $2$. The mutation can be summarized as:
\begin{equation}
[G^{(\ell)}_m]_{ij} = k'
\end{equation}
where $\ell, m, i, j, k'$ are randomly sampled from uniform distributions over their respective domains.
Notably, the above mutation process is powerful enough to perform various modifications on the target motif, such as:
\begin{enumerate}
\item \textbf{Add a new edge}: if $o_k^{(\ell-1)} = none$ and $o_{k'}^{(\ell-1)} \not= none$.
\item \textbf{Alter an existing edge}: if $o_k^{(\ell-1)} \not= none$ and $o_{k'}^{(\ell-1)} \not= none$ and $o_{k'}^{(\ell-1)} \not= o_k^{(\ell-1)}$.
\item \textbf{Remove an existing edge}: if $o_k^{(\ell-1)} \not= none$ and $o_{k'}^{(\ell-1)} = none$.
\end{enumerate}
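For illustration, the five-step procedure amounts to resampling a single entry of a single adjacency matrix. The following sketch reuses the genotype encoding introduced earlier (operation index $0$ for $none$); the names \texttt{mutate} and \texttt{num\_ops} are illustrative only.
\begin{verbatim}
import random

def mutate(genotype, num_ops):
    """Apply one mutation to a hierarchical genotype.

    genotype : dict mapping level l >= 2 to the list of adjacency matrices
               of that level's motifs (lower-triangular, 0 = `none' op).
    num_ops  : dict mapping level l to the size of the pool o^(l-1),
               including the `none' op.
    """
    level = random.choice(sorted(genotype))        # 1. target level l >= 2
    m = random.randrange(len(genotype[level]))     # 2. target motif
    G = genotype[level][m]
    i = random.randrange(1, len(G))                # 3. successor node i
    j = random.randrange(i)                        # 4. predecessor node j < i
    G[i][j] = random.randrange(num_ops[level])     # 5. new operation k'
    return genotype
\end{verbatim}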
\subsection{Initialization}
\label{subsec:initialization}
To initialize the population of genotypes, we use the following strategy:
\begin{enumerate}
\item Create a ``trivial'' genotype where each motif is set to a chain of identity mappings.
\item Diversify the genotype by applying a large number (e.g.\ $1000$) of random mutations.
\end{enumerate}
In contrast to several previous works
where genotypes are initialized by trivial networks \citep{stanley2002evolving, real2017large},
the above diversification-based scheme not only offers a good initial coverage of the search space with non-trivial architectures,
but also helps to avoid an additional bias introduced by handcrafted initialization routines.
In fact, this strategy ensures initial architectures are reasonably well-performing even without any search,
as suggested by our random sample results in Table \ref{table:cifar_search}.
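This initialization can be sketched as follows, reusing the \texttt{mutate} function above; the per-level \texttt{chain\_op} index used to encode the identity chains is an illustrative convention, not part of the method specification.
\begin{verbatim}
def init_genotype(num_levels, num_motifs, num_nodes, num_ops,
                  chain_op, n_mutations=1000):
    """Diversification-based initialisation: start from a chain of identity
    mappings in every motif, then apply many random mutations.

    chain_op[l] is the lower-level op placed on the chain edges of level-l
    motifs: at level 2 it should index the identity primitive, and at higher
    levels any motif index works, since all motifs start out as identity
    chains.
    """
    def chain(n, op):
        return [[op if j == i - 1 else 0 for j in range(n)] for i in range(n)]
    genotype = {l: [chain(num_nodes[l], chain_op[l])
                    for _ in range(num_motifs[l])]
                for l in range(2, num_levels + 1)}
    for _ in range(n_mutations):
        genotype = mutate(genotype, num_ops)
    return genotype
\end{verbatim}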
\subsection{Search Algorithms}
\label{subsec:tournament}
Our evolutionary search algorithm is based on tournament selection \citep{goldberg1991comparative}. Starting from an initial population of random genotypes, tournament selection provides a mechanism to pick a promising genotype from the population and to place its mutated offspring back into the population. By repeating this process, the quality of the population is refined over time.
We always train a model from scratch for a fixed number of iterations,
and we refer to the training and evaluation of a single model
as an evolution step.
The genotype with the highest fitness (validation accuracy) among the entire population is selected as the final output after a fixed amount of time.
A tournament is formed by a random set of genotypes sampled from the current effective population,
among which the individual with the highest fitness value wins the tournament. The selection pressure is controlled by the tournament size, which is set to $5\%$ of the population size in our case. We do not remove any genotypes from the population, allowing it to grow with time, maintaining architecture diversity. Our evolution algorithm is similar to the binary tournament selection used in a recent large-scale evolutionary method \citep{real2017large}.
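The selection step can be sketched as follows (\texttt{tournament\_select} is an illustrative name; the population is stored as a list of genotype-fitness pairs):
\begin{verbatim}
import random

def tournament_select(population, tournament_frac=0.05):
    """Return the genotype of the fittest individual among a random subset
    (5% of the current population by default); `population' is a list of
    (genotype, fitness) pairs and is never pruned."""
    size = max(1, int(tournament_frac * len(population)))
    contestants = random.sample(population, size)
    return max(contestants, key=lambda pair: pair[1])[0]
\end{verbatim}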
We also investigated random search, a simpler strategy which has not been sufficiently explored in the literature, as an alternative to evolution. In this case, a population of genotypes is generated randomly, the fitness is computed for each genotype in the same way as done in evolution, and the genotype with the highest fitness is selected as the final output. The main advantage of this method is that it can be run in parallel over the entire population, substantially reducing the search time.
\subsection{Implementation}
\label{subsec:implementation}
\begin{algorithm}[t]
\DontPrintSemicolon
\KwIn{Data queue $\mathcal{Q}$ containing initial genotypes; Memory table $\mathcal{M}$ recording evaluated genotypes and their fitness.
}
\While{True}{
\If{$\textsc{hasIdleWorker}()$}{
$genotype \gets \textsc{asyncTournamentSelect}(\mathcal{M})$\;
$genotype' \gets \textsc{Mutate}(genotype)$\;
$\mathcal{Q} \gets \mathcal{Q} \cup genotype'$\;
}
}
\caption{{\sc AsyncEvo} Asynchronous Evolution (Controller)}
\label{algo:async-controller}
\end{algorithm}
\begin{algorithm}[t]
\DontPrintSemicolon
\KwIn{Training set $\mathcal{T}$, validation set $\mathcal{V}$; Shared memory table $\mathcal{M}$
and data queue $\mathcal{Q}$.}
\While{True}{
\If{$|\mathcal{Q}| > 0$}{
$genotype \gets \mathcal{Q}$.pop()\;
$arch \gets \textsc{Assemble}$($genotype$)\;
$model \gets \textsc{Train}$($arch, \mathcal{T}$)\;
$fitness \gets \textsc{Evaluate}$($model, \mathcal{V}$)\;
$\mathcal{M} \gets \mathcal{M} \cup (genotype, fitness)$\;
}
}
\caption{{\sc AsyncEvo} Asynchronous Evolution (Worker)}
\label{algo:async-worker}
\end{algorithm}
Our distributed implementation is asynchronous,
consisting of
a single controller responsible for performing evolution over the genotypes,
and a set of workers responsible for their evaluation.
Both parties have access to a
shared tabular memory $\mathcal{M}$ recording the
population of genotypes and their fitness,
as well as a data queue $\mathcal{Q}$ containing the genotypes with unknown fitness which should be evaluated.
Specifically, the controller will perform tournament selection of a genotype from $\mathcal{M}$
whenever a worker becomes available, followed by
the mutation of the selected genotype and its insertion into $\mathcal{Q}$ for fitness evaluation (Algorithm \ref{algo:async-controller}).
A worker will pick up an unevaluated genotype from $\mathcal{Q}$
whenever there is one available,
assemble it into an architecture, carry out training and validation,
and then record the validation accuracy (fitness) in $\mathcal{M}$ (Algorithm \ref{algo:async-worker}).
Architectures are trained from scratch for a fixed number of steps with random weight initialization.
We do not rely on weight inheritance as in~\citep{real2017large},
though incorporating it into our system is possible.
Note that during architecture evolution no synchronization is required, and all workers are fully occupied.
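The following single-process Python sketch mirrors the structure of Algorithms~\ref{algo:async-controller} and~\ref{algo:async-worker}, reusing the \texttt{tournament\_select} and \texttt{mutate} sketches above. It is only an analogue of the distributed system: \texttt{fitness\_fn} stands in for assembling, training, and validating a genotype, and the threading layout is illustrative.
\begin{verbatim}
import copy, queue, threading

def run_async_evolution(initial_population, num_ops, fitness_fn, num_workers):
    """Controller thread selects and mutates whenever a worker is idle;
    worker threads evaluate queued genotypes and record their fitness."""
    M = list(initial_population)     # shared memory of (genotype, fitness)
    Q = queue.Queue()                # genotypes awaiting evaluation
    lock = threading.Lock()
    idle = threading.Semaphore(0)    # counts idle workers

    def controller():
        while True:
            idle.acquire()                       # hasIdleWorker()
            with lock:
                parent = tournament_select(M)
            Q.put(mutate(copy.deepcopy(parent), num_ops))

    def worker():
        while True:
            idle.release()                       # advertise availability
            genotype = Q.get()
            fitness = fitness_fn(genotype)       # assemble + train + evaluate
            with lock:
                M.append((genotype, fitness))

    threading.Thread(target=controller, daemon=True).start()
    for _ in range(num_workers):
        threading.Thread(target=worker, daemon=True).start()
    return M                                     # grows in the background
\end{verbatim}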
\section{Experiments and Results}
\subsection{Experimental Setup}
In our experiments, we use the proposed search framework to learn the architecture of a convolutional cell, rather than the entire model. The reason is that we would like to be able to quickly compute the fitness of a candidate architecture and then transfer it to a larger model, which is achieved by using fewer cells for fitness computation and more cells for full model evaluation. A similar approach has recently been used in~\citep{zoph2017learning, zhong2017practical}.
Architecture search is carried out entirely on the CIFAR-10 training set, which we split into two sub-sets of 40K training and 10K validation images.
Candidate models are trained on the training subset, and evaluated on the validation subset to obtain the fitness. Once the search process is over, the selected cell is plugged into a large model which is trained on the combination of training and validation sub-sets, and the accuracy is reported on the CIFAR-10 test set.
We note that the test set is never used for model selection, and it is only used for final model evaluation. We also evaluate the cells, learned on CIFAR-10, in a large-scale setting on the ImageNet challenge dataset (Sect.~\ref{subsec:imagenet}).
\begin{figure}[htp]
\centering
\includegraphics[width=\linewidth]{architectures}
\caption{
Image classification models constructed using the cells optimized with architecture search.
\emph{Top-left:} small model used during architecture search on CIFAR-10.
\emph{Top-right:} large CIFAR-10 model used for learned cell evaluation.
\emph{Bottom:} ImageNet model used for learned cell evaluation.
}
\label{fig:cifar_imnet_models}
\end{figure}
For CIFAR-10 experiments we use a model which consists of a $3\times 3$ convolution with $c_0$ channels, followed by $3$ groups of learned convolutional cells, each group containing $N$ cells. After each cell (with $c$ input channels) we insert a $3\times 3$ separable convolution, which has stride $2$ and $2c$ channels if it is the last cell of the group, and stride $1$ and $c$ channels otherwise. The purpose of these convolutions is to control the number of channels as well as to reduce the spatial resolution. The last cell is followed by global average pooling and a linear softmax layer.
For fitness computation we use a smaller model with $c_0=16$ and $N=1$, shown in Fig.~\ref{fig:cifar_imnet_models} (top-left). It is trained using SGD with $0.9$ momentum for $5000$ steps, starting with the learning rate $0.1$, which is reduced by $10$x after $4000$ and $4500$ steps. The batch size is 256, and the weight decay value is $3\cdot10^{-4}$. We employ standard training data augmentation where a $24 \times 24$ crop is randomly sampled from a $32 \times 32$ image, followed by random horizontal flipping. The evaluation is performed on the full size $32 \times 32$ image.
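For reference, this step schedule corresponds to the following piecewise-constant learning rate (the helper name is illustrative):
\begin{verbatim}
def small_model_lr(step):
    """Learning rate for fitness computation: 0.1 for 5000 SGD steps,
    divided by 10 after step 4000 and again after step 4500."""
    if step < 4000:
        return 0.1
    if step < 4500:
        return 0.01
    return 0.001
\end{verbatim}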
\textbf{A note on variance.}\; We found that the variance due to optimization was non-negligible, and we believe that reporting it is important for performing a fair comparison and assessing model capabilities. When training CIFAR models, we have observed a standard deviation of up to 0.2\% using the exact same setup. The solution we adopted was to compute the fitness as the average accuracy over $4$ training-evaluation runs.
For the evaluation of the learned cell architecture on CIFAR-10, we use a larger model with $c_0=64$ and $N=2$, shown in Fig.~\ref{fig:cifar_imnet_models} (top-right). The larger model is trained for $80$K steps, starting with a learning rate $0.1$, which is reduced by $10$x after $40$K, $60$K, and $70$K steps. The rest of the training settings are the same as used for fitness computation. We report mean and standard deviation computed over $5$ training-evaluation runs.
For the evaluation on the ILSVRC ImageNet challenge dataset~\citep{russakovsky2015imagenet}, we use an architecture similar to the one used for CIFAR, with the following changes.
An input $299 \times 299$ image is passed through two convolutional layers with $32$ and $64$ channels and stride $2$ each. It is followed by $4$ groups of convolutional cells where the first group contains a single cell (and has $c_0=64$ input channels), and the remaining three groups have $N=2$ cells each (Fig.~\ref{fig:cifar_imnet_models}, bottom).
We use SGD with momentum which is run for $200$K steps, starting with a learning rate of $0.1$, which is reduced by $10$x after $100$K, $150$K, and $175$K steps.
The batch size is $1024$, and weight decay is $10^{-4}$. We did not use the auxiliary losses, weight averaging, label smoothing, or path dropout empirically found effective in~\citep{zoph2017learning}. The training augmentation is the same as in~\citep{szegedy2016rethinking}, and consists of random crops, horizontal flips, and brightness and contrast changes. We report the single-crop top-1 and top-5 error on the ILSVRC validation set.
\subsection{Architecture search on CIFAR-10}
\label{subsec:cifar10}
\begin{figure}[htp]
\centering
\includegraphics[width=\linewidth]{fitness}
\caption{
Fitness and number of parameters vs evolution step for flat and hierarchical representations.
\emph{Left:} fitness of a genotype generated at each evolution step.
\emph{Middle:} maximum fitness across all genotypes generated before each evolution step.
\emph{Right:} number of parameters in the small CIFAR-10 model constructed using the genotype generated at each evolution step.
}
\label{fig:fitness_evo}
\end{figure}
We run the evolution on flat and hierarchical genotypes for $7000$ steps using $200$ GPU workers.
The initial size of the randomly initialized population is $200$, which later grows as a result of tournament selection and mutation (Sect.~\ref{sec:evolution}).
For the hierarchical representation, we use three levels ($L=3$), with $M_1=6, M_2=6, M_3=1$. Each of the level-2 motifs is a graph with $|G^{(2)}|=4$ nodes, and the level-3 motif is a graph with $|G^{(3)}|=5$ nodes. Each level-2 motif is followed by a $1\times 1$ convolution with the same number of channels as the motif's input, which reduces the number of parameters. For the flat representation, we use a graph with $11$ nodes to achieve a comparable number of edges.
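In terms of the parameters used by the sketches of Sect.~\ref{sec:evolution}, these settings correspond to the following configuration (an illustrative encoding; \texttt{num\_ops} counts the lower-level pool plus the $none$ op):
\begin{verbatim}
# Hierarchical representation: L = 3, with the 6 primitives at level 1,
# 6 level-2 motifs of 4 nodes each, and one level-3 motif of 5 nodes.
HIER = dict(num_levels=3,
            num_motifs={2: 6, 3: 1},
            num_nodes={2: 4, 3: 5},
            num_ops={2: 6 + 1, 3: 6 + 1})   # +1 for the `none' op

# Flat representation: a single 11-node motif built directly from primitives.
FLAT = dict(num_levels=2,
            num_motifs={2: 1},
            num_nodes={2: 11},
            num_ops={2: 6 + 1})

# Example initialisation (6 = identity primitive, 1 = any level-2 motif):
# genotype = init_genotype(**HIER, chain_op={2: 6, 3: 1})
\end{verbatim}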
The evolution process is visualized in Fig.~\ref{fig:fitness_evo}. The left plot shows the fitness of the genotype generated at each step of evolution: the fitness grows fast initially, and plateaus over time. The middle plot shows the best fitness observed by each evolution step.
Since the first $200$ steps correspond to a random initialization and mutation starts after that, the best architecture found at step $200$ corresponds to the output of random search over $200$ architectures.
Fig.~\ref{fig:fitness_evo} (right) shows the number of parameters in the small network (used for fitness computation), constructed using the genotype produced at each step.
Notably, flat genotypes achieve higher fitness, but at the cost of larger parameter count. We thus also consider a parameter-constrained variant of the flat genotype, where only the genotypes with the number of parameters under a fixed threshold are permitted;
the threshold is chosen so that the flat genotype has a similar number of parameters to the hierarchical one. In this setting hierarchical and flat genotypes achieve similar fitness.
To demonstrate that improvement in fitness of the hierarchical architecture is correlated with the improvement in the accuracy of the corresponding large model trained till convergence, we plot the relative accuracy improvements in Fig.~\ref{fig:fitness_vs_full_acc}.
\begin{figure}[htp]
\centering
\includegraphics[width=.5\linewidth]{fitness_vs_full_acc}
\caption{Accuracy improvement over the course of evolution, measured with respect to the first random genotype.
The small model is the model used for fitness computation during evolution (its absolute fitness value is shown with the red curve in Fig.~\ref{fig:fitness_evo} (middle)).
The large model is the model where the evolved cell architecture is deployed for training and evaluation.}
\label{fig:fitness_vs_full_acc}
\end{figure}
As far as the architecture search time is concerned, it takes 1 hour to compute the fitness of one architecture on a single P100 GPU (which involves $4$ rounds of training and evaluation). Using 200 GPUs, it thus takes 1 hour to perform random search over $200$ architectures and 1.5 days to do the evolutionary search with 7000 steps. This is significantly faster than the 11 days using 250 GPUs reported by~\citet{real2017large} and the 4 days using 450 GPUs reported by~\citet{zoph2017learning}.
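These figures follow directly from the per-architecture cost, assuming fitness evaluations are spread evenly over the $200$ workers:
\begin{equation*}
\frac{200 \times 1\ \text{GPU-hour}}{200\ \text{GPUs}} = 1\ \text{hour},
\qquad
\frac{7000 \times 1\ \text{GPU-hour}}{200\ \text{GPUs}} = 35\ \text{hours} \approx 1.5\ \text{days}.
\end{equation*}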
\begin{table}[ht]
\begin{center}
\resizebox{\columnwidth}{!}{
\begin{tabular}{lcc}
\toprule
\textbf{Search Method} &
\textbf{CIFAR-10 error (\%)} &
\begin{tabular}{c}
\textbf{ImageNet} \\
\textbf{Top-1/Top-5 error (\%)}
\end{tabular} \\
\midrule
Flat repr-n, random architecture & $4.56 \pm 0.11$ & $21.4/5.8$ \\
Flat repr-n, random search (200 samples) & $4.02 \pm 0.11$ & $20.8/5.7$ \\
Flat repr-n, evolution (7000 samples) & $3.92 \pm 0.06$ & $20.6/5.6$ \\
Flat repr-n, parameter-constrained, evolution (7000 samples) & $4.17 \pm 0.08$ & $21.2 / 5.8$ \\
\midrule
Hier. repr-n, random architecture & $4.21 \pm 0.11$ & $21.5/5.8$ \\
Hier. repr-n, random search (200 samples) & $4.04 \pm 0.2$ & $20.4/5.3$ \\
Hier. repr-n, random search (7000 samples) & $3.91 \pm 0.15$ & $21.0 / 5.5$ \\
Hier. repr-n, evolution (7000 samples) & $\boldsymbol{3.75 \pm 0.12}$ & $\boldsymbol{20.3/5.2}$ \\
\bottomrule
\end{tabular}
}
\end{center}
\caption{Classification results on the CIFAR-10 test set and ILSVRC validation set obtained using the architectures found using various representations and search methods.}
\label{table:cifar_search}
\end{table}
\subsection{Architecture Evaluation on CIFAR-10 and ImageNet}
\label{subsec:imagenet}
We now turn to the evaluation of architectures found using random and evolutionary search on CIFAR-10 and ImageNet. The results are presented in Table~\ref{table:cifar_search}.
First, we note that randomly sampled architectures already perform surprisingly well, which we attribute to the representation power of our architecture spaces. Second, random search over $200$ architectures achieves very competitive results on both CIFAR-10 and ImageNet, which is remarkable considering it took 1 hour to carry out. This demonstrates that well-constructed architecture representations, coupled with diversified sampling and simple search methods, form a strong baseline for architecture search.
Our best results are achieved using evolution over hierarchical representations: $3.75\% \pm 0.12\%$ classification error on the CIFAR-10 test set (using $c_0=64$ channels), which is further improved to $3.63\% \pm 0.10\%$ with more channels ($c_0=128$). On the ImageNet validation set, we achieve $20.3\%$ top-1 classification error and $5.2\%$ top-5 error. We put these results in the context of the state of the art in Tables~\ref{table:cifar_sota} and~\ref{table:imnet_sota}. We achieve the best published results on CIFAR-10 using evolutionary architecture search, and also demonstrate competitive performance compared to the best published methods on both CIFAR-10 and ImageNet. Our ImageNet model has 64M parameters, which is comparable to Inception-ResNet-v2 (55.8M) but larger than NASNet-A (22.6M).
\begin{table}[ht]
\begin{center}
\small
\begin{tabular}{lc}
\toprule
\textbf{Model} & \textbf{Error (\%)} \\
\midrule
ResNet-1001 + pre-activation \citep{he2016identity} & $4.62$ \\
Wide ResNet-40-10 + dropout \citep{zagoruyko2016wide} & $3.8$ \\
DenseNet (k=24) \citep{huang2016densely} & $3.74$ \\
DenseNet-BC (k=40) \citep{huang2016densely} & $3.46$ \\
\midrule
MetaQNN \citep{baker2016designing} & $6.92$ \\
NAS v3 \citep{zoph2016neural} & $3.65$ \\
Block-QNN-A \citep{zhong2017practical} & $3.60$ \\
NASNet-A \citep{zoph2017learning} & $3.41$ \\
\midrule
Evolving DNN \citep{miikkulainen2017evolving} & $7.3$ \\
Genetic CNN \citep{xie2017genetic} & $7.10$ \\
Large-scale Evolution \citep{real2017large} & $5.4$ \\
SMASH \citep{brock2017smash} & $4.03$ \\
\midrule
Evolutionary search, hier. repr., $c_0=64$ & $3.75 \pm 0.12$ \\
Evolutionary search, hier. repr., $c_0=128$ & $3.63 \pm 0.10$ \\
\bottomrule
\end{tabular}
\end{center}
\caption{Classification error on the CIFAR-10 test set obtained using state-of-the-art models as well as the best-performing architecture found using the proposed architecture search framework. Existing models are grouped as (from top to bottom): handcrafted architectures, architectures found using reinforcement learning, and architectures found using random or evolutionary search.}
\label{table:cifar_sota}
\end{table}
\begin{table}[ht]
\begin{center}
\small
\begin{tabular}{lcc}
\toprule
\textbf{Model} & \textbf{Top-1 error (\%)} & \textbf{Top-5 error (\%)} \\
\midrule
Inception-v3~\citep{szegedy2016rethinking} & $21.2$ & $5.6$ \\
Xception~\citep{chollet2016xception} & $21.0$ & $5.5$ \\
Inception-ResNet-v2~\citep{szegedy2017inception} & $19.9$ & $4.9$ \\
NASNet-A~\citep{zoph2017learning} & $19.2$ & $4.7$ \\
\midrule
Evolutionary search, hier. repr., $c_0=64$ & $20.3$ & $5.2$ \\
\bottomrule
\end{tabular}
\end{center}
\caption{Classification error on the ImageNet validation set obtained using state-of-the-art models as well as the best-performing architecture found using our framework.
}
\label{table:imnet_sota}
\end{table}
The evolved hierarchical cell is visualized in Appendix~\ref{sec:arch_vis},
which shows that architecture search has discovered a number of skip connections. For example, the cell contains a direct skip connection between input and output: nodes 1 and 5 are connected by Motif 4, which in turn contains a direct connection between input and output. The cell also contains several internal skip connections, through Motif 5 (which again comes with an input-to-output skip connection similar to Motif 4).
\section{Conclusion}
We have presented an efficient evolutionary method that identifies high-performing neural architectures based on a novel hierarchical representation scheme, where smaller operations are used as the building blocks to form the larger ones.
Notably, we show that strong results can be obtained even using simplistic search algorithms,
such as evolution or random search, when coupled with a well-designed architecture representation.
Our best architecture yields the state-of-the-art result on CIFAR-10 among evolutionary methods and successfully scales to ImageNet with highly competitive performance.
\section*{Acknowledgements}
The authors thank Jacob Menick, Pushmeet Kohli, Yujia Li, Simon Osindero, and many other colleagues at DeepMind for helpful comments and discussions.
\bibliography{iclr2018_conference}
\bibliographystyle{iclr2018_conference}
\appendix
\newpage
\section{Architecture Visualization}
\label{sec:arch_vis}
Visualization of the learned cell and motifs of our best-performing hierarchical architecture. Note that only motifs 1, 3, 4, and 5 are used to construct the cell, among which motifs 3 and 5 are dominant.
\begin{figure}[ht]
\centering
\begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=3cm,
semithick]
\tikzstyle{every state}=[fill=gray!25,draw=none,text=black]
\node[state] (A) {$1$};
\node[state] (B) [right of=A] {$2$};
\node[state] (C) [right of=B] {$3$};
\node[state] (D) [right of=C] {$4$};
\node[state] (E) [right of=D] {$5$};
\path[every node/.style={sloped,anchor=south,auto=false,xshift=0.0cm}]
(A) edge node {\textcolor{magenta}{Motif 5}} (B)
edge [bend right] node {\textcolor{brown}{Motif 3}} (C)
edge [bend left] node {\textcolor{brown}{Motif 3}} (D)
edge [bend left] node {\textcolor{orange}{Motif 4}} (E)
(B)
edge node {\textcolor{brown}{Motif 3}} (C)
edge [bend left] node {\textcolor{blue}{Motif 1}} (D)
edge [bend right] node {\textcolor{magenta}{Motif 5}} (E)
(C) edge node {\textcolor{brown}{Motif 3}} (D)
edge [bend right] node {\textcolor{magenta}{Motif 5}} (E)
(D) edge node {\textcolor{magenta}{Motif 5}} (E);
\end{tikzpicture}
\caption{Cell}
\end{figure}
\begin{figure}[ht]
\centering
\begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=3.5cm,
semithick]
\tikzstyle{every state}=[fill=blue!50,draw=none,text=white]
\node[state] (A) {$1$};
\node[state] (B) [right of=A] {$2$};
\node[state] (C) [right of=B] {$3$};
\node[state] (D) [right of=C] {$4$};
\path[every node/.style={sloped,anchor=south,auto=false,xshift=0.0cm}]
(A) edge node {max-pooling} (B)
edge [bend right] node {$1 \times 1$} (C)
edge [bend left] node {$3 \times 3$ separable} (D)
(B)
edge node {$3 \times 3$ depthwise} (C)
edge [bend left] node {$1 \times 1$} (D)
(C) edge node {max-pooling} (D);
\end{tikzpicture}
\caption{Motif 1}
\end{figure}
\begin{figure}[ht]
\centering
\begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=3.5cm,
semithick]
\tikzstyle{every state}=[fill=red!60,draw=none,text=white]
\node[state] (A) {$1$};
\node[state] (B) [right of=A] {$2$};
\node[state] (C) [right of=B] {$3$};
\node[state] (D) [right of=C] {$4$};
\path[every node/.style={sloped,anchor=south,auto=false,xshift=0.0cm}]
(A) edge node {max-pooling} (B)
edge [bend right] node {$3 \times 3$ depthwise} (C)
edge [bend left] node {$1 \times 1$} (D)
(B)
edge node {$3 \times 3$ depthwise} (C)
edge [bend right] node {$3 \times 3$ separable} (D)
(C) edge node {$3 \times 3$ separable} (D);
\end{tikzpicture}
\caption{Motif 2}
\end{figure}
\begin{figure}[ht]
\centering
\begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=3.5cm,
semithick]
\tikzstyle{every state}=[fill=brown!60,draw=none,text=white]
\node[state] (A) {$1$};
\node[state] (B) [right of=A] {$2$};
\node[state] (C) [right of=B] {$3$};
\node[state] (D) [right of=C] {$4$};
\path[every node/.style={sloped,anchor=south,auto=false,xshift=0.0cm}]
(A) edge node {$3 \times 3$ separable} (B)
edge [bend left] node {$3 \times 3$ depthwise} (C)
edge [bend left] node {$3 \times 3$ separable} (D)
(B)
edge node {$3 \times 3$ separable} (C)
edge [bend right] node {} (D)
(C) edge node {avg-pooling} (D);
\end{tikzpicture}
\caption{Motif 3}
\end{figure}
\begin{figure}[ht]
\centering
\begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=3.5cm,
semithick]
\tikzstyle{every state}=[fill=orange!70,draw=none,text=white]
\node[state] (A) {$1$};
\node[state] (B) [right of=A] {$2$};
\node[state] (C) [right of=B] {$3$};
\node[state] (D) [right of=C] {$4$};
\path[every node/.style={sloped,anchor=south,auto=false,xshift=0.0cm}]
(A) edge node {avg-pooling} (B)
edge [bend right] node {$3 \times 3$ separable} (C)
edge [bend left] node {} (D)
(B)
edge node {$3 \times 3$ separable} (C)
edge [bend right] node {max-pooling} (D)
(C) edge node {avg-pooling} (D);
\end{tikzpicture}
\caption{Motif 4}
\end{figure}
\begin{figure}[ht]
\centering
\begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=4.5cm,
semithick]
\tikzstyle{every state}=[fill=magenta!50,draw=none,text=white]
\node[state] (A) {$1 (2)$};
\node[state] (C) [right of=A] {$3$};
\node[state] (D) [right of=C] {$4$};
\path[every node/.style={sloped,anchor=south,auto=false,xshift=0.0cm}]
(A)
edge [bend right] node {$3 \times 3$ separable} (C)
edge [bend right] node {$3 \times 3$ separable} (D)
edge [bend left] node {} (C)
edge [bend left] node {} (D)
(C) edge node {$3 \times 3$ separable} (D);
\end{tikzpicture}
\caption{Motif 5}
\end{figure}
\begin{figure}[ht]
\centering
\begin{tikzpicture}[->,>=stealth',shorten >=1pt,auto,node distance=3.5cm,
semithick]
\tikzstyle{every state}=[fill=cyan!80,draw=none,text=white]
\node[state] (A) {$1$};
\node[state] (B) [right of=A] {$2$};
\node[state] (C) [right of=B] {$3$};
\node[state] (D) [right of=C] {$4$};
\path[every node/.style={sloped,anchor=south,auto=false,xshift=0.0cm}]
(A) edge node {$3 \times 3$ depthwise} (B)
edge [bend right] node {max-pooling} (C)
edge [bend left] node {$3 \times 3$ separable} (D)
(B)
edge node {max-pooling} (C)
edge [bend left] node {} (D)
(C) edge node {avg-pooling} (D);
\end{tikzpicture}
\caption{Motif 6}
\end{figure}
\end{document}