\documentclass[journal]{IEEEtran}

\ifCLASSINFOpdf
\else
\fi

\usepackage{amssymb}
\usepackage{cite}
\usepackage{tabularx,ragged2e,booktabs,caption}
\usepackage{amsmath,amssymb,amsfonts}
\usepackage{algorithmic}
\usepackage{graphicx}
\usepackage{textcomp}
\usepackage{xcolor,soul}
\usepackage[english]{babel}
\usepackage{amsthm}
\usepackage{verbatim}
\usepackage{comment}
\newtheorem{theorem}{Theorem}[section]
\newtheorem{corollary}{Corollary}[section]
\newtheorem{lemma}{Lemma}[section]
\newtheorem{assumption}{Assumption}
\newtheorem{proposition}{Proposition}[section]
\newtheorem{definition}{Definition}[section]
\DeclareMathOperator{\E}{\mathbb{E}}
\DeclareMathOperator{\I}{\mathbb{I}}
\DeclareMathOperator{\R}{\mathbb{R}}
\newcommand{\eps}{\varepsilon}
\newcommand{\kron}{\otimes}

\newcommand{\mat}[1]{\boldsymbol{#1}}
\renewcommand{\vec}[1]{\boldsymbol{\mathrm{#1}}}
\newcommand{\vecalt}[1]{\boldsymbol{#1}}

\newcommand{\conj}[1]{\overline{#1}}

\newcommand{\normof}[1]{\|#1\|}
\newcommand{\onormof}[2]{\|#1\|_{#2}}

\newcommand{\MIN}[2]{\begin{array}{ll} \displaystyle \minimize_{#1} & {#2} \end{array}}
\newcommand{\MINone}[3]{\begin{array}{ll} \displaystyle \minimize_{#1} & {#2} \\ \subjectto & {#3} \end{array}}
\newcommand{\OPTone}{\MINone}
\newcommand{\MINthree}[5]{\begin{array}{ll} \displaystyle \minimize_{#1} & {#2} \\ \subjectto & {#3} \\ & {#4} \\ & {#5} \end{array}}

\newcommand{\MAX}[2]{\begin{array}{ll} \displaystyle \maximize_{#1} & {#2} \end{array}}
\newcommand{\MAXone}[3]{\begin{array}{ll} \displaystyle \maximize_{#1} & {#2} \\ \subjectto & {#3} \end{array}}

\newcommand{\itr}[2]{#1^{(#2)}}
\newcommand{\itn}[1]{^{(#1)}}

\newcommand{\prob}{\mathbb{P}}
\newcommand{\probof}[1]{\prob\left\{ #1 \right\}}

\newcommand{\pmat}[1]{\begin{pmatrix} #1 \end{pmatrix}}
\newcommand{\bmat}[1]{\begin{bmatrix} #1 \end{bmatrix}}
\newcommand{\spmat}[1]{\left(\begin{smallmatrix} #1 \end{smallmatrix}\right)}
\newcommand{\sbmat}[1]{\left[\begin{smallmatrix} #1 \end{smallmatrix}\right]}

\newcommand{\RR}{\mathbb{R}}
\newcommand{\CC}{\mathbb{C}}

\providecommand{\eye}{\mat{I}}
\providecommand{\mA}{\ensuremath{\mat{A}}}
\providecommand{\mB}{\ensuremath{\mat{B}}}
\providecommand{\mC}{\ensuremath{\mat{C}}}
\providecommand{\mD}{\ensuremath{\mat{D}}}
\providecommand{\mE}{\ensuremath{\mat{E}}}
\providecommand{\mF}{\ensuremath{\mat{F}}}
\providecommand{\mG}{\ensuremath{\mat{G}}}
\providecommand{\mH}{\ensuremath{\mat{H}}}
\providecommand{\mI}{\ensuremath{\mat{I}}}
\providecommand{\mJ}{\ensuremath{\mat{J}}}
\providecommand{\mK}{\ensuremath{\mat{K}}}
\providecommand{\mL}{\ensuremath{\mat{L}}}
\providecommand{\mM}{\ensuremath{\mat{M}}}
\providecommand{\mN}{\ensuremath{\mat{N}}}
\providecommand{\mO}{\ensuremath{\mat{O}}}
\providecommand{\mP}{\ensuremath{\mat{P}}}
\providecommand{\mQ}{\ensuremath{\mat{Q}}}
\providecommand{\mR}{\ensuremath{\mat{R}}}
\providecommand{\mS}{\ensuremath{\mat{S}}}
\providecommand{\mT}{\ensuremath{\mat{T}}}
\providecommand{\mU}{\ensuremath{\mat{U}}}
\providecommand{\mV}{\ensuremath{\mat{V}}}
\providecommand{\mW}{\ensuremath{\mat{W}}}
\providecommand{\mX}{\ensuremath{\mat{X}}}
\providecommand{\mY}{\ensuremath{\mat{Y}}}
\providecommand{\mZ}{\ensuremath{\mat{Z}}}
\providecommand{\mLambda}{\ensuremath{\mat{\Lambda}}}
\providecommand{\mPbar}{\bar{\mP}}

\providecommand{\ones}{\vec{e}}
\providecommand{\va}{\ensuremath{\vec{a}}}
\providecommand{\vb}{\ensuremath{\vec{b}}}
\providecommand{\vc}{\ensuremath{\vec{c}}}
\providecommand{\vd}{\ensuremath{\vec{d}}}
\providecommand{\ve}{\ensuremath{\vec{e}}}
\providecommand{\vf}{\ensuremath{\vec{f}}}
\providecommand{\vg}{\ensuremath{\vec{g}}}
\providecommand{\vh}{\ensuremath{\vec{h}}}
\providecommand{\vi}{\ensuremath{\vec{i}}}
\providecommand{\vj}{\ensuremath{\vec{j}}}
\providecommand{\vk}{\ensuremath{\vec{k}}}
\providecommand{\vl}{\ensuremath{\vec{l}}}
\providecommand{\vm}{\ensuremath{\vec{m}}}
\providecommand{\vn}{\ensuremath{\vec{n}}}
\providecommand{\vo}{\ensuremath{\vec{o}}}
\providecommand{\vp}{\ensuremath{\vec{p}}}
\providecommand{\vq}{\ensuremath{\vec{q}}}
\providecommand{\vr}{\ensuremath{\vec{r}}}
\providecommand{\vs}{\ensuremath{\vec{s}}}
\providecommand{\vt}{\ensuremath{\vec{t}}}
\providecommand{\vu}{\ensuremath{\vec{u}}}
\providecommand{\vv}{\ensuremath{\vec{v}}}
\providecommand{\vw}{\ensuremath{\vec{w}}}
\providecommand{\vx}{\ensuremath{\vec{x}}}
\providecommand{\vy}{\ensuremath{\vec{y}}}
\providecommand{\vz}{\ensuremath{\vec{z}}}
\providecommand{\vpi}{\ensuremath{\vecalt{\pi}}}

\providecommand{\vlambda}{\ensuremath{\vecalt{\lambda}}}

\makeatother \usepackage[ruled]{algorithm2e}
\usepackage{ulem}
\usepackage{pstricks}
\usepackage{url}
\usepackage{bm}
\newcommand{\cf}[1]{{\textcolor{blue}{(Chen: #1)}}}

\usepackage{titlesec}

\titleformat*{\section}{\large\bf}
\titleformat*{\subsection}{\normalsize\bf}
\titleformat*{\subsubsection}{\normalsize}

\titlespacing*{\section}{0pt}{1ex}{1ex}
\titlespacing*{\subsection}{0pt}{1ex}{0ex}
\titlespacing*{\subsubsection}{0pt}{1ex}{0ex}
\titlespacing*{\paragraph}{0pt}{1ex}{1em}

\DeclareRobustCommand{\hlcyan}[1]{{\sethlcolor{cyan}\hl{#1}}}

\begin{document}
\title{Networked Multiagent Reinforcement Learning for Peer-to-Peer Energy Trading}

\author{Chen~Feng,
and~Andrew~L.~Liu\thanks{Chen Feng is with the School of Industrial Engineering, Purdue University, West Lafayette, IN, USA, email: feng219@purdue.edu.}\thanks{Andrew L. Liu is with the School of Industrial Engineering, Purdue University, West Lafayette, IN, USA, email: andrewliu@purdue.edu.}}

\maketitle

\begin{abstract}
Utilizing distributed renewable and energy storage resources in local distribution networks via peer-to-peer (P2P) energy trading has long been touted as a solution to improve energy systems' resilience and sustainability. Consumers and prosumers (those who have energy generation resources), however, do not have the expertise to engage in repeated P2P trading, and the zero marginal costs of renewables present challenges in determining fair market prices. To address these issues, we propose multi-agent reinforcement learning (MARL) frameworks to help automate consumers' bidding and management of their solar PV and energy storage resources, under a specific P2P clearing mechanism that utilizes the so-called supply-demand ratio. In addition, we show how the MARL frameworks can integrate physical network constraints to realize voltage control, hence ensuring the physical feasibility of the P2P energy trading and paving the way for real-world implementations.
\end{abstract}

\IEEEpeerreviewmaketitle

\section{Introduction}\label{sec:Intro}
As our society strives to transition towards sustainable energy sources, distributed renewable resources and energy storage are increasingly seen as key components in creating resilient and sustainable energy systems. Peer-to-peer (P2P) energy trading in local energy markets offers a promising approach to decentralize and optimize the allocation of distributed energy resources (DERs). This model not only empowers consumers and those who can store or generate energy, known as prosumers, to trade energy directly but also promotes renewable energy usage and reduces energy losses.
However, implementing P2P trading presents significant challenges. First, consumers and prosumers lack the technical expertise required to engage in repeated P2P trading and manage their energy resources efficiently. Second, renewable energy, such as solar, often has zero marginal costs, which introduces difficulties in determining fair market prices for energy trades: a uniform-pricing auction resembling a wholesale energy market would not work since the market clearing prices would be zero most of the time. Third, while P2P trading only entails financial transactions, the delivery of energy occurs over the physical distribution networks. How to maintain network feasibility in a local P2P trading market is an important yet open question.

In response to these challenges, this paper introduces a decentralized framework utilizing multiagent reinforcement learning (MARL), which is designed to automate the bidding processes of agents, enhancing the management efficiency of their solar PV and energy storage resources. Additionally, the framework ensures the physical feasibility of the distribution network. Specifically, inspired by the algorithm in \cite{zhang2018networked}, we propose a consensus-based actor-critic algorithm for decentralized MARL, in which each agent's decision problem in a repeated P2P auction is modeled as a Markov decision process (MDP). By allowing agents to exchange information and reach agreements, the proposed framework facilitates efficient decision-making and resource allocation, while mitigating the computational and privacy challenges associated with certain centralized learning approaches.
In addition, the shared network constraints, such as voltage regulation, can be learned through the decentralized approach, with a constraint violation incurring a (fictitious) penalty in each agent's reward function. We consider this development a significant step towards practical real-world implementations of P2P energy trading.

In addition to the modeling and algorithm development, we establish the theoretical conditions for the consensus-MARL algorithm to converge to an asymptotically stable equilibrium. We consider this a significant advantage over a purely decentralized approach without agents' communication, such as the one in our previous work \cite{feng2022decentralized}, in which each agent uses the proximal policy optimization (PPO) approach \cite{PPO} to solve their own MDP and ignores multiagent interaction.
Through numerical simulations, we compare three different frameworks to implement P2P trading while considering network constraints: purely decentralized trading using PPO,
consensus-MARL, and a centralized learning and decentralized execution approach using the multiagent deep deterministic policy gradient (MADDPG) algorithm \cite{MADDPG}. Our findings indicate that consensus-MARL achieves higher average rewards compared to both PPO and MADDPG.
The rest of the paper is organized as follows. Section \ref{sec:LitReview} reviews the literature on P2P energy trading and emphasizes the contribution of our work. Section \ref{sec:market} introduces the market clearing mechanism based on the supply-demand ratio, first proposed in \cite{liu2017energy}, which can avoid zero-clearing prices when the supply resources have zero marginal costs. Section \ref{sec:Consensus} provides the detailed consensus-MARL formulation and algorithm, as well as establishing convergence results. The specific setups and data of our simulation are described in Section \ref{sec:Sim}, along with numerical results. Finally, Section \ref{sec:Conclusion} summarizes our work and points out several future research directions.

\section{Literature Review}
\label{sec:LitReview}

The literature on decentralized control of energy transactions within distribution networks is extensive and diverse. Broadly speaking, there are two predominant approaches: the distributed optimization approach and P2P bilateral trading. One of the most prominent frameworks in the former approach is the alternating direction method of multipliers (ADMM). ADMM solves centralized economic dispatch/optimal power flow problems in a distributed manner by sending price signals (the dual variables of supply-demand constraints) for consumers/prosumers to independently optimize their solutions, which are then sent back to the central optimizer to update the price signals (for example, \cite{Multiclass,FADMM}). While this iterative approach enjoys rigorous convergence guarantees under convexity, its practical application encounters notable challenges:
(i) As an algorithmic approach rather than a market design, ADMM is subject to the same difficulties faced by a uniform-pricing-based market with all zero-marginal-cost resources.
Specifically, it is prone to `bang-bang' pricing issues -- market clearing prices plummet to zero in oversupply or spike to a price ceiling in undersupply. Addressing this with additional market mechanisms, such as adding auxiliary and operating reserve markets, akin to wholesale markets, could overly complicate distribution-level markets for energy trading.
(ii) ADMM and most of the distributed optimization algorithms are designed for static optimization, lacking the dynamism required for real-time adaptability, hence necessitating online versions of such algorithms. Online ADMM algorithms have been proposed in \cite{OnlineADMM}. Even so, network constraints are only guaranteed at the algorithm convergence. How to guarantee the physical constraints in a distributed fashion within each iteration is an open question.
(iii) The expectation that consumers or prosumers can solve optimization problems might be unrealistic due to a lack of expertise or resources.
These limitations form the basis of our interest in exploring P2P bilateral trading. Consequently, our literature review will be primarily focused on this alternative approach. There have been multiple review papers on the large and ever-growing literature on P2P energy trading \cite{PoorP2PReview,PoorP2PReview2,PoorGameTheory,P2PReview1,P2PReview2}. Within the literature, we focus on the works that explicitly consider distribution network constraints. \cite{ZIP} proposes a continuous double auction framework within which physical network constraints are maintained through a sensitivity-based approach. The agents in the auction are the so-called zero-intelligence-plus (ZIP) traders, who employ simple adaptive mechanisms without any learning or memory of the repeated multi-agent interactions.
More sophisticated than ZIP, a fully decentralized MARL is adopted in \cite{biagioni2021powergridworld} to realize decentralized voltage control, and it is extended in \cite{feng2022decentralized} to form a P2P energy trading market with physical constraints. These two papers implement a completely decentralized approach in the sense that each agent ignores the multi-agent interaction as well and just learns their single-agent policy through the Proximal Policy Optimization (PPO) algorithm \cite{PPO} to maximize their own payoff.

Another MARL approach that has been explored is the centralized training and decentralized execution (CTDE) framework. The multi-agent deep deterministic policy gradient (MADDPG) method from \cite{MADDPG} was applied in \cite{wang2020data} to solve the autonomous voltage control problem, and a multi-agent constrained soft actor-critic (MACSAC) algorithm inspired by MADDPG is developed to perform Volt-VAR control (VVC).
In \cite{qiu2021multi}, a MADDPG algorithm tailored for a double-sided auction market to realize P2P energy trading is proposed, though they do not consider physical constraints. While the CTDE framework potentially enables more efficient learning and robust outcomes, its reliance on a centralized entity for aggregating all agents' information and coordinating their training introduces vulnerabilities. Specifically, it increases the risk of single-point failures and susceptibility to cyber-attacks.

To overcome the drawbacks of fully decentralized and CTDE-based MARL, algorithms with limited communication among agents have also been developed. A notable contribution in this area is the consensus-based MARL algorithm designed for discrete-space problems, as presented in \cite{MARLBasar}, and its extension to continuous-space problems in \cite{zhang2018networked}. This model allows agents to learn and implement policies in a decentralized manner with limited communication, underpinned by a theoretical guarantee of convergence under certain conditions. Separately, \cite{qu2022scalable} introduced a Scalable Actor Critic (SAC) framework, particularly relevant for large-scale networks where an individual agent's state transition is influenced only by its neighbors. SAC is distinguished by its reduced communication needs compared to the CTDE framework and offers a theoretical guarantee for its policy convergence as well. However, the SAC framework is only established for discrete state-space problems, and its extension and effectiveness in continuous state-space applications remain unexplored.

In this work, we present several key contributions: First, we develop a comprehensive framework for a P2P energy trading market while integrating a consensus-based MARL algorithm. This framework facilitates automated control and bidding of DERs while allowing for decentralized learning of voltage constraints. Second, we present sufficient conditions required for the convergence of the MARL algorithm and show the details of implementing this algorithm using deep neural networks. Third, we conduct a comparative analysis of market outcomes derived from three distinct MARL strategies: a naive decentralization approach, the MADDPG approach, and the proposed consensus-based MARL. Our findings reveal that the consensus-based approach notably outperforms the others in achieving better market outcomes.

\section{Market Clearing Mechanism}
\label{sec:market}
In this section, we first describe a clearing mechanism, referred to as the supply-demand-ratio (SDR) mechanism \cite{liu2017energy}, that can address the two potential issues faced by a renewable-dominated P2P market: (i) marginal-cost-based pricing yielding zero market prices most of the time, and (ii) the potential bang-bang outcomes in a double-auction-based mechanism as the reserve prices for buyers (the utility rates) and the sellers (usually the feed-in tariffs) are publicly known. The second issue is unique to the P2P energy market, in our opinion, and the bang-bang phenomenon is well-documented in \cite{zhao2021auction} and will be explained further later in this section.

The SDR-clearing mechanism in P2P markets has been detailed in our earlier work \cite{feng2022decentralized}. For completeness, we briefly revisit it here. Let $\mathcal{I} = \{1,2,\ldots,I\}$ represent all consumers and prosumers, collectively termed agents. We assume that trading among the agents takes place in fixed rounds (such as hourly or every 15 minutes) and is cleared ahead of time (such as day-ahead or hour-ahead). In each trading round $t$, each agent $i \in \mathcal{I}$ submits a bid ($b_{i,t}$) to buy (if $b_{i,t} < 0$) or sell (if $b_{i,t} > 0$) energy. Agents can switch roles between buyers and sellers across different rounds, but not within the same round. For a round $t$, the sets of buyers and sellers are denoted as $\mathcal{B}_t = \{i: b_{i,t} < 0\}$ and $\mathcal{S}_t = \{i: b_{i,t} \geq 0\}$, respectively.

The SDR mechanism, originally proposed in \cite{liu2017energy}, is an approach to clear a market with all zero-marginal-cost supply resources, which is straightforward to implement. An SDR is defined as the ratio between total supply and demand bids at $t$; that is,
\vspace*{-8pt}
\begin{align}
SDR_t := \displaystyle \sum_{i \in \mathcal{S}_t} b_{i,t}\bigg/\Big(-\sum_{i \in \mathcal{B}_t} b_{i,t}\Big).
\label{eq:SDR}
\end{align}\\[-10pt]
To ensure that $SDR_t$ is well-defined, we assume $\mathcal{B}_t \neq \emptyset$ for any $t$, a reasonable assumption given agents' continuous need for energy. In rare cases where every agent is a prosumer with excess energy at time $t$, the P2P market can be temporarily suspended, and surplus energy sold to the grid at a predefined price, which does not impact our model or algorithm framework. Conversely, if $\mathcal{S}_t$ is $\emptyset$, indicating no excess energy for sale at $t$, then $SDR_t = 0$.
Based on its definition, when $SDR_t > 1$, it means that there is an oversupply. In this case, we assume that the excess energy in the P2P market is sold to a utility company, a distribution system operator (DSO), or an aggregator at a pre-defined rate, generally denoted as $FIT$ (feed-in tariff).\footnote{The $FIT$ rate is a rate set by utilities/policymakers. It can be set as zero and will not affect the pricing mechanism in any way.} If $0\leq SDR_t<1$, indicating overdemand in $t$, then the unmet demand bids will purchase energy at a pre-defined utility rate, denoted as $UR$. Without loss of generality, we assume that $FIT < UR$.\footnote{If $FIT > UR$, it means that energy consumers pay more than the utility rate to purchase energy from the prosumers, which is equivalent to a direct subsidy from energy consumers (including low-income consumers who do not have the means to invest in DERs) to prosumers. This cannot be justified from an equity perspective.}

The SDR mechanism determines the market clearing price as follows:
\begin{align}
\label{eq:Price_SDR}
& P_t:=P(SDR_t) \nonumber \\
&:=
\begin{cases}
(FIT-UR)\cdot SDR_t + UR, & 0 \leq SDR_t \leq 1;\\
FIT, & SDR_t > 1.
\end{cases}
\end{align}
In \eqref{eq:Price_SDR}, $P_t$ denotes the market clearing price in round $t$, which is a piecewise linear function of $SDR_t$, as illustrated in Figure \ref{fig:SDR_Price}.
\begin{figure}[!htb]
\centering
\includegraphics[scale = 0.35]{SDR.png}
\caption{Market clearing using SDR}
\label{fig:SDR_Price}\vspace*{-5pt}
\end{figure}

While the SDR approach may be criticized for not being based on economic theories or for being somewhat arbitrary (for example, instead of a linear function, the pricing function could be replaced by any downward-sloping nonlinear function, which would lead to different market prices for the same $SDR$), we want to point out that, from our perspective, the simplicity and transparency of the SDR mechanism outweigh these criticisms, and it is well-suited for a P2P energy market with all zero-marginal-cost resources. In our previous work \cite{zhao2021auction}, it is identified that a P2P energy market using a traditional double-sided auction may not work well due to the special characteristics of such a market: uncleared electricity demand bids have to be bought at the rate of $UR$, and uncleared energy supply bids have to be sold at $FIT$, with both $UR$ and $FIT$ known to all market participants.
With all zero-marginal-cost resources, agents would not know how to price their bids in a marginal-cost-based clearing mechanism, and the public information of the $UR$ and the $FIT$ naturally provides the focal points for sellers to bid at $UR$ and buyers to bid at $FIT$, aiming to maximize their own payoffs. This would lead to a similar bang-bang type of market outcome: when there is overdemand, the market price would be $UR$; conversely, when there is oversupply, the price would be $FIT$. We view the SDR approach as a fair and reasonable alternative to the marginal-cost-based approach to avoid such bang-bang outcomes, especially for a new market in which the supply resources are likely much smaller than demand.

The SDR method offers two key benefits: First, it maintains the market clearing price between $UR$ and $FIT$, ensuring that transactions for buyers and sellers are at least as favorable as dealing directly with the utility. Second, it simplifies the bidding process by requiring only quantity bids. This allows consumers with flexible loads to concentrate on adjusting their demand timings and prosumers to better manage their energy storage and sales strategies, avoiding the complexity of submitting price-quantity pair bids.
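For illustration, the following Python sketch computes $SDR_t$ and the clearing price $P_t$ from a list of quantity bids according to \eqref{eq:SDR} and \eqref{eq:Price_SDR}; the function name and the $FIT$/$UR$ values are placeholders of our own choosing, not part of the mechanism itself.
\begin{verbatim}
def sdr_price(bids, fit=0.04, ur=0.14):
    """Compute (SDR_t, P_t) per eq:SDR and eq:Price_SDR.

    bids: quantity bids b_{i,t} (>= 0 sell, < 0 buy), in kWh.
    fit, ur: feed-in tariff and utility rate; the values are
    illustrative placeholders only.
    """
    supply = sum(b for b in bids if b >= 0)
    demand = -sum(b for b in bids if b < 0)
    if demand == 0.0:
        # Every agent has surplus: P2P trading is suspended
        # this round and surplus is sold to the grid at FIT.
        return float("inf"), fit
    sdr = supply / demand
    price = (fit - ur) * sdr + ur if sdr <= 1.0 else fit
    return sdr, price

# Sellers offer 3 and 2 kWh; buyers request 4 and 6 kWh.
sdr_t, p_t = sdr_price([3.0, 2.0, -4.0, -6.0])
# sdr_t = 0.5 and p_t lies strictly between FIT and UR.
\end{verbatim}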

\section{Decentralized Learning in P2P Trading with Consensus Updates}
\label{sec:Consensus}
In this section, we first formulate the decision-making problem faced by each agent in a repeated P2P trading market as a Markov decision process. (For the sake of brevity, we only describe a prosumer's problem. A pure consumer's problem is the same without the supply bids.)
We then introduce the details of the consensus-MARL algorithm.

\subsection{Markov Decision Process for a Prosumer}
\label{subsec:CMDP}
In our setup, we assume that each prosumer is located at one bus of the distribution network and has two resources under their control: a solar photovoltaic (PV) system with a smart inverter, and a rechargeable battery as energy storage. Each prosumer also has a fixed baseload of energy consumption. At a particular time period $t$, whether a prosumer is a net energy seller or a buyer depends on their baseload versus the level of PV generation and energy storage.

Due to the presence of energy storage, each agent's bidding and charging decisions are linked over time, which is naturally modeled in a dynamic programming framework. Since an agent's decisions at $t$ only depend on its energy storage level at $t-1$ and the PV generation in $t$, a discrete-time Markov decision process (MDP) model is suitable here. In the following, we describe the key building blocks of each agent's MDP model.

\textit{Observation and state variables:} Agent $i \in \mathcal{I}$ at time step $t$ is assumed to receive the system states $s_t = (s_{1,t},\dots,s_{I,t}) \in\mathcal{S}:= \Pi_{i=1}^I \mathcal{S}_i$, which is the concatenation of the personal states of each agent, where $I$ is the total number of agents in the system. Specifically, the personal states of agent $i$ are defined as $s_{i,t} := (d^p_{i,t}, d^q_{i,t}, e_{i,t},PV_{i,t}) \in \mathcal{S}_i$, where $d^p_{i,t}$ and $d^q_{i,t}$ are the inflexible demand of active and reactive power of agent $i$, $e_{i,t}$ is the state of charge of agent $i$'s energy storage system, and $PV_{i,t}$ is the PV (real) power generation in $t$.
Among the state variables, $(d^p_{i,t}, d^q_{i,t}, PV_{i,t})$ are only observations because their transitions adhere to underlying distributions that are independent of agents' actions. In contrast, the transitions of $e_{i,t}$ are influenced by agents' actions, with details elaborated further below after agents' actions are defined.
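For concreteness, the personal state of an agent can be represented by a simple record such as the Python sketch below; the class and field names are ours and purely illustrative.
\begin{verbatim}
from dataclasses import dataclass

@dataclass
class AgentState:
    """Personal state s_{i,t} = (d^p, d^q, e, PV)."""
    d_p: float  # inflexible active-power demand d^p_{i,t}
    d_q: float  # inflexible reactive-power demand d^q_{i,t}
    e: float    # battery state of charge e_{i,t}
    pv: float   # PV real-power generation PV_{i,t}

# The system state s_t received by every agent is the
# anonymized concatenation (s_{1,t}, ..., s_{I,t}), e.g.,
# a tuple of AgentState records labeled only by index.
\end{verbatim}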

\begin{comment}
The transition of the state variable $v_{i,t}$ depends on all agents' collective states and actions. Specifically, let the distribution system where the P2P market operates be represented by an undirected graph $(\mathcal{N}, \mathcal{L})$, with $\mathcal{N}$ being the set of all buses and $\mathcal{L}$ the set of all links in the distribution network. For each $k \in \mathcal{N}$, let $p'_{k,t}$ denote net real power injection at bus $k$ in $t$, but before agents' bidding; that is, $p'_{k,t} = \sum_{i\in \mathcal{Z}_k} d^p_{i,t}$, where $\mathcal{Z}_k$ is the set of all agents at bus $k$). Similarly, we use $q'_{k,t}$ to denote the net reactive demand at bus $k$, and $q'_{k,t} = \sum_{i\in \mathcal{Z}_k} d^q_{i,t}$. For each $(k, j)\in \mathcal{L}$, let $G_{k j}$ and $B_{k j}$ represent the real and imaginary parts of the admittance of the branch $(k, j)$, respectively. Then the voltage magnitude $|V_{k,t}|$ at $k\in \mathcal{N}$, \hl{(Please explain what are small $v$, big $V$, $|V|$ and $V'$)} and the phase angle $\alpha_{k,t}$ at $k\in \mathcal{N}$ at time step $t$ can then be obtained through solving the standard bus-injection model as follows (the $t$ index is omitted for simplicity):
\vspace*{-6pt}
\begin{align}
\begin{split}
p'_{k,t}=&\sum_{j\in \mathcal{N}:(j, k) \in \mathcal{L}}|V_{k,t}||V_{j,t}|(G_{k j} \cos (\alpha_{k,t}-\alpha_{j,t}) +B_{k j} \sin (\alpha_{k,t}-\alpha_{j,t})), \\
q'_{k,t}=&\sum_{j\in \mathcal{N}:(j, k) \in \mathcal{L}}|V_{k,t}||V_{j,t}|(G_{k j} \sin (\alpha_{k,t}-\alpha_{j,t}) -B_{k j} \cos (\alpha_{k,t}-\alpha_{j,t})),
\end{split}
\label{eq:BusInjection1}
\end{align}
\textcolor{magenta}{Let $bus(i):\mathcal{I} \to \mathcal{N}$ be the mapping from the agent index to the index of its bus. Then $v_{i,t}= |V_{{bus(i)},t}|$.}
\end{comment}

The assumption that each agent is required to observe the whole system's state may appear to be strong in a decentralized setting and is prone to criticisms and privacy concerns. Our responses are threefold: first, the identity of the agents is irrelevant, and hence, from an agent's perspective, which state variable corresponds to which specific agent is not known. The
state information can simply be labeled as Agent 1, 2, ..., I, without revealing the agent's identity.
Second,
the proposed algorithms are intended to be implemented on devices that can receive information from the grid (generally referring to a utility or a distributed system operator) and automatically participate in the repeated P2P market without human intervention (aka control automation). The information received by the devices should be encrypted and can only be decrypted by the control-automation devices. Granted that the devices could be compromised, there have been works on how to address adversarial attacks in a multi-agent reinforcement learning framework \cite{Gupta}, which leads to interesting future research directions.
Third, one of our main goals in this work is to establish rigorous convergence results for the proposed MARL algorithm. In \cite{feng2022decentralized}, we proposed a completely decentralized framework in which each agent just uses their local state information to update the policies and solve their own MDP problem as if in a single-agent environment. The numerical results are surprisingly good in the sense that they all indicate convergence towards some steady states as the learning continues. However, no theoretical results could be established when each agent only uses local information, without communication or shared reward functions. Our view is that to be able to rigorously show a MARL game's convergence to a steady state, there has to be some global information shared among the agents.

\textit{Action or control variables:} At time $t$, the actions that agent $i$ can take are represented by a vector $a_{i,t} := (a^{q}_{i,t}, a^e_{i,t}) \in \mathcal{A}_i = \mathcal{A}^q_i \times \mathcal{A}^e_i$, where $a^{q}_{i,t}$ is the reactive power injected or absorbed by agent $i$'s smart inverter, and $a^{e}_{i,t}$ is the energy charged (if $a^{e}_{i,t} > 0$) to or discharged (if $a^{e}_{i,t} < 0$) from the battery.
The feasible action spaces of $a^{q}_{i,t}$ and $a^e_{i,t}$ are generically denoted by $\mathcal{A}^q_i$ and $\mathcal{A}^e_i$, respectively.
In the specific context of our model, it is safe to assume that the action space $\mathcal{A}_i = \mathcal{A}^q_i \times \mathcal{A}^e_i$ is bounded. In addition, agent $i$'s actual bid (either buy or sell) to a P2P market at time $t$ can be written as
\begin{equation}
\label{eq:action2bid}
b_{i,t} =
\begin{cases}
PV_{i,t} - d^p_{i,t} - \min ( a^{e}_{i,t},\frac{\overline{e}_i - e_{i,t}}{\eta^c_{i}}), \ \ &\text{if } a^{e}_{i,t} \geq 0,\\
PV_{i,t} - d^p_{i,t} - \max ( a^{e}_{i,t}, -e_{i,t}\cdot \eta^d_{i}), \ \ &\text{otherwise},
\end{cases}
\end{equation}
which is simply the net energy of PV generation minus baseload demand (of real power) and charge to the battery.

\textit{State transition:}
Out of the state variables defined earlier, the energy storage level $e_{i,t}$ is the only one that is directly affected by an agent's own action. The state transition for $e_{i,t}$ can be written as follows:
\begin{align}
\label{eq:trans}
& e_{i,t+1} = \nonumber\\
& \max \Big\{ \min \Big[e_{i,t} + \eta^c_{i} \max(a^e_{i,t},0) + \frac{1}{\eta^d_{i}} \min(a^e_{i,t},0), \overline{e}_i\Big], 0\Big\},
\end{align}
where $\eta^c_{i}$ and $\eta^d_{i}$ are the charging and discharging efficiency of agent $i$'s battery, respectively, and $\overline{e}_i$ is the battery capacity. The charging efficiency represents the ratio of the amount of energy effectively stored in the system to the energy input during the charging process, while the discharging efficiency is the ratio of the energy delivered by the system during discharge to the energy that was initially stored in it. Their product, $\eta^c_{i} \times \eta^d_{i}$, yields the so-called round-trip efficiency.
The outer ``max'' operation in \eqref{eq:trans} ensures that the energy level in the battery will not be negative, while the inner ``min'' operation ensures that the battery storage capacity is not exceeded.

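As a reading aid, the short Python sketch below maps one charging/discharging decision into the submitted bid of \eqref{eq:action2bid} and the storage update of \eqref{eq:trans}; the function and parameter names (and the efficiency values) are illustrative assumptions of ours, not prescribed by the model.
\begin{verbatim}
def bid_and_next_soc(pv, d_p, a_e, e, e_max,
                     eta_c=0.95, eta_d=0.95):
    """Return (b_{i,t}, e_{i,t+1}) for one agent and round.

    pv, d_p : PV generation and inflexible real demand.
    a_e     : charge (>0) / discharge (<0) decision.
    e, e_max: current state of charge and battery capacity.
    eta_c, eta_d: charging/discharging efficiencies
    (illustrative values; round trip = eta_c * eta_d).
    """
    if a_e >= 0:
        # eq:action2bid, charging branch: cannot charge
        # beyond the remaining usable capacity.
        b = pv - d_p - min(a_e, (e_max - e) / eta_c)
    else:
        # Discharging branch: cannot discharge more energy
        # than the battery currently holds.
        b = pv - d_p - max(a_e, -e * eta_d)
    # eq:trans: storage update, clipped to [0, e_max].
    e_next = e + eta_c * max(a_e, 0.0) + min(a_e, 0.0) / eta_d
    e_next = max(min(e_next, e_max), 0.0)
    return b, e_next
\end{verbatim}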
\begin{comment}
For the other state variables, the next state $s^{-e}_{t+1}$ is supposed to be sampled from the Markov kernel $\kappa(s^{-e}_{t+1}|s_t^{-e}): \mathcal{S}^{-e}\times\mathcal{S}^{-e}\to[0,\infty]$ which is Lebesgue integrable, where the notation $s_t^{-e}$ refers to the collection of all system state variables excluding each agent's battery storage level at time step $t$, and $\mathcal{S}^{-e}$ is the corresponding state space. Then
\begin{align}
\label{eq:kernel}
\begin{split}
&p(ds_{t+1}|s_t, a_t) \\
&= \prod_{i=1}^I\mathbb{I}_{\{e_{i,t+1} = E_i(e_{i,t},a^e_{i,t})\}} \nu(de_{i,t+1}) \cdot \kappa(s^{-e}_{t+1}|s_t^{-e}) ds^{-e}_{t+1}
\end{split}
\end{align}
is the system state transition kernel of the MDP given action $a_t$ and system state $s_t$, where $\mathbb{I}_{\{e_{i,t+1} = E_i(e_{i,t},a^e_{i,t})\}}$ is an indicator function such that $\mathbb{I}_{\{e_{i,t+1} = E_i(e_{i,t},a^e_{i,t})\}} = 1$ if $e_{i,t+1} = E_i(e_{i,t},a^e_{i,t})$ and 0 otherwise, $ds^{-e}_{t+1}$ is the Lebesgue measure, and $\nu(de_{i,t+1})$ is the counting measure.
\end{comment}

\textit{Reward:}
Each agent $i$'s reward function is affected by both the collective actions of the agents and the system states. Specifically, agent $i$'s reward in time step $t$, denoted by $r_{i,t}$, is a random variable whose conditional expectation has two components as follows:
\begin{align}
\begin{split}
\mathbb{E}[r_{i,t}|s_t, a_t] &:= R_{i}(a_{i,t}; a_{-i,t}, s_t) \\
&\ = R^{m}_{i}(a^e_{i,t}; a^e_{-i,t}, s_t) + R^{v}(a_{i,t}; a_{-i,t}, s_t)/I.
\end{split}
\label{eq:reward}
\end{align}
In \eqref{eq:reward}, the notation $a_{-i,t}$ refers to the collection of all other agents' actions, excluding agent $i$'s; while $a^e_{-i,t}$ refers to only the energy charge/discharge decisions of the other agents.
The first term $R^{m}_{i}$ in \eqref{eq:reward} is the energy purchase cost or sales profit of agent $i$ from the P2P energy market (with the superscript `m' standing for `market'), whose formulation is as follows:
\begin{align}
\label{eq:P2PReward}
& R^{m}_{i} :=
\begin{cases}
\mathbb{I}_{i\in\mathcal{B}_t} \times \Big[ SDR_t \cdot P_t \cdot b_{i,t}
+\ (1-SDR_t) \cdot UR \cdot b_{i,t} \Big]\\
+\ \mathbb{I}_{i\in\mathcal{S}_t} \times \Big(P_t \cdot b_{i,t}\Big) ,\quad \mathrm{if}\ 0 \leq SDR_t \leq 1,\\[10pt]
FIT\cdot b_{i,t}, \quad \mathrm{if}\ SDR_t > 1,
\end{cases}
\end{align}
where $\mathbb{I}_{i\in\mathcal{B}_t}$ is an indicator function such that $\mathbb{I}_{i\in\mathcal{B}_t} = 1$ if $i\in \mathcal{B}_t$, and 0 otherwise. Similarly, $\mathbb{I}_{i\in\mathcal{S}_t} = 1$ if $i \in \mathcal{S}_t$ and 0 otherwise. $SDR_t$ is the supply-demand ratio as defined in \eqref{eq:SDR}, which depends on all agents' bids, and the bids further depend on each agent's charging/discharging decisions, as well as the PV generation and the baseload, as specified in \eqref{eq:action2bid}. $P_t$ in \eqref{eq:P2PReward} is the market clearing price as defined in \eqref{eq:Price_SDR}. Note that when $SDR_t < 1$, not all demand bids will be cleared in the P2P market at time $t$. In such instances, buyers will need to purchase the remaining needed energy at the utility rate, leading to the $UR \cdot b_{i,t}$ term in the definition of the reward $R_i^m$ in equation \eqref{eq:P2PReward}. To ensure fairness and prevent any inadvertent favoritism among the demand bids, we employ a mechanism where each demand bid is partially cleared. Specifically, the clearance proportion is exactly $SDR_t$ (which represents the percentage of total demand bids cleared), and the uncleared bid for every agent is then $(1 - SDR_t) \cdot b_{i,t}$. This approach of proportional scaling echoes the rules used in multi-unit double auctions in \cite{huang2002design}.
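The market component of the reward can be spelled out directly from \eqref{eq:P2PReward}; the minimal Python fragment below does so for a single agent, given the round's $SDR_t$, the clearing price $P_t$, and the agent's bid (the function name and default rates are ours and purely illustrative).
\begin{verbatim}
def market_reward(b_i, sdr, price, fit=0.04, ur=0.14):
    """R^m_i of eq:P2PReward for one agent in round t."""
    if sdr > 1.0:
        # Oversupply: all bids settle at the feed-in tariff.
        return fit * b_i
    if b_i >= 0:
        # Seller: the whole supply bid clears at P_t.
        return price * b_i
    # Buyer: a fraction SDR_t of the bid clears at P_t; the
    # uncleared remainder is bought from the utility at UR.
    return sdr * price * b_i + (1.0 - sdr) * ur * b_i
\end{verbatim}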

The second term in the reward function, $R^{v}$, is the system voltage violation penalty at a given time $t$. To provide an explicit form of $R^{v}$, we first write out the standard bus-injection model for all $k= 1,2,\dots, N$ (the time index $t$ is omitted for simplicity):
\begin{align}
\begin{split}
p_{k}=&\sum_{j=1}^{N}|V_{k}||V_{j}|(G_{k j} \cos (\alpha_{k}-\alpha_{j}) +B_{k j} \sin (\alpha_{k}-\alpha_{j})), \\
q_{k}=&\sum_{j=1}^{N}|V_{k}||V_{j}|(G_{k j} \sin (\alpha_{k}-\alpha_{j}) -B_{k j} \cos (\alpha_{k}-\alpha_{j})).
\end{split}
\label{eq:BusInjection2}
\end{align}
In the above system of nonlinear equations, $p_k$ and $q_k$, $k = 1, \ldots, N$, are input data, where $p_k$ is the net real power injection (or withdrawal) at bus $k$ (the sum of the energy amounts bid by all agents at bus $k$ per unit time, i.e., $p_k = \sum_{i\in \mathcal{N}_k} b_{i}$, with $b_i$ given in \eqref{eq:action2bid} and $\mathcal{N}_k$ denoting the set of agents at bus $k$), and $q_k$ is the net reactive power injection at bus $k$ (i.e., $q_k = \sum_{i\in \mathcal{N}_k} (a^q_i - d^q_i)$). The variables are $|V_k|$ and $\alpha_k$ for $k = 1, \ldots, N$, with $|V_k|$ being the voltage magnitude and $\alpha_k$ being the phase angle at bus $k$.
The terms $G_{k j}$ and $B_{k j}$ are parameters that represent the real and imaginary parts of the admittance of branch $k{\text -}j$, respectively.

At a given round $t$ of the P2P trading, for a given set of the real and reactive power injection/withdrawal as the result of agents' bidding $(p_k, q_k)_{k = 1}^N$, if the system of equations \eqref{eq:BusInjection2} has a solution,\footnote{Sufficient conditions under which \eqref{eq:BusInjection2} has a solution are provided in \cite{Sun}.} then we can define the voltage violation penalty as follows:
\vspace*{-5pt}
\begin{align}
R^{v}:= - \lambda \sum_{k=1}^N \text{clip} \Big[\max (|V_{k,t}|-\overline{V}_{k}, \underline{V}_k-|V_{k,t}|), \{0, M\}\Big].
\label{eq:penalty} \vspace*{-5pt}
\end{align}
In \eqref{eq:penalty}, $\overline{V}_k$ and $\underline{V}_k$ represent the upper and lower limits of bus $k$'s voltage magnitude, respectively.
Here, $\text{clip}[x, \{v_{min}, v_{max}\}]$ denotes a generic clip function, defined as $x$ when $v_{min} \leq x \leq v_{max}$, $v_{min}$ if $x < v_{min}$, and $v_{max}$ if $x > v_{max}$.
Hence, if the voltage magnitudes $|V_{k,t}|$ obtained from solving equations \eqref{eq:BusInjection2} are within the voltage limits $[\underline{V}_k, \overline{V}_k]$ on every bus $k = 1, \ldots, N$, then $R^v = 0$; if there is a voltage violation at a certain bus $k$, the term $\max (|V_{k,t}|-\overline{V}_{k}, \underline{V}_k-|V_{k,t}|)$ becomes positive, and $R^v$ becomes negative, with the positive weight $\lambda$ amplifying the voltage violation. The other positive parameter in \eqref{eq:penalty}, $M$, simply ensures that the reward $R^v$ is bounded below, a technical condition needed for the convergence of the consensus MARL algorithm introduced in the next subsection.

If the system of equations \eqref{eq:BusInjection2} does not have a solution with a given set of agents' bids, we can simply set $ R^{v} = - \lambda \cdot N \cdot M$.
We want to emphasize that the voltage violation does not result in actual financial penalties for the agents since the market clearing happens before real-time delivery. It simply represents feedback corresponding to agents' bids and is used by each agent to train their reinforcement learning policies.
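A minimal sketch of the penalty in \eqref{eq:penalty} is given below, assuming the bus voltage magnitudes have already been obtained from a power flow solution; the function name, the per-unit limits, and the constants $\lambda$ and $M$ shown are illustrative choices of ours only.
\begin{verbatim}
def voltage_penalty(v_mags, v_min=0.95, v_max=1.05,
                    lam=10.0, big_m=1.0, solved=True):
    """System penalty R^v of eq:penalty (0 if no violation)."""
    if not solved:
        # Power flow has no solution for the submitted bids:
        # assign the worst-case penalty -lambda * N * M.
        return -lam * len(v_mags) * big_m
    total = 0.0
    for v in v_mags:
        viol = max(v - v_max, v_min - v)     # > 0 only outside limits
        total += min(max(viol, 0.0), big_m)  # clip to [0, M]
    return -lam * total
\end{verbatim}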
After market-clearing and solving for voltage magnitudes, the total reward for agent $i$ at time step $t$ is realized, as defined in \eqref{eq:reward}. Note that while we do not explicitly assume to have a DSO in the distribution network, an entity is still needed to solve Equation \eqref{eq:BusInjection2}. A utility company can assume such a role. The solution process may even be carried out by a distributed ledger system, such as on a blockchain.

\textit{Objective function:} At each time step $t$, agent $i$ receives the system state $s_t$, chooses an action $a_{i,t} \in \mathcal{A}_i$, and receives a reward $r_{i,t}$. At time \( t \), agent \( i \)'s decision, given the system state \( s_t \in \mathcal{S} = \prod_{i=1}^I \mathcal{S}_i \), is determined by the policy \( \pi_{\theta_i}(\cdot|s) \). This policy is a density function mapping from \( \mathcal{A}_i \) to $[0, \infty)$, and it is parameterized by \( \theta_i \). Here, \( \theta_i \) is an element of a generic set \( \Theta_i \subset \mathbb{R}^{m_i} \), where the dimensionality $m_i$ is determined by each agent. In choosing $m_i$, each agent faces a tradeoff: it should be large enough to avoid underfitting, yet not so large as to risk overfitting and increased training time.
Let $\theta = [\theta_1, \cdots, \theta_I]$ be the concatenation of the $\theta_i$'s, and $\pi_{\theta}(\cdot|s) = \prod_{i=1}^I \pi_{\theta_i}(\cdot | s)$ be the joint policy of all agents. In the following discussion of this section, we assume that for every $\theta\in\Theta$, the Markov chain $\left\{s_t\right\}_{t \geq 0}$ induced by the agents' joint policy $\pi_{\theta}$ is ergodic with a stationary distribution $\rho_{\theta}$.

Different from a fully decentralized framework, the goal for each agent in the consensus-update framework is to solve the following optimization problem:
\begin{align}
\label{eq:payoff}
\sup_{\theta\in \Theta:= \Pi_{i=1}^I \Theta_i} J(\theta) = \mathbb{E}_{s_0 \sim \rho_{\theta}}\Bigg[\lim_{T\to \infty}\frac{1}{T}\sum_{t=0}^{T} \Bigg(\frac{1}{I} \sum_{i=1}^I r_{i,t}\Bigg)\Bigg|s_0, \pi_{\theta} \Bigg],
\end{align}
to optimize the average system long-term reward. The objective function resembles a cooperative game, which may face similar critiques regarding the need for each agent to access system-wide state variables in a decentralized setting. A pertinent response, as previously mentioned, is the use of control automation: the objective function can be integrated into intelligent control devices, making them `black boxes' for end-users. Additionally, in developing a MARL framework with provable convergence, some level of global information sharing among agents seems necessary. The collective objective function, as outlined in \eqref{eq:payoff}, is essential for decentralized agents to reach a consensus.
To ensure that the objective function in \eqref{eq:payoff} is well-defined, we have the following result.
\begin{lemma} The supremum in \eqref{eq:payoff}, $\sup_{\theta} J(\theta)$, exists and is finite.
\label{lemma1}
\end{lemma}
\begin{proof}
For each agent $i = 1, \ldots, I$, since the market clearing price $P_t$ and the agent's bid $b_{i,t}$ are bounded above (by their PV and battery capacities), according to Eq. (\ref{eq:Price_SDR}) and (\ref{eq:action2bid}), respectively, the first term in an agent's reward function, $R^{m}_{i}$, is then bounded above based on Eq. (\ref{eq:P2PReward}), for any $a \in \mathcal{A}:= \Pi_{i=1}^I \mathcal{A}_i$ and $s\in \mathcal{S}$.
The second term, $R^{v}$, is bounded above by 0 based on its definition in Eq. (\ref{eq:penalty}). Hence, agent $i$'s reward $r_{i,t}$ is bounded at any $t$, and for any $a \in \mathcal{A}$ and $s\in \mathcal{S}$. Since the number of agents is finite, there exists a common upper bound for all $r_{i,t}$'s, and let us denote it as $\overline{R}$. Then by the formulation in \eqref{eq:payoff}, $J(\theta) \leq \overline{R}$ for all $\theta \in \Theta$. By the well-known least-upper-bound property of real numbers, we know that $\sup_{\theta} J(\theta)$ exists and is finite.
\end{proof}

\subsection{Consensus-update Actor-critic Algorithm}
\label{subsec:alg}
In this section, we introduce the consensus-update actor-critic algorithm for MARL with continuous state and action spaces, developed in \cite{zhang2018networked}, and apply it to solve Problem \eqref{eq:payoff}. First, define the global relative action-value function $Q_{\theta}(s,a)$ for a given $a\in \mathcal{A}$, $s\in \mathcal{S}$ and a policy $\pi_{\theta}$ as:
\begin{align}
Q_\theta(s, a) :=\sum_{t=0}^{\infty} \mathbb{E}\left[\frac{1}{I} \sum_{i=1}^I r_{i,t}-J(\theta) \mid s_0=s, a_0=a, \pi_{\theta}\right].
\end{align}
Note that since all the agents share the same $J(\theta)$, the $Q$ function does not need to have an agent index and is the same across the agents.

The function $Q_{\theta}(s,a)$ cannot be calculated by each agent, even if the global state and action information is shared, since the joint policy distribution $\pi_{\theta}$ is not known. Instead, each agent uses $\hat{Q}(\cdot,\cdot;\omega^i):\mathcal{S}\times\mathcal{A}\to\mathbb{R}$, a class of functions parameterized by $\omega^i \in \mathbb{R}^K$, where $K$ is the dimension of $\omega^i$, to approximate the action-value function $Q_{\theta}(s,a)$. Note that unlike the parameters $\theta_i$, where agents can choose their own policy models (and hence different dimensions of $\theta_i$), the dimension of $\omega^i$ has to be the same across all agents (and hence $K$ does not have an agent index) to facilitate the consensus update.

The decentralized consensus-update actor-critic algorithm for networked multi-agents works as follows: at the beginning of time step $t+1$, each agent $i$ observes the global state $s_{t+1}$ and chooses their action $a_{i,t+1}$ according to their own policy $\pi_{\theta_{i,t}}$ (referred to as the `actor'), where $\theta_{i,t}$ is the policy parameter for agent $i$ at time step $t$. Then each agent $i$ will receive the joint action $a_{t+1} = (a_{1,t+1},\dots, a_{I,t+1})$ and their own reward $r_{i,t+1}$, which help them update $\hat{Q}(\cdot,\cdot;\omega^i_t)$ (referred to as the `critic') and $\pi_{\theta_{i,t}}$ on their own in each time step.
Specifically, each agent first updates the temporal-difference (TD) error $\delta^i_t$ and the long-term return estimate $\Bar{r}^i_{t+1}$ as follows:
\begin{align}
&\delta_t^i := r_{i,t+1}-\Bar{r}^i_{t+1}+\hat{Q}\left(s_{t+1},a_{t+1};\omega_t^i\right)-\hat{Q}\left(s_{t},a_{t};\omega_t^i\right),\\
&\Bar{r}^i_{t+1} \leftarrow (1-\beta_{\omega,t}) \cdot \Bar{r}^i_{t} + \beta_{\omega,t}\cdot r_{i,t}.
\end{align}
Let $\widetilde{\omega}_t^i$ denote a temporary local parameter of $\hat{Q}(\cdot,\cdot;\omega^i_t)$ for agent $i$; it is updated as:
\begin{align}
\widetilde{\omega}_t^i = \omega_t^i+\beta_{\omega, t} \cdot \delta_t^i \cdot \nabla_\omega \hat{Q}\left(s_{t},a_{t};\omega_t^i\right). \label{eq:tilde_omega}
\end{align}
The local parameter of the policy $\pi_{\theta_{i,t}}$ is updated as
\begin{align}
\theta_{i,t+1} = \Gamma^i[\theta_{i,t}+\beta_{\theta, t} \cdot \widehat{I}_t^{Q, i}],
\label{eq:theta_update}
\end{align}
where $\Gamma^i: \mathbb{R}^{m_i} \rightarrow \Theta_i \subset \mathbb{R}^{m_i}$ is a projection operator. An example is the orthogonal projection mapping $\Gamma^i(x) \triangleq \underset{y \in \Theta_i}{\arg \min }\|y-x\|$ for all $x \in \mathbb{R}^{m_i}$, where $\Theta_i$ is a compact convex set. The $\widehat{I}_t^{Q, i}$ term in \eqref{eq:theta_update} is given as follows:
\begin{align}
\begin{split}
& \widehat{I}_t^{Q, i}=\\
& \int_{\mathcal{A}^i} d \pi_{\theta_{i,t}}\left(a^i \mid s_t\right) \nabla_{\theta_i} \log \pi_{\theta_{i,t}}\left(a^i \mid s_t\right) \hat{Q}\left(s_t, a^i, a_t^{-i} ; \omega_t^i\right).
\end{split}
\end{align}
To limit communication among agents, each agent $i$ only shares their local parameter $\widetilde{\omega}_t^i$ with the others; no other information needs to be shared. A consensual estimate of $Q_{\theta}(s,a)$ can then be obtained by updating the parameter $\omega_{t+1}^i$ as follows:
\begin{align}
\omega_{t+1}^i = \sum_{j=1}^I \widetilde{\omega}_t^j.
\end{align}
To apply such an algorithm to the P2P energy trading, we illustrate the conceptual framework in Fig. \ref{fig:consensus}. The detailed algorithm is presented in Algorithm \ref{algo1}.
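To connect the updates above and Algorithm \ref{algo1} to an implementation, the Python sketch below performs one iteration of the critic, actor, and consensus steps for all agents, using a linear critic $\hat{Q}(s,a;\omega)=\omega^{\top}\phi(s,a)$ and a Monte Carlo estimate of $\widehat{I}_t^{Q,i}$. All names are ours, the projection $\Gamma^i$ is omitted for brevity, and uniform averaging weights are used in the consensus step; it is a sketch under these assumptions rather than the exact implementation used in our simulations.
\begin{verbatim}
import numpy as np

def consensus_ac_step(agents, s, a, rewards, s_next, a_next,
                      feat, beta_w, beta_th, n_mc=16):
    """One sketched iteration of the consensus actor-critic.

    agents : list of dicts holding 'omega' (critic weights,
             common dimension K), 'rbar', 'theta', and the
             callables 'sample(theta, s)' and
             'grad_log_pi(theta, s, a_i)'.
    feat   : feature map phi(s, a) with Qhat = omega @ phi.
    """
    I = len(agents)
    phi_t, phi_tp1 = feat(s, a), feat(s_next, a_next)
    omega_tilde = []
    for i, ag in enumerate(agents):
        # Running estimate of the long-run average reward.
        ag['rbar'] = (1 - beta_w) * ag['rbar'] + beta_w * rewards[i]
        # Temporal-difference error with the local critic.
        delta = (rewards[i] - ag['rbar']
                 + ag['omega'] @ phi_tp1 - ag['omega'] @ phi_t)
        # Critic step: temporary local parameter omega-tilde.
        omega_tilde.append(ag['omega'] + beta_w * delta * phi_t)
        # Actor step: Monte Carlo estimate of I^{Q,i}, sampling
        # own actions while keeping the others' actions fixed.
        grad = np.zeros_like(ag['theta'])
        for _ in range(n_mc):
            a_i = ag['sample'](ag['theta'], s)
            a_mix = list(a)
            a_mix[i] = a_i
            q_hat = ag['omega'] @ feat(s, tuple(a_mix))
            grad += ag['grad_log_pi'](ag['theta'], s, a_i) * q_hat
        # Projection onto Theta_i is omitted in this sketch.
        ag['theta'] = ag['theta'] + beta_th * grad / n_mc
    # Consensus step: exchange omega-tilde and average them
    # (uniform weights 1/I, a common doubly stochastic choice).
    omega_bar = sum(omega_tilde) / I
    for ag in agents:
        ag['omega'] = omega_bar.copy()
    return agents
\end{verbatim}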

In the next subsection, we present the conditions under which Algorithm \ref{algo1} can be shown to converge to a steady state.

\begin{algorithm}
\caption{Consensus-update actor-critic algorithm for voltage control with P2P energy market}
\textbf{Input:} Initial values of the parameters $\Bar{r}_0^i, \omega_0^i, \widetilde{\omega}_0^i, \theta_0^i, \forall i \in \{1,\dots,I\}$, the initial state $s_0$, and stepsizes $\left\{\beta_{\omega, t}\right\}_{t \geq 0} \text { and }\left\{\beta_{\theta, t}\right\}_{t \geq 0}$.\\
\For{\textbf{each} agent $i = 1,...,I$}
{
Makes the decision on reactive power generation and battery charging/discharging $a_{i,0} \sim \pi_{\theta_0^i}\left(\cdot \mid s_0\right)$.
}The power flow equation of the distribution network is solved, the P2P energy market is cleared, and the joint action $a_0=\left(a_{1,0}, \ldots, a_{I,0}\right)$ is sent to each agent.\\
\For{\textbf{each} agent $i = 1,...,I$}
{
Observes the reward $r_{i,0}$.
}
\For{\textbf{each} time step $t = 0,1,\dots,T$}
{
\For{\textbf{each} agent $i = 1,...,I$}
{
Observes the next global state $s_{t+1}$.\\
Updates $\Bar{r}^i_{t+1} \leftarrow (1-\beta_{\omega,t}) \cdot \Bar{r}^i_{t} + \beta_{\omega,t}\cdot r_{i,t}$.\\
Makes the decision on reactive power generation and battery charging/discharging $a_{i,t+1} \sim \pi_{\theta_{i,t}}\left(\cdot \mid s_{t+1}\right)$.
}
The power flow equation of the distribution network is solved, the P2P energy market is cleared, and the joint action $a_{t+1}=\left(a_{1,t+1}, \ldots, a_{I,t+1}\right)$ is sent to each agent.\\
\For{\textbf{each} agent $i = 1,...,I$}
{
1. Observes the reward $r_{i,t+1}$ and updates $\delta_{t}^i \leftarrow r_{i,t+1}-\Bar{r}^i_{t+1}+\hat{Q}\left(s_{t+1},a_{t+1};\omega_t^i\right)-\hat{Q}\left(s_{t},a_{t};\omega_t^i\right)$.\\
2. \textbf{Critic step: } $\widetilde{\omega}_t^i \leftarrow \omega_t^i+\beta_{\omega, t} \cdot \delta_t^i \cdot \nabla_\omega \hat{Q}\left(s_{t},a_{t};\omega_t^i\right)$.\\
3. \textbf{Actor step: } $\theta_{i,t+1} \leftarrow \Gamma^i[\theta_{i,t}+\beta_{\theta, t} \cdot \widehat{I}_t^{Q, i}]$, where $\Gamma^i: \mathbb{R}^{m_i} \rightarrow \Theta_i \subset \mathbb{R}^{m_i}$, and
\begin{align}
& \widehat{I}_t^{Q, i}= \int_{\mathcal{A}^i} d \pi_{\theta_{i,t}}\left(a^i \mid s_t\right) \cdot \nonumber\\
&\nabla_{\theta_i} \log \pi_{\theta_{i,t}}\left(a^i \mid s_t\right) \hat{Q}\left(s_t, a^i, a_t^{-i} ; \omega_t^i\right),
\end{align}
4. Sends $\widetilde{\omega}_t^i$ to the other agents in the system.
}
\For{\textbf{each} agent $i = 1,...,I$}
{
\textbf{Consensus step: } $\omega_{t+1}^i \leftarrow \frac{1}{I}\sum_{j=1}^I \widetilde{\omega}_t^j$.
}
}
\label{algo1}
\end{algorithm}
\begin{figure*}[!htb]
\centering
\includegraphics[width=\textwidth]{consensus_framework.pdf}
\caption{Consensus MARL Framework for the Voltage Control with P2P Market}
\label{fig:consensus}\vspace*{-5pt}
\end{figure*}

\subsection{Convergence Results}
In this section, we discuss the convergence results of Algorithm \ref{algo1}. For ease of notation, we drop the $t$ index of the parameters $\theta_i$ for $i\in \mathcal{I}$ in the following discussions.
\begin{comment}
To do so, we need to introduce some concepts and assumptions.

Consider a generic Markov chain $X = \{X_n: n\geq 0 \}$ with a general state space $S$. Assume that $S$ is a topological space, and let $\mathcal{B}(S)$ denote the Borel $\sigma$-field on $S$.
The transition probability kernel of the Markov chain, denoted by $P$, is defined as $P = \{P(s, A), s \in S, A \in \mathcal{B}(S)\}$ such that (i) for each $A \in \mathcal{B}(S)$, $P(\cdot, A)$ is a non-negative measurable function on $S$, and (ii) for each $s \in S$, $P(s, \cdot)$ is a probability measure on $\mathcal{B}(S)$. (This definition is from \cite{meyn2012markov}, Chapter 3.) With the transition kernel of a general state-space Markov chain defined, we present the definition of geometric ergodicity.
\end{comment}

\begin{comment}
Denote $p_{\theta}(ds'|s)$ as the transition kernel of $\{s_t\}_{t\geq0}$ induced by policy $\pi_{\theta}$ and $\Tilde{p}_{\theta}(ds',da'|s,a)$ as the transition kernel of $\{s_t,a_t\}_{t\geq0}$ induced by policy $\pi_{\theta}$. Then
\begin{align}
\label{eq:ergodic1}
p_{\theta}(ds'|s) = \int_\mathcal{A}\pi_{\theta}(a|s)da \cdot p(ds'|s,a), \ \forall s, s' \in \mathcal{S},
\end{align}
and
\begin{align}
\label{eq:ergodic2}
&p_{\theta}(ds',da'|s,a) = p_{\theta}(ds'|s) \cdot \pi_{\theta}(a'|s')da', \\
&\forall s, s' \in \mathcal{S}, \ \forall a, a' \in \mathcal{A},
\end{align}
where $da$ is the Lebesgue measure and $p(ds'|s,a)$ is defined as the transition kernel of the system given action $a$ and state $s$.
To guarantee the convergence of Algorithm \ref{algo1} in the application of voltage control with P2P energy market, we need to start with the following assumptions \cite{zhang2018networked}.
\end{comment}

\begin{definition}(Geometric ergodicity, referred to as uniform ergodicity in \cite{MCMC})
A Markov chain having stationary distribution $\pi(\cdot)$ is geometrically ergodic if
$$
||P^n(x,\cdot) - \pi(\cdot)|| \leq M c^n,\ n = 1, 2, 3, \ldots
$$
for some $0 < c < 1$ and $0 < M < \infty$, where $P$ is the transition kernel of the Markov chain.
\end{definition}
The concept of geometric ergodicity specifies the rate at which a Markov chain converges to its stationary distribution when one exists. In the following, we state the first assumption needed to ensure that the consensus MARL algorithm converges to a steady state; it requires the Markov chains formed by the system states and by the state-action pairs to be geometrically ergodic. Although this assumption cannot be easily verified, it is essential to the convergence proof.
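For instance (a standard textbook illustration, not part of our model), a two-state chain with transition matrix $P=\begin{pmatrix}1-p & p\\ q & 1-q\end{pmatrix}$, $p,q\in(0,1)$, has stationary distribution $\pi=\big(\tfrac{q}{p+q},\tfrac{p}{p+q}\big)$ and satisfies $\|P^n(x,\cdot)-\pi(\cdot)\|\leq |1-p-q|^n$ for all $n\geq 1$; hence it is geometrically ergodic, with $M=1$ and $c=|1-p-q|$ whenever $p+q\neq 1$.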
\begin{assumption}
\label{assump1}
For any $i \in \{1,\dots,I\}$, $s \in \mathcal{S}$ and $\theta_i \in \Theta_i$, agent $i$'s policy, i.e., the conditional probability density over the action space $\mathcal{A}_i$, is positive; that is, $\pi_{\theta_i}\left(a_i \mid s\right)>0$ for all $a_i \in \mathcal{A}_i$. Additionally, $\pi_{\theta_i}\left(\cdot\mid s\right)$ is assumed to be continuously differentiable with respect to the parameter $\theta_i$ over $\Theta_i$. Finally, the Markov chains $\left\{s_t\right\}_{t \geq 0}$ and $\left\{\left(s_t, a_t\right)\right\}_{t \geq 0}$ induced by the agents' collective policies $\pi_{\theta}$ are both geometrically ergodic, with the corresponding stationary distributions denoted by $\rho_{\theta}$ and $\tilde{\rho}_{\theta}$, respectively.
\end{assumption}
\noindent\textbf{Remark 1.} The first part of Assumption \ref{assump1} is a standard assumption on the policy function. One example of such a policy is Gaussian, i.e., $\pi_{\theta_i}(\cdot \mid s) \sim$ $\mathcal{N}\left(\eta_{\theta_i}(s), \Sigma_i\right)$, where $\eta_{\theta_i}: \mathcal{S} \rightarrow \mathcal{A}_i \subseteq \mathbb{R}^{n}$ is a fully connected neural network parametrized by $\theta_i$ and is continuously differentiable with respect to $\theta_i$ and $s$, and $\Sigma_i \in \mathbb{R}^{n \times n}$ is the covariance matrix.

\begin{comment}
The second part, which is the geometrical ergodicity assumption of the Markov chain induced by the MDP and policies is to guarantee it can approach the stationarity can \hl{The second standard assumption on the MDP and policy function as in the past work }\cite{konda1999actor} \hl{before we want to apply an actor-critic algorithm. (AL: if you were the reader, would you be satisfied with the statement? What did you learn from this statement?)}
\end{comment}

\begin{assumption}
\label{assump2}
The policy parameter $\theta_{i}$ for each agent is updated by a local projection operator, $\Gamma^i: \mathbb{R}^{m_i} \rightarrow \Theta_i \subset \mathbb{R}^{m_i}$, that projects any $\theta_{i}$ onto a compact set $\Theta_i$. In addition, $\Theta=\prod_{i=1}^I \Theta_i$ includes at least one local minimum of $J(\theta)$.
\end{assumption}
\noindent\textbf{Remark 2.} The requirement of the local projection operator is standard in convergence analyses of many reinforcement learning algorithms. If $\Theta_i$ is a convex set, a qualifying example of $\Gamma^i$ is the nearest-point projection onto $\Theta_i$; that is,
\begin{align}
\Gamma^i(\theta_i) = \arg \min_{{\theta_i}^*\in \Theta_i} \Vert \theta_i - {\theta_i}^*\Vert_2.
\end{align}
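As a concrete special case (a minimal sketch under the assumption that $\Theta_i$ is a box $[-b,b]^{m_i}$, which is a common practical choice), the nearest-point projection reduces to coordinate-wise clipping:
\begin{verbatim}
import numpy as np

def gamma_box(theta_i, b=10.0):
    # Nearest-point projection onto the box [-b, b]^m:
    # argmin_{y in Theta_i} ||y - theta_i||_2
    return np.clip(theta_i, -b, b)
\end{verbatim}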
Next, we make an assumption on the action-value function approximation.
\begin{assumption}
\label{assump3}
For each agent $i$, the action-value function is approximated by linear functions, i.e., $\hat{Q}(s, a ; \omega)=$ $\omega^{\top} \phi(s, a)$ where $\phi(s, a)=\left[\phi_1(s, a), \cdots, \phi_K(s, a)\right]^{\top} \in \mathbb{R}^K$ is the feature associated with $(s, a)$. The feature function, $\phi_k: \mathcal{S} \times \mathcal{A} \rightarrow \mathbb{R}$, for $k=1,\cdots,K$, is bounded for any $s \in \mathcal{S}, a \in \mathcal{A}$. Furthermore, the feature functions $\left\{\phi_k\right\}_{k=1}^K$ are linearly independent, and for any $u \in \mathbb{R}^K$ and $u \neq 0, u^{\top} \phi$ is not a constant function over $\mathcal{S} \times \mathcal{A}$.
\end{assumption}
\noindent\textbf{Remark 3.} One example of the action-value function approximation is the Gaussian radial basis function (RBF):
\begin{align}
\hat{Q}(s, a ; \omega) = \sum_{j=1}^K \omega_j e^{-\gamma_{j} \Vert [s,a]-c_{j} \Vert } ,
\end{align}
where $[s,a]$ is the concatenation of $s$ and $a$, $\gamma_{j} \in \mathbb{R}^+$ and $c_{j}\in \mathbb{R}^{|\mathcal{S}|+|\mathcal{A}|}$ for $j=1,\cdots,K$. The parameters $\gamma_{j}$ and $c_{j}$ can be chosen arbitrarily, as long as the pairs $(\gamma_{j}, c_{j})$ are distinct for different $j$ so that the feature functions are linearly independent.
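A minimal sketch of such an RBF-based linear critic is shown below; the number of features and the values of $\gamma_j$ and $c_j$ are chosen at random here purely for illustration.
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(1)
K, dim = 32, 6                 # features, dim of [s, a]
c = rng.uniform(-1.0, 1.0, (K, dim))   # centers c_j
g = rng.uniform(0.5, 2.0, K)           # scales gamma_j > 0

def features(s, a):
    # phi_j([s,a]) = exp(-gamma_j * ||[s,a] - c_j||)
    x = np.concatenate([s, a])
    return np.exp(-g * np.linalg.norm(x - c, axis=1))

def q_hat(s, a, omega):
    # linear critic Q(s,a;omega) = omega^T phi(s,a)
    return omega @ features(s, a)
\end{verbatim}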
\begin{assumption}
\label{assump4}
The stepsizes $\beta_{\omega, t}$ and $\beta_{\theta, t}$ in Algorithm \ref{algo1} satisfy
\begin{align}
\sum_t \beta_{\omega, t}=\sum_t \beta_{\theta, t}=\infty,\ \mathrm{and} \ \sum_t (\beta_{\omega, t}^2+\beta_{\theta, t}^2)<\infty .
\end{align}
Also, $\beta_{\theta, t}=o\left(\beta_{\omega, t}\right)$ and $\lim _{t \rightarrow \infty} \beta_{\omega, t+1} \cdot \beta_{\omega, t}^{-1}=1$.
\end{assumption}
An example of such stepsizes can be $\beta_{\omega, t}= 1/t^{0.8}$ and $\beta_{\theta, t}=1/t$.
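Indeed, $\sum_t 1/t^{0.8} = \sum_t 1/t = \infty$, $\sum_t\left(1/t^{1.6}+1/t^{2}\right)<\infty$, $1/t = o\left(1/t^{0.8}\right)$, and $\lim_{t \rightarrow \infty} t^{0.8}/(t+1)^{0.8}=1$, so all conditions in Assumption \ref{assump4} are satisfied.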
With the above assumptions, the convergence of Algorithm \ref{algo1} is given below.
\begin{comment}
To verify it,
\begin{align}
&\sum_t \beta_{\theta, t} = \sum_t 1/t = \infty,\\
&\sum_t \beta_{\omega, t} = \sum_t 1/t^{0.8} = \infty,\\
&\sum_t \beta_{\omega, t}^2+\beta_{\theta, t}^2 =\sum_t 1/t^{1.6} + \sum_t 1/t^2<\infty, \\
&\beta_{\theta, t}=1/t = o(\beta_{\omega, t})= o(1/t^{0.8}), \label{eq:smallO}\\
&\lim _{t \rightarrow \infty} \beta_{\omega, t+1} \cdot \beta_{\omega, t}^{-1} = \lim _{t \rightarrow \infty} \frac{1}{(t+1)^{0.8}}\cdot t^{0.8} = 1.
\end{align}
\end{comment}
\begin{comment}
To summarize, the MDP needs to satisfy Assumption \ref{assump1}, the policy function needs to satisfy Assumption \ref{assump1} and \ref{assump2}, the action-value function approximation needs to satisfy Assumption \ref{assump3}, and the stepsize of the algorithm needs to satisfy Assumption \ref{assump4} so that the convergence of the algorithm can be guaranteed as in Theorem \ref{Theorem}.
\end{comment}

\begin{theorem}
\label{Theorem}
Assume that each agent's state space $\mathcal{S}_i$ is compact for $i = 1, \ldots, I$. Under Assumptions 1 -- 4, the sequences $\left\{\theta_{i,t}\right\}$, $\left\{\Bar{r}_t^i\right\}$ and $\left\{\omega_t^i\right\}$ generated from Algorithm \ref{algo1} satisfy the following.

\begin{itemize}
\item[(i)] Convergence of the critic step: $\frac{1}{I}\lim _{t\rightarrow \infty} \sum_{i \in \mathcal{I}} \Bar{r}_t^i =J(\theta)$, where $J(\theta)$ is defined in \eqref{eq:payoff}, and $\lim _{t\rightarrow \infty} \omega_t^i=\omega_\theta$ for all $i \in \mathcal{I}$, with $\omega_\theta$ being a solution of a fixed point mapping. (The convergence is in the almost sure sense.)
\item[(ii)] Convergence of the actor step: for each $i\in \mathcal{I}$, $\theta_{i,t}$ converges almost surely to a point $\Hat{\theta}_i$ in the compact set $\Theta_i$.
\end{itemize}
\end{theorem}
\begin{proof} The general convergence result of the consensus algorithm is proved in \cite{zhang2018networked} under six assumptions.
Our Assumptions \ref{assump1}, \ref{assump2}, \ref{assump3} and \ref{assump4} directly correspond to four of the assumptions in \cite{zhang2018networked}. The fifth assumption in \cite{zhang2018networked}, regarding agents' time-varying neighborhoods, is trivially true in our setting since we assume agents can communicate through the whole network and the network topology does not change over time. Hence, the only remaining condition to verify is the uniform boundedness of the agents' rewards $r_{i,t}$.

By the definition of the market clearing price $P_t$ in \eqref{eq:Price_SDR}, it is bounded between $FIT$ and $UR$. Together with the compactness assumption on the state space $\mathcal{S}_i$ (which is reasonable since an agent's energy demand and battery/PV capacities are all bounded), the $R_i^m$ component in the reward function, as defined in \eqref{eq:reward}, is uniformly bounded. The other term, the voltage violation penalty $R_i^v$, is uniformly upper bounded by 0 and lower bounded by $-\lambda NM$, based on its definition. Hence, $r_{i,t}$ is uniformly bounded for all $i$ and $t$, and the convergence results follow directly from Theorems 2 and 3 in \cite{zhang2018networked}.
\end{proof}
\noindent\textbf{Remark 4.}
While the theorem establishes the convergence of the critic and actor steps to a steady state, it does not address how good the system reward is upon convergence. A key concern in this context is the potential inadequacy of the linear approximation of the critic function $\hat{Q}(s, a ; \omega)$ in Assumption \ref{assump3}.
In our model, the distribution network encompasses a multitude of agents, each exhibiting non-linear behaviors and interactions that may not be adequately captured by simple linear models. One natural idea is to use deep neural networks (DNNs) instead, as they excel at capturing and modeling non-linear relationships. Specifically, the layered architecture of DNNs allows for the abstraction and hierarchical representation of data, enabling the network to learn rich and intricate representations of the environment. This is particularly crucial in our context, where the environment is characterized by a high degree of stochasticity and dynamic complexity. The downside of using DNNs, however, is that the proof of Theorem \ref{Theorem} is no longer applicable. Consequently, the convergence of the critic and actor steps remains an unresolved issue, meriting further investigation in future research.

\section{Simulation Results}
\label{sec:Sim}
\subsection{Test Data}
We test our approach on the IEEE 13-bus test feeder. In our simulations, each bus hosts one prosumer, except for the substation bus; hence, there are 12 prosumers (aka agents). The length of each time step is set to 1 hour. The distribution network's voltage limit is set to $[0.96 \ pu,1.04 \ pu]$. The voltage violation penalty, $M$, is set at $10^4$. The price ceiling and floor, that is, the utility rate $UR$ and feed-in tariff $FIT$, are set at 14 \textcent /kWh and 5 \textcent/kWh, respectively. The capacity of each agent's PV is set to 30 kW, and the energy storage capacity is set to 50 kWh, with charging and discharging efficiencies of 0.95 and 0.9 for each agent. The PV generation of the prosumers and the total base load of the pure consumers on each bus have fixed diurnal shapes, which represent the mean values at the corresponding hour of the day. Due to the page limit, the details of these shapes can be found in \cite{feng2022decentralized}. The power flow equations \eqref{eq:BusInjection2} are solved by the open-source distribution system simulator OpenDSS \cite{EPRI}.

\subsection{Deep Neural Network Implementation}
As stated in Remark 4, we use DNNs instead of linear combinations of features to approximate the critic function $\hat{Q}$.
To implement the consensus MARL with DNNs, we apply the default architectures of the actor and critic networks of actor-critic algorithms in RLlib \cite{Ray}. To be specific, for the policy function, we use
\begin{align}
\pi_{\theta_i}(\cdot | s) \sim \mathcal{N}\left(\bmat{\sigma^{(1)}_{\theta_i}(s) \\ \sigma^{(2)}_{\theta_i}(s)}, \bmat{\sigma^{(3)}_{\theta_i}(s) & 0\\0 & \sigma^{(4)}_{\theta_i}(s)}\right),
\end{align}
where $\sigma^{(1)}_{\theta_i}(s)$ represents the mean value of the charging/discharging action, and $\sigma^{(2)}_{\theta_i}(s)$ denotes the mean value of the smart inverter action. Similarly, $\sigma^{(3)}_{\theta_i}(s)$ and $\sigma^{(4)}_{\theta_i}(s)$ correspond to the variances of the charging/discharging action and the smart inverter action, respectively.
The function $\sigma_{\theta_i}(s) = \bmat{\sigma^{(1)}_{\theta_i}(s) \\ \cdots \\ \sigma^{(4)}_{\theta_i}(s)}: \mathcal{S} \to \mathbb{R}^4$ is implemented as a DNN, parameterized by $\theta_i$. This network comprises two fully connected layers, each consisting of 256 neurons, and employs a $\tanh$ activation function following each layer. Specifically,
\begin{align}
& \sigma_{\theta_i}(s) = \nonumber\\
&{\theta^{(3)}_{i}} \tanh\Big({\theta^{(2)}_{i}} \tanh \big({\theta^{(1)}_{i}} s + {\theta^{(1)}_{i}}^{bias}\big)+{\theta^{(2)}_{i}}^{bias}\Big)+{\theta^{(3)}_{i}}^{bias},
\end{align}
where $\theta_i = ({\theta^{(1)}_{i}},{\theta^{(1)}_{i}}^{bias},{\theta^{(2)}_{i}},{\theta^{(2)}_{i}}^{bias},{\theta^{(3)}_{i}},{\theta^{(3)}_{i}}^{bias})$ are the policy parameters, ${\theta^{(1)}_{i}}$ is a $256 \times |\mathcal{S}|$ matrix, ${\theta^{(2)}_{i}}$ is a $256 \times 256$ matrix, ${\theta^{(3)}_{i}}$ is a $4 \times 256$ matrix, ${\theta^{(1)}_{i}}^{bias}$ and ${\theta^{(2)}_{i}}^{bias}$ are $256 \times 1$ vectors, ${\theta^{(3)}_{i}}^{bias}$ is a $4 \times 1$ vector, and for any vector $x \in \mathbb{R}^n$,
\begin{align}
\tanh(x):= \bmat{\frac{e^{x_1}-e^{-x_1}}{e^{x_1}+e^{-x_1}}\\ \cdots \\\frac{e^{x_n}-e^{-x_n}}{e^{x_n}+e^{-x_n}}},
\end{align}
where $x_j$, $j = 1, \ldots, n$, is the $j$-th element of $x$.
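A minimal NumPy sketch of this forward pass is given below; the weights are randomly initialized purely for illustration, $|\mathcal{S}|$ is set to a placeholder value, and in an actual implementation the two variance outputs would additionally be constrained to be positive (e.g., by exponentiating them).
\begin{verbatim}
import numpy as np

rng = np.random.default_rng(2)
n_state = 8                  # placeholder for |S|

W1, b1 = rng.standard_normal((256, n_state)), np.zeros(256)
W2, b2 = 0.05*rng.standard_normal((256, 256)), np.zeros(256)
W3, b3 = 0.05*rng.standard_normal((4, 256)),  np.zeros(4)

def sigma_theta(s):
    # two tanh hidden layers (256 units each); 4 outputs:
    # action means sigma^(1), sigma^(2) and variances
    # sigma^(3), sigma^(4)
    h1 = np.tanh(W1 @ s + b1)
    h2 = np.tanh(W2 @ h1 + b2)
    return W3 @ h2 + b3
\end{verbatim}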
A similar neural network architecture is also applied to the approximator of the action-value function $\hat{Q}(\cdot,\cdot;\omega^i)$. The stepsizes are set as $\beta_{\omega, t}= 1/t^{0.65}$ and $\beta_{\theta, t}=1/t^{0.85}$.

\subsection{Comparison of Three Different MARL Algorithms}
We compare the consensus MARL with two other algorithmic frameworks: (i) a fully decentralized framework in which each agent solves their own reinforcement learning problem using the PPO algorithm \cite{feng2022decentralized}, while completely ignoring multi-agent interactions, and (ii) the MADDPG approach, originally proposed in \cite{MADDPG}.
In contrast to maximizing the system's total long-term expected reward, each agent in the fully decentralized framework and in MADDPG optimizes their own long-term expected reward:
\begin{align}
\label{ownpayoff}
\sup_{\theta_i} J_i(\theta_i) = \mathbb{E}_{s_0 \sim \rho_{\theta}}\Bigg[\sum_{t=0}^{\infty} \gamma_i^t r_{i,t}|s_0, \pi_{\theta} \Bigg], \ i = 1, \ldots, I,
\end{align}
where $\gamma_i$ is the discount factor of agent $i$. In the fully decentralized framework, each agent uses the single-agent PPO algorithm from \cite{schulman2017proximal} to update their own policy and value function locally based on only their own state, action, and reward information.
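For completeness, we recall that the local PPO update in \cite{schulman2017proximal} maximizes the clipped surrogate objective
\begin{align}
L_i^{\mathrm{CLIP}}(\theta_i)=\mathbb{E}_t\Big[\min \big(\rho_t(\theta_i) \hat{A}_{i,t},\ \operatorname{clip}\left(\rho_t(\theta_i), 1-\epsilon, 1+\epsilon\right) \hat{A}_{i,t}\big)\Big],
\nonumber
\end{align}
where $\rho_t(\theta_i)=\pi_{\theta_i}\left(a_{i,t} \mid s_{i,t}\right) / \pi_{\theta_i^{\mathrm{old}}}\left(a_{i,t} \mid s_{i,t}\right)$ is the likelihood ratio, $\hat{A}_{i,t}$ is an estimate of agent $i$'s advantage function, and $\epsilon$ is the clipping parameter; here the notation is simply adapted to agent $i$'s local variables.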
Since all three MARL approaches are policy gradient methods, they update the policy parameters in a similar manner via approximate gradient ascent:
\begin{equation}\label{eq:PolicyG}
\theta_{t+1} = \theta_t + \beta \widehat{\nabla J(\theta_t)},
\end{equation}
where $\beta$ is the step size and $\widehat{\nabla J(\theta_t)}$ is a stochastic estimate approximating the gradient of the performance measure. The three MARL approaches differ in how each agent updates $\widehat{\nabla J(\theta_t)}$. To highlight such differences, we present the specific gradient approximations for each of the three approaches below.

In the fully decentralized approach, the gradient update uses local states and actions only. Particularly, for each $i = 1, \ldots, I$,
\begin{align}
\label{PPO}
\widehat{\nabla_{\theta_i} J_i\left(\theta_i\right)}=\mathbb{E}_{s \sim \rho^\theta, a_i \sim \pi_{\theta_i}}\left[\nabla_{\theta_i} \log \pi_{\theta_i}\left(a_i \mid s_i\right) Q_i^\pi\left(s_i ; a_i\right)\right].
\end{align}

In the MADDPG approach, each agent makes decisions based on only their own state variables. However, their action-value functions are updated based on the global states and actions of all agents by a central authority, who is assumed to have access to the global information. Specifically, for each agent $i = 1, \ldots, I$, the gradient approximation in MADDPG is
\begin{align}
\label{MADDPG}
& \widehat{\nabla_{\theta_i} J_i\left(\theta_i\right)}= \\
& \ \mathbb{E}_{s \sim \rho^\theta, a_i \sim \pi_{\theta_i}}\left[\nabla_{\theta_i} \log \pi_{\theta_i}\left(a_i \mid s_i\right) Q_i^\pi\left(\bm{s} ; \bm{a_1, \ldots, a_I}\right)\right],
\nonumber
\end{align}
where $Q_i^\pi\left(\bm{s}; \bm{a_1, \ldots, a_I}\right)$ is the centralized action-value function to be updated by the assumed central authority.
The policy function, $\pi_{\theta_i}\left(a_i \mid s_i\right) $, on the other hand, uses only local states $s_i$. This is what is referred to as centralized training and decentralized execution.

For the consensus framework, the gradient estimation uses the so-called expected policy gradient as in \cite{zhang2018networked}:
\begin{align}
\label{CONSENSUS}
\widehat{\nabla_{\theta_i} J\left(\theta\right)}=\mathbb{E}_{s \sim \rho^\theta, a_{-i} \sim \pi_{\theta_{-i}}} \mathcal{I}_{\theta_i}^Q\left(s, a_{-i}\right),\ i=1, \ldots, I,
\end{align}
where
\begin{align}
\label{eq:EPG}
\mathcal{I}_{\theta_i}^Q\left(s, a_{-i}\right)=\mathbb{E}_{a_i \sim \pi_{\theta_i}} \nabla_{\theta_i} \log \pi_{\theta_i}\left(a_i \mid \bm{s}\right) Q_i^\pi\left(\bm{s} ; \bm{a_1, \ldots, a_I}\right).
\end{align}
While the action-value function $Q_i^\pi\left(\bm{s} ; \bm{a_1, \ldots, a_I}\right)$ is the same as in \eqref{MADDPG}, a key difference lies in the policy function: as stated in Section \ref{subsec:CMDP} and reflected in \eqref{eq:EPG}, each agent $i$'s policy depends on all agents' states $\bm{s}$. The other key difference between the consensus MARL and the MADDPG algorithm is how the action-value function $Q_i^\pi$ is updated. In the MADDPG framework, the function is updated through centralized training by a central authority, while in the consensus MARL, updates are executed via a decentralized consensus process, as described in Section \ref{subsec:alg}.
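The following schematic sketch (with hypothetical helper callables \texttt{glp} for $\nabla_{\theta_i}\log\pi_{\theta_i}$, \texttt{Q\_loc} and \texttt{Q\_cen} for the local and centralized critics, and \texttt{samp} for sampling $a_i\sim\pi_{\theta_i}$) contrasts single-sample versions of the three gradient estimators in \eqref{PPO}, \eqref{MADDPG}, and \eqref{CONSENSUS}--\eqref{eq:EPG}:
\begin{verbatim}
def grad_decentralized(glp, Q_loc, s_i, a_i):
    # Eq. (PPO): local state, local action, local critic
    return glp(s_i, a_i) * Q_loc(s_i, a_i)

def grad_maddpg(glp, Q_cen, s, s_i, a, i):
    # Eq. (MADDPG): local policy input s_i,
    # centralized critic over (s, a_1, ..., a_I)
    return glp(s_i, a[i]) * Q_cen(s, a)

def grad_consensus(glp, Q_cen, samp, s, a, i, n=16):
    # Eqs. (CONSENSUS)/(EPG): the policy sees the global
    # state s; the inner expectation over a_i is sampled
    g = 0.0
    for _ in range(n):
        a_i = samp(s)
        a_m = list(a); a_m[i] = a_i
        g = g + glp(s, a_i) * Q_cen(s, a_m)
    return g / n
\end{verbatim}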
\subsection{Numerical Results}
Figure \ref{fig:reward_con} displays the convergence curves of the 30-episode moving average of the episodic total reward in the distribution network for the three different MARL algorithms.
\begin{figure}[!htb]
\centering
\includegraphics[scale = 0.37]{reward_con.png}
\caption{30-episode moving average of episodic total reward}
\label{fig:reward_con}\vspace*{-5pt}
\end{figure}
It is evident that the mean episodic total reward gradually converges to a high level by the end of training under all three frameworks, indicating the effective convergence of the consensus-update framework. Furthermore, the agents in the consensus-update framework achieve a higher stationary reward level than those in the fully decentralized framework, which is expected since the consensus-update framework benefits from an improved approximation of the action-value function through communication. The reward under MADDPG is higher than that under the fully decentralized PPO, but lower than that under the consensus-update framework. MADDPG benefits from centralized training, which guarantees better performance than the fully decentralized approach; however, there is no communication among agents during decentralized execution in MADDPG, so each agent's decision-making is based only on their own local states, which leads to worse overall performance than the consensus-update framework.

Figure \ref{fig:price_con} presents the P2P market price under both frameworks in chronological order over the last three days after thousands of training episodes.
\begin{figure}[!htb]
\centering
\includegraphics[scale = 0.37]{price_con.png}
\caption{Hourly prices over the last three days}
\label{fig:price_con}\vspace*{-5pt}
\end{figure}
Notably, the market price in the consensus-update framework demonstrates a tendency to be higher during off-peak hours and lower during peak hours when compared to the fully decentralized framework. This indicates that agents employing the consensus-update algorithm have developed more sophisticated trading strategies compared to those using the fully decentralized PPO algorithm. Specifically, the consensus-algorithm-induced strategies involve buying more and selling less during off-peak hours, and conversely, selling more and buying less during peak hours, when compared with the strategies produced by the purely decentralized algorithm.

Figure \ref{fig:violation} shows the gradual reduction of the system voltage deviation after training.
\begin{figure}[!htb]
\centering
\includegraphics[scale = 0.37]{voltage_violation.png}
\caption{System voltage deviation. The total system voltage deviation (p.u.) in the $n$-th episode is calculated as $\sum_{t=24(n-1)}^{24n-1} \sum_{j:Bus}[\max (0, v^j_{t}-\bar{v})+\max (0, \underline{v}-v^j_{t})]$.}
\label{fig:violation}
\end{figure}
It can be seen that the voltage violations are high for all three frameworks at the beginning of training. The violation under the consensus framework drops the fastest and quickly converges to zero, while the other two frameworks also converge to almost zero after about half of the training process. These results are remarkable in the sense that the voltage violation reduction is realized through purely decentralized learning in all three frameworks, with the consensus approach achieving the best performance.

\section{Conclusion and Future Research}
\label{sec:Conclusion}
This study has developed a market and algorithmic framework to enable energy consumers' and prosumers' participation in local P2P energy trading. Utilizing reinforcement learning algorithms, we have automated bidding for agents while ensuring decentralized decision-making. The SDR-based market clearing addresses challenges with zero-marginal-cost resources and simplifies bidding. Additionally, our MARL framework includes voltage constraints of the physical network, setting a foundation towards real-world applications.

However, this research represents just the initial phase in the practical application of a P2P energy trading market, with several challenges ahead. Theoretically, the scalability of the MARL framework is crucial; if the scalability of the consensus MARL algorithm is limited, a mean-field approach, as in \cite{LearningMFG}, could be considered, where agents operate based on a mean-field equilibrium. Practically, while the current model uses discrete, synchronous trading rounds, future work should investigate continuous and asynchronous trading among agents.

Additionally, while integrating transmission constraints into the current framework is straightforward (that is, they can be added to the power flow equations \eqref{eq:BusInjection2}), the allocation of transmission losses raises important equity considerations that must be addressed.

Another critical aspect is cybersecurity. With increased automation, the system becomes vulnerable to cyber attacks, such as malicious users compromising smart inverters to inject false information into the market. In this context, the work in \cite{Gupta} provides valuable insights.

Last but not least, it is essential to investigate how short-term P2P market dynamics influence long-term consumer investment decisions, particularly regarding the adoption of solar panels and energy storage systems.

\begin{comment}
\section*{Appendix. Proof of Theorem \ref{Theorem}}
Before presenting the convergence result, we need to further define some notation. Let $P_{\theta}$ denote the operator over the action value function $Q$ as
\begin{align}
\left(P_\theta Q\right)(s, a)=\int_{\mathcal{S} \times \mathcal{A}} Q\left(s^{\prime}, a^{\prime}\right) p\left(d s^{\prime} \mid s, a\right) d \pi_\theta\left(a^{\prime} \mid s^{\prime}\right)
\end{align}
where $p\left(d s^{\prime} \mid s, a\right)$ is the system state transition kernel. Further define the operator $T_{\theta}$ as
\begin{align}
\left(T_\theta Q\right)(s, a) \ = \ &\mathbb{E}[\frac{1}{I} \sum_{i=1}^I r_{i,t}|s_0=s, a_0=a] \nonumber\\
&-J(\theta)+\left(P_\theta Q\right)(s, a),
\end{align}
for any $(s,a) \in \mathcal{S}\times \mathcal{A}$. Then the convergence result can be presented as follows, which directly follows the proof of Theorem 2 and 3 in \cite{zhang2018networked}.

where $\omega_{\theta}$ is the unique solution to

\begin{align}
\label{eq:fixpoint}
\left\langle\phi, T_\theta\left(\omega_\theta^{\top} \phi\right)-\omega_\theta^{\top} \phi\right\rangle_\theta=0,
\end{align}
with the inner product $\left\langle \cdot, \cdot \right\rangle_\theta$ defined as
\begin{align}
\left\langle F_1, F_2\right\rangle_\theta=\int_{\mathcal{S} \times \mathcal{A}} \tilde{\rho}_\theta(d s, d a) F_1(s, a) \cdot F_2(s, a);
\end{align}

in the set of asymptotically stable equilibria of the ordinary differential equation
\begin{align}
\label{eq:equilibrium}
\frac{d\Hat{\theta}_i(x)}{dx} =\hat{\Gamma}^i\left[\int_{\mathcal{S} \times \mathcal{A}^{-i}} \rho_{\Hat{\theta}\left(d s_t\right)} d \pi_{\Hat{\theta}_{-i}}\left(a_t^{-i} \mid s_t\right) \cdot \widehat{I}_{t, \Hat{\theta}}^{Q, i}\right],
\end{align}
for any $i \in \mathcal{I}$, where $\Hat{\theta}_i$ represents a generic mapping from $\Theta_i$ to $\mathcal{R}$, and
\begin{align}
\begin{split}
& \widehat{I}_{t, \Hat{\theta}}^{Q, i}=\widehat{I}_{t, \Hat{\theta}}^{Q, i}\left(s_t, a_{-i,t}\right) = \\
& \int_{\mathcal{A}_i} \pi_{\Hat{\theta}_i}\left(a_i \mid s_t\right) da_i\nabla_{\Hat{\theta}_i} \log \pi_{\Hat{\theta}_i}\left(a_i \mid s_t\right) \cdot \omega_{\Hat{\theta}^{\top}} \phi\left(s_t, a_i, a_{-i,t}\right),
\end{split}
\end{align}
and
\begin{align}\label{eq:Gamma_hat}
\hat{\Gamma}^i[g(\theta)]=\lim _{\eta \downarrow 0}\left\{\Gamma^i\left[\theta_i+\eta \cdot g(\theta)\right]-\theta_i\right\} / \eta
\end{align}
for any $\theta \in \Theta$ and any continuous function $g: \Theta \rightarrow$ $\mathbb{R}^{\sum_{i \in \mathcal{I}} m_i}$.
\end{comment}

\bibliographystyle{ieeetr}
\bibliography{P2P}

\begin{comment}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{ChenFeng.png}}]{Chen Feng}
is a current Ph.D. student from the School of Industrial Engineering at Purdue University. He received a B.B.A. degree from the Department of Economics and Finance, City University of Hong Kong, Hong Kong, China, and a M.S. degree from the Department of Applied Mathematics and Statistics, Johns Hopkins University, Maryland, USA. His research interests include game theory, mechanism design, reinforcement learning, and their applications in the energy system.
\end{IEEEbiography}
\begin{IEEEbiography}[{\includegraphics[width=1in,height=1.25in,clip,keepaspectratio]{Pic_AL.jpg}}]{Andrew L. Liu}
received his B.S. degree in Applied Mathematics from the Beijing Institute of Technology, Beijing, China in 2000, and his Ph.D. degree in Applied Mathematics and Statistics from the Johns Hopkins University, Baltimore, MD in 2009. He is currently an associate professor in the School of Industrial Engineering, Purdue University, West Lafayette, IN. Before joining Purdue, he was a Senior Associate with the Wholesale Power Group at ICF International, Fairfax, VA. His research interests include optimization, game theory and their applications in power systems, smart grid, and environmental policy analysis.
\end{IEEEbiography}
\end{comment}

\end{document}