Hot-keys on this page

r m x p   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1

2

3

4

5

6

7

8

9

10

11

12

13

14

15

16

17

18

19

20

21

22

23

24

25

26

27

28

29

30

31

32

33

34

35

36

37

38

39

40

41

42

43

44

45

46

47

48

49

50

51

52

53

54

55

56

57

58

59

60

61

62

63

64

65

66

67

68

69

70

71

72

73

74

75

76

77

78

79

80

81

82

83

84

85

86

87

88

89

90

91

92

93

94

95

96

97

98

99

100

101

102

103

104

105

106

107

108

109

110

111

112

113

114

115

116

117

118

119

120

121

122

123

124

125

126

127

128

129

130

131

132

133

134

135

136

137

138

139

140

141

142

143

144

145

146

147

148

149

150

151

152

153

154

155

156

157

158

159

160

161

162

163

164

165

166

167

168

169

170

171

172

173

174

175

176

177

178

179

180

181

182

183

184

185

186

187

188

189

190

191

192

193

194

195

196

197

198

199

200

201

202

203

204

205

206

207

208

209

210

211

212

213

214

215

216

217

218

219

220

221

222

223

224

225

226

227

228

229

230

231

232

233

234

235

236

237

238

239

240

241

242

243

244

245

246

247

248

249

250

251

252

253

254

255

256

257

258

259

260

261

262

263

264

265

266

267

268

269

270

271

272

273

274

275

276

277

278

279

280

281

282

283

284

285

286

287

288

289

290

291

292

293

294

295

296

297

298

299

300

301

302

303

304

305

306

307

308

309

310

311

312

313

314

315

316

317

318

319

320

321

322

323

324

325

326

327

328

329

330

331

332

333

334

335

336

337

338

339

340

341

342

343

344

345

346

347

348

349

350

351

352

353

354

355

356

357

358

359

360

361

362

363

364

365

366

367

368

369

370

371

372

373

374

375

376

377

378

379

380

381

382

383

384

385

386

387

388

389

390

391

392

393

394

395

396

397

398

399

400

401

402

403

404

405

406

407

408

409

410

411

412

413

414

415

416

417

418

419

420

421

422

423

424

425

426

427

428

429

430

431

432

433

434

435

436

437

438

439

440

441

442

443

444

445

446

447

448

449

450

451

452

453

454

455

456

457

458

459

460

461

462

463

464

465

466

467

468

469

470

471

472

473

474

475

476

477

478

479

480

481

482

483

484

485

486

487

488

489

490

491

492

493

494

495

496

497

498

499

500

501

502

503

504

505

506

507

508

509

510

511

512

513

514

515

516

517

518

519

520

521

522

523

524

525

526

527

528

529

530

531

532

533

534

535

536

537

538

539

540

541

542

543

544

545

546

547

548

549

550

551

552

553

554

555

556

557

558

559

560

561

562

563

564

565

566

567

568

569

570

571

572

573

574

575

576

577

578

579

580

581

582

583

584

585

586

587

588

589

590

591

592

593

594

595

596

597

598

599

600

601

602

603

604

605

606

607

608

609

610

611

612

613

614

615

616

617

618

619

620

621

622

623

624

625

626

627

628

629

630

631

632

633

634

635

636

637

638

639

640

641

642

643

644

645

646

647

648

649

650

651

652

653

654

655

656

657

658

659

660

661

662

663

664

665

666

667

668

669

670

671

672

673

674

675

676

677

678

679

680

681

682

683

684

685

686

687

688

689

690

691

692

693

694

695

696

697

698

699

700

701

702

703

704

705

706

707

708

709

710

711

712

713

714

715

716

717

718

719

720

721

722

723

724

725

726

727

728

729

730

731

732

733

734

735

736

737

738

739

740

741

742

743

744

745

746

747

748

749

750

751

752

753

754

755

756

757

758

759

760

761

762

763

764

765

766

767

768

769

770

771

772

773

774

775

776

777

778

779

780

781

782

783

784

785

786

787

788

789

790

791

792

793

794

795

796

797

798

799

800

801

802

803

804

805

806

807

808

809

810

811

812

813

814

815

816

817

818

819

820

821

822

823

824

825

826

827

828

829

830

831

832

833

834

835

836

837

838

839

840

841

842

843

844

845

846

847

848

849

850

851

852

853

854

855

856

857

858

859

860

861

862

863

864

865

866

867

868

869

870

871

872

873

874

875

876

877

878

879

880

881

882

883

884

885

886

887

888

889

890

891

892

893

894

895

896

897

898

899

900

901

902

903

904

905

906

907

908

909

910

911

912

913

914

915

916

917

918

919

920

921

922

923

924

925

926

927

928

929

930

931

932

933

934

935

936

937

938

939

940

941

942

943

944

945

946

947

948

949

950

951

952

953

954

955

956

957

958

959

960

961

962

963

964

965

966

967

968

969

970

971

972

973

974

975

976

977

978

979

980

981

982

983

984

985

986

987

988

989

990

991

992

993

994

995

996

997

998

999

1000

1001

1002

1003

1004

1005

1006

1007

1008

1009

1010

1011

1012

1013

1014

1015

1016

1017

1018

1019

1020

1021

1022

1023

1024

1025

1026

1027

1028

1029

1030

1031

1032

1033

1034

1035

1036

1037

1038

1039

1040

1041

1042

1043

1044

1045

1046

1047

1048

1049

1050

1051

1052

1053

1054

1055

1056

1057

1058

1059

1060

1061

1062

1063

1064

1065

1066

1067

1068

1069

1070

1071

1072

1073

1074

1075

1076

1077

1078

1079

1080

1081

1082

1083

1084

1085

1086

1087

1088

1089

1090

1091

1092

1093

1094

1095

1096

1097

1098

1099

1100

1101

1102

1103

1104

1105

1106

1107

1108

1109

1110

1111

1112

1113

1114

1115

1116

1117

1118

1119

1120

1121

1122

1123

1124

1125

1126

1127

1128

1129

1130

1131

1132

1133

1134

1135

1136

1137

1138

1139

1140

1141

1142

1143

1144

1145

1146

1147

1148

1149

1150

1151

1152

1153

1154

1155

1156

1157

1158

1159

1160

1161

1162

1163

1164

1165

1166

1167

1168

1169

1170

1171

1172

1173

1174

1175

1176

1177

1178

1179

1180

1181

1182

1183

1184

1185

1186

1187

1188

1189

1190

1191

1192

1193

1194

1195

1196

1197

1198

1199

1200

1201

1202

1203

1204

1205

1206

1207

1208

1209

1210

1211

1212

1213

1214

1215

1216

1217

1218

1219

1220

1221

1222

1223

1224

1225

1226

1227

1228

1229

1230

1231

1232

1233

1234

1235

1236

1237

1238

1239

1240

1241

1242

1243

1244

1245

1246

1247

1248

1249

1250

1251

1252

1253

1254

1255

1256

1257

1258

1259

1260

1261

1262

1263

1264

1265

1266

1267

1268

1269

1270

1271

1272

1273

1274

1275

1276

1277

1278

1279

1280

1281

1282

1283

1284

1285

1286

1287

1288

1289

1290

1291

1292

1293

1294

1295

1296

1297

1298

1299

1300

1301

1302

1303

1304

1305

1306

1307

1308

1309

1310

1311

1312

1313

1314

1315

1316

1317

1318

1319

1320

1321

1322

1323

1324

1325

1326

1327

1328

1329

1330

1331

1332

1333

1334

1335

1336

1337

1338

1339

1340

1341

1342

1343

1344

1345

1346

1347

1348

1349

1350

1351

1352

1353

1354

1355

1356

1357

1358

1359

1360

1361

1362

1363

1364

1365

1366

1367

1368

1369

1370

1371

1372

1373

1374

1375

1376

1377

1378

1379

1380

1381

1382

1383

1384

1385

1386

1387

1388

1389

1390

1391

1392

1393

1394

1395

1396

1397

1398

1399

1400

1401

1402

1403

1404

1405

1406

1407

1408

1409

1410

1411

1412

1413

1414

1415

1416

1417

1418

1419

1420

1421

1422

1423

1424

1425

1426

1427

1428

1429

1430

1431

1432

1433

1434

1435

1436

1437

1438

1439

1440

1441

1442

1443

1444

1445

1446

1447

1448

1449

1450

1451

1452

1453

1454

1455

1456

1457

1458

1459

1460

1461

1462

1463

1464

1465

1466

1467

1468

1469

1470

1471

1472

1473

1474

1475

1476

1477

1478

1479

1480

1481

1482

1483

1484

1485

1486

1487

1488

1489

1490

1491

1492

1493

1494

1495

1496

1497

1498

1499

1500

1501

1502

1503

1504

1505

1506

1507

1508

1509

1510

1511

1512

1513

1514

1515

1516

1517

1518

1519

1520

1521

1522

1523

1524

1525

1526

1527

1528

1529

1530

1531

1532

1533

1534

1535

1536

1537

1538

1539

1540

1541

1542

1543

1544

1545

1546

1547

1548

1549

1550

1551

1552

1553

1554

1555

1556

1557

1558

1559

1560

1561

1562

1563

1564

1565

1566

1567

1568

1569

1570

1571

1572

1573

1574

1575

1576

1577

1578

1579

1580

1581

1582

1583

1584

1585

1586

1587

1588

1589

1590

1591

1592

1593

1594

1595

1596

1597

1598

1599

1600

1601

1602

1603

1604

1605

1606

1607

1608

1609

1610

1611

1612

1613

1614

1615

1616

1617

1618

1619

1620

1621

1622

1623

1624

1625

1626

1627

1628

1629

1630

1631

1632

1633

1634

1635

1636

1637

1638

1639

1640

1641

1642

1643

1644

1645

1646

1647

1648

1649

1650

1651

1652

1653

1654

1655

1656

1657

1658

1659

1660

1661

1662

1663

1664

1665

1666

1667

1668

1669

1670

1671

1672

1673

1674

1675

1676

1677

1678

1679

1680

1681

1682

1683

1684

1685

1686

1687

1688

1689

1690

1691

1692

1693

1694

1695

1696

1697

1698

1699

1700

1701

1702

1703

1704

1705

1706

1707

1708

1709

1710

1711

1712

1713

1714

1715

1716

1717

1718

1719

1720

1721

1722

1723

1724

1725

1726

1727

1728

1729

1730

1731

1732

1733

1734

1735

1736

1737

1738

1739

1740

1741

1742

1743

1744

1745

1746

1747

1748

1749

1750

1751

1752

1753

1754

1755

1756

1757

1758

1759

1760

1761

1762

1763

1764

1765

1766

1767

1768

1769

1770

1771

1772

1773

1774

1775

1776

1777

1778

1779

1780

1781

1782

1783

1784

1785

1786

1787

1788

1789

1790

1791

1792

1793

1794

1795

1796

1797

1798

1799

1800

1801

1802

1803

1804

1805

1806

1807

1808

1809

1810

1811

1812

1813

1814

1815

1816

1817

1818

1819

1820

# Copyright 2011 VMware, Inc. 

# All Rights Reserved. 

# 

#    Licensed under the Apache License, Version 2.0 (the "License"); you may 

#    not use this file except in compliance with the License. You may obtain 

#    a copy of the License at 

# 

#         http://www.apache.org/licenses/LICENSE-2.0 

# 

#    Unless required by applicable law or agreed to in writing, software 

#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 

#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 

#    License for the specific language governing permissions and limitations 

#    under the License. 

 

import hashlib 

import signal 

import sys 

import time 

import uuid 

 

import functools 

import netaddr 

from oslo_config import cfg 

from oslo_log import log as logging 

import oslo_messaging 

from oslo_service import loopingcall 

import six 

from six import moves 

 

from neutron.agent.common import ovs_lib 

from neutron.agent.common import polling 

from neutron.agent.common import utils 

from neutron.agent.l2.extensions import manager as ext_manager 

from neutron.agent.linux import ip_lib 

from neutron.agent.linux.iptables_firewall import port_needs_l3_security 

from neutron.agent import rpc as agent_rpc 

from neutron.agent import securitygroups_rpc as sg_rpc 

from neutron.api.rpc.handlers import dvr_rpc 

from neutron.common import config 

from neutron.common import constants as n_const 

from neutron.common import exceptions 

from neutron.common import topics 

from neutron.common import utils as n_utils 

from neutron import context 

from neutron.i18n import _LE, _LI, _LW 

from neutron.plugins.common import constants as p_const 

from neutron.plugins.ml2.drivers.l2pop.rpc_manager import l2population_rpc 

from neutron.plugins.ml2.drivers.openvswitch.agent.common \ 

    import constants 

from neutron.plugins.ml2.drivers.openvswitch.agent \ 

    import ovs_dvr_neutron_agent 

 

 

# Module-level logger for this agent.
LOG = logging.getLogger(__name__)
# Pull in the AGENT and OVS option groups so self.conf.AGENT.* and
# self.conf.OVS.* lookups below resolve.
cfg.CONF.import_group('AGENT', 'neutron.plugins.ml2.drivers.openvswitch.'
                      'agent.common.config')
cfg.CONF.import_group('OVS', 'neutron.plugins.ml2.drivers.openvswitch.agent.'
                      'common.config')


# A placeholder for dead vlans.
DEAD_VLAN_TAG = p_const.MAX_VLAN_TAG + 1
# Mask used to fold a 128-bit UUID into a 64-bit agent stamp.
UINT64_BITMASK = (1 << 64) - 1

 

 

class _mac_mydialect(netaddr.mac_unix):
    """MAC dialect: colon-separated, zero-padded lowercase hex octets."""
    # e.g. '00:0a:1b:2c:3d:4e' instead of netaddr's default '0:a:1b:...'
    word_fmt = '%.2x'

 

 

class DeviceListRetrievalError(exceptions.NeutronException):
    """Raised when port details cannot be retrieved for a set of devices."""
    message = _("Unable to retrieve port details for devices: %(devices)s ")

 

 

# A class to track a network's local VLAN and the VIF ports (i.e., ports
# that have 'iface-id' and 'vif-mac' attributes set) that belong to it.

class LocalVLANMapping(object):
    """Bookkeeping for one virtual network's local VLAN on this hypervisor.

    Records the physical realization of the network (type, physical
    network, segmentation id), the VIF ports attached to it, and the set
    of tunnel ofports that flooded traffic must be sent out of.
    """

    def __init__(self, vlan, network_type, physical_network, segmentation_id,
                 vif_ports=None):
        # Guard against the shared-mutable-default pitfall for vif_ports.
        self.vif_ports = {} if vif_ports is None else vif_ports
        self.vlan = vlan
        self.network_type = network_type
        self.physical_network = physical_network
        self.segmentation_id = segmentation_id
        # set of tunnel ports on which packets should be flooded
        self.tun_ofports = set()

    def __str__(self):
        details = (self.vlan, self.network_type, self.physical_network,
                   self.segmentation_id)
        return "lv-id = %s type = %s phys-net = %s phys-id = %s" % details

 

 

class OVSPluginApi(agent_rpc.PluginApi):
    """Agent-side RPC proxy to the ML2 plugin; inherits all calls unchanged."""
    pass

 

 

class OVSNeutronAgent(sg_rpc.SecurityGroupAgentRpcCallbackMixin,
                      l2population_rpc.L2populationRpcCallBackTunnelMixin,
                      dvr_rpc.DVRAgentRpcCallbackMixin):
    '''Implements OVS-based tunneling, VLANs and flat networks.

    Two local bridges are created: an integration bridge (defaults to
    'br-int') and a tunneling bridge (defaults to 'br-tun'). An
    additional bridge is created for each physical network interface
    used for VLANs and/or flat networks.

    All VM VIFs are plugged into the integration bridge. VM VIFs on a
    given virtual network share a common "local" VLAN (i.e. not
    propagated externally). The VLAN id of this local VLAN is mapped
    to the physical networking details realizing that virtual network.

    For virtual networks realized as GRE tunnels, a Logical Switch
    (LS) identifier is used to differentiate tenant traffic on
    inter-HV tunnels. A mesh of tunnels is created to other
    Hypervisors in the cloud. These tunnels originate and terminate on
    the tunneling bridge of each hypervisor. Port patching is done to
    connect local VLANs on the integration bridge to inter-hypervisor
    tunnels on the tunnel bridge.

    For each virtual network realized as a VLAN or flat network, a
    veth or a pair of patch ports is used to connect the local VLAN on
    the integration bridge with the physical network bridge, with flow
    rules adding, modifying, or stripping VLAN tags as necessary.
    '''

    # history
    #   1.0 Initial version
    #   1.1 Support Security Group RPC
    #   1.2 Support DVR (Distributed Virtual Router) RPC
    #   1.3 Added param devices_to_update to security_groups_provider_updated
    # RPC API version this agent exposes to callers (see history above).
    target = oslo_messaging.Target(version='1.3')

 

    def __init__(self, bridge_classes, integ_br, tun_br, local_ip,
                 bridge_mappings, polling_interval, tunnel_types=None,
                 veth_mtu=None, l2_population=False,
                 enable_distributed_routing=False,
                 minimize_polling=False,
                 ovsdb_monitor_respawn_interval=(
                     constants.DEFAULT_OVSDBMON_RESPAWN),
                 arp_responder=False,
                 prevent_arp_spoofing=True,
                 use_veth_interconnection=False,
                 quitting_rpc_timeout=None,
                 conf=None):
        '''Constructor.

        :param bridge_classes: a dict for bridge classes.
        :param integ_br: name of the integration bridge.
        :param tun_br: name of the tunnel bridge.
        :param local_ip: local IP address of this hypervisor.
        :param bridge_mappings: mappings from physical network name to bridge.
        :param polling_interval: interval (secs) to poll DB.
        :param tunnel_types: A list of tunnel types to enable support for in
               the agent. If set, will automatically set enable_tunneling to
               True.
        :param veth_mtu: MTU size for veth interfaces.
        :param l2_population: Optional, whether L2 population is turned on
        :param enable_distributed_routing: Optional, whether DVR (distributed
               virtual routing) support is enabled.
        :param minimize_polling: Optional, whether to minimize polling by
               monitoring ovsdb for interface changes.
        :param ovsdb_monitor_respawn_interval: Optional, when using polling
               minimization, the number of seconds to wait before respawning
               the ovsdb monitor.
        :param arp_responder: Optional, enable local ARP responder if it is
               supported.
        :param prevent_arp_spoofing: Optional, enable suppression of any ARP
               responses from ports that don't match an IP address that belongs
               to the ports. Spoofing rules will not be added to ports that
               have port security disabled.
        :param use_veth_interconnection: use veths instead of patch ports to
               interconnect the integration bridge to physical bridges.
        :param quitting_rpc_timeout: timeout in seconds for rpc calls after
               SIGTERM is received
        :param conf: an instance of ConfigOpts
        '''
        super(OVSNeutronAgent, self).__init__()
        self.conf = conf or cfg.CONF

        # init bridge classes with configured datapath type.
        self.br_int_cls, self.br_phys_cls, self.br_tun_cls = (
            functools.partial(bridge_classes[b],
                              datapath_type=self.conf.OVS.datapath_type)
            for b in ('br_int', 'br_phys', 'br_tun'))

        self.use_veth_interconnection = use_veth_interconnection
        self.veth_mtu = veth_mtu
        self.available_local_vlans = set(moves.range(p_const.MIN_VLAN_TAG,
                                                     p_const.MAX_VLAN_TAG))
        # The first state report is a blocking call; flipped to False by
        # _report_state() after the first successful report.
        self.use_call = True
        self.tunnel_types = tunnel_types or []
        self.l2_pop = l2_population
        # TODO(ethuleau): Change ARP responder so it's not dependent on the
        #                 ML2 l2 population mechanism driver.
        self.enable_distributed_routing = enable_distributed_routing
        self.arp_responder_enabled = arp_responder and self.l2_pop
        self.prevent_arp_spoofing = prevent_arp_spoofing

        # State dictionary periodically pushed to the plugin by
        # _report_state(); 'start_flag' is dropped after the first report.
        self.agent_state = {
            'binary': 'neutron-openvswitch-agent',
            'host': self.conf.host,
            'topic': n_const.L2_AGENT_TOPIC,
            'configurations': {'bridge_mappings': bridge_mappings,
                               'tunnel_types': self.tunnel_types,
                               'tunneling_ip': local_ip,
                               'l2_population': self.l2_pop,
                               'arp_responder_enabled':
                               self.arp_responder_enabled,
                               'enable_distributed_routing':
                               self.enable_distributed_routing,
                               'log_agent_heartbeats':
                               self.conf.AGENT.log_agent_heartbeats},
            'agent_type': n_const.AGENT_TYPE_OVS,
            'start_flag': True}

        if tunnel_types:
            self.enable_tunneling = True
        else:
            self.enable_tunneling = False

        # Validate agent configurations
        self._check_agent_configurations()

        # Keep track of int_br's device count for use by _report_state()
        self.int_br_device_count = 0

        # 64-bit stamp identifying this agent run.
        self.agent_uuid_stamp = uuid.uuid4().int & UINT64_BITMASK

        self.int_br = self.br_int_cls(integ_br)
        self.setup_integration_br()
        # Stores port update notifications for processing in main rpc loop
        self.updated_ports = set()
        # Stores port delete notifications
        self.deleted_ports = set()
        # keeps association between ports and ofports to detect ofport change
        self.vifname_to_ofport_map = {}
        # setup_rpc() creates self.connection, self.context and the
        # *_plugin_rpc clients used below.
        self.setup_rpc()
        self.init_extension_manager(self.connection)
        self.bridge_mappings = bridge_mappings
        self.setup_physical_bridges(self.bridge_mappings)
        self.local_vlan_map = {}
        self.tun_br_ofports = {p_const.TYPE_GRE: {},
                               p_const.TYPE_VXLAN: {}}

        self.polling_interval = polling_interval
        self.minimize_polling = minimize_polling
        self.ovsdb_monitor_respawn_interval = ovsdb_monitor_respawn_interval
        self.local_ip = local_ip
        self.tunnel_count = 0
        self.vxlan_udp_port = self.conf.AGENT.vxlan_udp_port
        self.dont_fragment = self.conf.AGENT.dont_fragment
        self.tun_br = None
        self.patch_int_ofport = constants.OFPORT_INVALID
        self.patch_tun_ofport = constants.OFPORT_INVALID
        if self.enable_tunneling:
            # The patch_int_ofport and patch_tun_ofport are updated
            # here inside the call to setup_tunnel_br()
            self.setup_tunnel_br(tun_br)

        self.dvr_agent = ovs_dvr_neutron_agent.OVSDVRNeutronAgent(
            self.context,
            self.dvr_plugin_rpc,
            self.int_br,
            self.tun_br,
            self.bridge_mappings,
            self.phys_brs,
            self.int_ofports,
            self.phys_ofports,
            self.patch_int_ofport,
            self.patch_tun_ofport,
            self.conf.host,
            self.enable_tunneling,
            self.enable_distributed_routing)

        # Periodic state reporting; disabled when report_interval is 0/unset.
        report_interval = self.conf.AGENT.report_interval
        if report_interval:
            heartbeat = loopingcall.FixedIntervalLoopingCall(
                self._report_state)
            heartbeat.start(interval=report_interval)

        if self.enable_tunneling:
            self.setup_tunnel_br_flows()

        self.dvr_agent.setup_dvr_flows()

        # Collect additional bridges to monitor
        self.ancillary_brs = self.setup_ancillary_bridges(integ_br, tun_br)

        # In order to keep existed device's local vlan unchanged,
        # restore local vlan mapping at start
        self._restore_local_vlan_map()

        # Security group agent support
        self.sg_agent = sg_rpc.SecurityGroupAgentRpc(self.context,
                self.sg_plugin_rpc, self.local_vlan_map,
                defer_refresh_firewall=True)

        # Initialize iteration counter
        self.iter_num = 0
        self.run_daemon_loop = True

        self.catch_sigterm = False
        self.catch_sighup = False

        # The initialization is complete; we can start receiving messages
        self.connection.consume_in_threads()

        self.quitting_rpc_timeout = quitting_rpc_timeout

 

    def _report_state(self): 

        # How many devices are likely used by a VM 

        self.agent_state.get('configurations')['devices'] = ( 

            self.int_br_device_count) 

        self.agent_state.get('configurations')['in_distributed_mode'] = ( 

            self.dvr_agent.in_distributed_mode()) 

 

        try: 

            self.state_rpc.report_state(self.context, 

                                        self.agent_state, 

                                        self.use_call) 

            self.use_call = False 

            self.agent_state.pop('start_flag', None) 

        except Exception: 

            LOG.exception(_LE("Failed reporting state!")) 

 

    def _restore_local_vlan_map(self):
        """Rebuild self.local_vlan_map from tags already set on int_br ports.

        Run once at startup so that devices wired before an agent restart
        keep their previous local VLAN assignment.  Ports tagged with the
        dead VLAN or without a recorded net_uuid are skipped.
        """
        cur_ports = self.int_br.get_vif_ports()
        port_names = [p.port_name for p in cur_ports]
        port_info = self.int_br.get_ports_attributes(
            "Port", columns=["name", "other_config", "tag"], ports=port_names)
        by_name = {x['name']: x for x in port_info}
        for port in cur_ports:
            # if a port was deleted between get_vif_ports and
            # get_ports_attributes, we
            # will get a KeyError
            try:
                local_vlan_map = by_name[port.port_name]['other_config']
                local_vlan = by_name[port.port_name]['tag']
            except KeyError:
                continue
            if not local_vlan:
                continue
            net_uuid = local_vlan_map.get('net_uuid')
            if (net_uuid and net_uuid not in self.local_vlan_map
                and local_vlan != DEAD_VLAN_TAG):
                self.provision_local_vlan(local_vlan_map['net_uuid'],
                                          local_vlan_map['network_type'],
                                          local_vlan_map['physical_network'],
                                          local_vlan_map['segmentation_id'],
                                          local_vlan)

 

    def setup_rpc(self):
        """Create RPC clients, the admin context and the consumer connection.

        Sets up the plugin-facing APIs (ports, security groups, DVR, state
        reporting) and registers this agent as the endpoint for the topics
        it listens on.  Consumers are created with start_listening=False;
        message consumption is started later from __init__ once agent
        initialization is complete.
        """
        self.agent_id = 'ovs-agent-%s' % self.conf.host
        self.topic = topics.AGENT
        self.plugin_rpc = OVSPluginApi(topics.PLUGIN)
        self.sg_plugin_rpc = sg_rpc.SecurityGroupServerRpcApi(topics.PLUGIN)
        self.dvr_plugin_rpc = dvr_rpc.DVRServerRpcApi(topics.PLUGIN)
        self.state_rpc = agent_rpc.PluginReportStateAPI(topics.PLUGIN)

        # RPC network init
        self.context = context.get_admin_context_without_session()
        # Handle updates from service
        self.endpoints = [self]
        # Define the listening consumers for the agent
        consumers = [[topics.PORT, topics.UPDATE],
                     [topics.PORT, topics.DELETE],
                     [constants.TUNNEL, topics.UPDATE],
                     [constants.TUNNEL, topics.DELETE],
                     [topics.SECURITY_GROUP, topics.UPDATE],
                     [topics.DVR, topics.UPDATE]]
        if self.l2_pop:
            # L2 population updates are scoped to this host.
            consumers.append([topics.L2POPULATION,
                              topics.UPDATE, self.conf.host])
        self.connection = agent_rpc.create_consumers(self.endpoints,
                                                     self.topic,
                                                     consumers,
                                                     start_listening=False)

 

    def init_extension_manager(self, connection): 

        ext_manager.register_opts(self.conf) 

        self.ext_manager = ( 

            ext_manager.AgentExtensionsManager(self.conf)) 

        self.ext_manager.initialize( 

            connection, constants.EXTENSION_DRIVER_TYPE) 

 

    def get_net_uuid(self, vif_id): 

        for network_id, vlan_mapping in six.iteritems(self.local_vlan_map): 

387            if vif_id in vlan_mapping.vif_ports: 

                return network_id 

 

    def port_update(self, context, **kwargs): 

        port = kwargs.get('port') 

        # Put the port identifier in the updated_ports set. 

        # Even if full port details might be provided to this call, 

        # they are not used since there is no guarantee the notifications 

        # are processed in the same order as the relevant API requests 

        self.updated_ports.add(port['id']) 

        LOG.debug("port_update message processed for port %s", port['id']) 

 

    def port_delete(self, context, **kwargs): 

        port_id = kwargs.get('port_id') 

        self.deleted_ports.add(port_id) 

        LOG.debug("port_delete message processed for port %s", port_id) 

 

    def process_deleted_ports(self, port_info):
        """Tear down filtering and wiring for ports deleted via RPC.

        :param port_info: dict from the scan step; its 'removed' set (if
            present) names ports already gone from the bridge, which are
            excluded here to avoid double processing.
        """
        # don't try to process removed ports as deleted ports since
        # they are already gone
        if 'removed' in port_info:
            self.deleted_ports -= port_info['removed']
        while self.deleted_ports:
            port_id = self.deleted_ports.pop()
            # Flush firewall rules and move to dead VLAN so deleted ports no
            # longer have access to the network
            self.sg_agent.remove_devices_filter([port_id])
            port = self.int_br.get_vif_port_by_id(port_id)
            self.ext_manager.delete_port(self.context,
                                         {"vif_port": port,
                                          "port_id": port_id})
            if port:
                # don't log errors since there is a chance someone will be
                # removing the port from the bridge at the same time
                self.port_dead(port, log_errors=False)
            self.port_unbound(port_id)

 

    def tunnel_update(self, context, **kwargs):
        """Handle a tunnel-update RPC: wire a tunnel port to a remote peer.

        The notification is silently ignored when tunneling is disabled,
        the payload is incomplete or names an unsupported tunnel type, or
        the peer is this host itself.  When l2_pop is enabled, tunnel
        ports are managed by the fdb handlers instead, so no port is
        created here.
        """
        LOG.debug("tunnel_update received")
        if not self.enable_tunneling:
            return
        tunnel_ip = kwargs.get('tunnel_ip')
        tunnel_ip_hex = self.get_ip_in_hex(tunnel_ip)
        if not tunnel_ip_hex:
            return
        tunnel_type = kwargs.get('tunnel_type')
        if not tunnel_type:
            LOG.error(_LE("No tunnel_type specified, cannot create tunnels"))
            return
        if tunnel_type not in self.tunnel_types:
            LOG.error(_LE("tunnel_type %s not supported by agent"),
                      tunnel_type)
            return
        if tunnel_ip == self.local_ip:
            # Notification about our own endpoint; nothing to wire up.
            return
        tun_name = '%s-%s' % (tunnel_type, tunnel_ip_hex)
        if not self.l2_pop:
            self._setup_tunnel_port(self.tun_br, tun_name, tunnel_ip,
                                    tunnel_type)

 

    def tunnel_delete(self, context, **kwargs):
        """Handle a tunnel-delete RPC: remove the port to a departed peer.

        Validates the payload, then looks up and cleans up the tunnel
        ofport (if any) on the tunnel bridge.
        """
        LOG.debug("tunnel_delete received")
        if not self.enable_tunneling:
            return
        tunnel_ip = kwargs.get('tunnel_ip')
        if not tunnel_ip:
            LOG.error(_LE("No tunnel_ip specified, cannot delete tunnels"))
            return
        tunnel_type = kwargs.get('tunnel_type')
        if not tunnel_type:
            LOG.error(_LE("No tunnel_type specified, cannot delete tunnels"))
            return
        if tunnel_type not in self.tunnel_types:
            LOG.error(_LE("tunnel_type %s not supported by agent"),
                      tunnel_type)
            return
        ofport = self.tun_br_ofports[tunnel_type].get(tunnel_ip)
        self.cleanup_tunnel_port(self.tun_br, ofport, tunnel_type)

 

    def _tunnel_port_lookup(self, network_type, remote_ip): 

        return self.tun_br_ofports[network_type].get(remote_ip) 

 

    def fdb_add(self, context, fdb_entries):
        """Handle l2pop fdb additions for networks hosted on this agent."""
        LOG.debug("fdb_add received")
        for lvm, agent_ports in self.get_agent_ports(fdb_entries,
                                                     self.local_vlan_map):
            # Entries for this host itself need no tunnel flows.
            agent_ports.pop(self.local_ip, None)
            if len(agent_ports):
                if not self.enable_distributed_routing:
                    # Batch the flow modifications for efficiency.
                    with self.tun_br.deferred() as deferred_br:
                        self.fdb_add_tun(context, deferred_br, lvm,
                                         agent_ports, self._tunnel_port_lookup)
                else:
                    self.fdb_add_tun(context, self.tun_br, lvm,
                                     agent_ports, self._tunnel_port_lookup)

 

    def fdb_remove(self, context, fdb_entries):
        """Handle l2pop fdb removals for networks hosted on this agent."""
        LOG.debug("fdb_remove received")
        for lvm, agent_ports in self.get_agent_ports(fdb_entries,
                                                     self.local_vlan_map):
            # Entries for this host itself have no tunnel flows to remove.
            agent_ports.pop(self.local_ip, None)
            if len(agent_ports):
                if not self.enable_distributed_routing:
                    # Batch the flow modifications for efficiency.
                    with self.tun_br.deferred() as deferred_br:
                        self.fdb_remove_tun(context, deferred_br, lvm,
                                            agent_ports,
                                            self._tunnel_port_lookup)
                else:
                    self.fdb_remove_tun(context, self.tun_br, lvm,
                                        agent_ports, self._tunnel_port_lookup)

 

    def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): 

        if port_info == n_const.FLOODING_ENTRY: 

            lvm.tun_ofports.add(ofport) 

            br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, 

                                    lvm.tun_ofports) 

        else: 

            self.setup_entry_for_arp_reply(br, 'add', lvm.vlan, 

                                           port_info.mac_address, 

                                           port_info.ip_address) 

            br.install_unicast_to_tun(lvm.vlan, 

                                      lvm.segmentation_id, 

                                      ofport, 

                                      port_info.mac_address) 

 

    def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport): 

        if port_info == n_const.FLOODING_ENTRY: 

            if ofport not in lvm.tun_ofports: 

                LOG.debug("attempt to remove a non-existent port %s", ofport) 

                return 

            lvm.tun_ofports.remove(ofport) 

524            if len(lvm.tun_ofports) > 0: 

                br.install_flood_to_tun(lvm.vlan, lvm.segmentation_id, 

                                        lvm.tun_ofports) 

            else: 

                # This local vlan doesn't require any more tunnelling 

                br.delete_flood_to_tun(lvm.vlan) 

        else: 

            self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan, 

                                           port_info.mac_address, 

                                           port_info.ip_address) 

            br.delete_unicast_to_tun(lvm.vlan, port_info.mac_address) 

 

    def _fdb_chg_ip(self, context, fdb_entries):
        # l2pop callback: a port's IP changed; refresh the related entries
        # on the tunnel bridge in one deferred (batched) pass.
        LOG.debug("update chg_ip received")
        with self.tun_br.deferred() as deferred_br:
            self.fdb_chg_ip_tun(context, deferred_br, fdb_entries,
                                self.local_ip, self.local_vlan_map)

 

    def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address, 

                                  ip_address): 

        '''Set the ARP respond entry. 

 

        When the l2 population mechanism driver and OVS supports to edit ARP 

        fields, a table (ARP_RESPONDER) to resolve ARP locally is added to the 

        tunnel bridge. 

        ''' 

546        if not self.arp_responder_enabled: 

            return 

 

        mac = str(netaddr.EUI(mac_address, dialect=_mac_mydialect)) 

        ip = str(netaddr.IPAddress(ip_address)) 

 

        if action == 'add': 

            br.install_arp_responder(local_vid, ip, mac) 

556        elif action == 'remove': 

            br.delete_arp_responder(local_vid, ip) 

        else: 

            LOG.warning(_LW('Action %s not supported'), action) 

 

    def _local_vlan_for_flat(self, lvid, physical_network): 

        phys_br = self.phys_brs[physical_network] 

        phys_port = self.phys_ofports[physical_network] 

        int_br = self.int_br 

        int_port = self.int_ofports[physical_network] 

        phys_br.provision_local_vlan(port=phys_port, lvid=lvid, 

                                     segmentation_id=None, 

                                     distributed=False) 

        int_br.provision_local_vlan(port=int_port, lvid=lvid, 

                                    segmentation_id=None) 

 

    def _local_vlan_for_vlan(self, lvid, physical_network, segmentation_id): 

        distributed = self.enable_distributed_routing 

        phys_br = self.phys_brs[physical_network] 

        phys_port = self.phys_ofports[physical_network] 

        int_br = self.int_br 

        int_port = self.int_ofports[physical_network] 

        phys_br.provision_local_vlan(port=phys_port, lvid=lvid, 

                                     segmentation_id=segmentation_id, 

                                     distributed=distributed) 

        int_br.provision_local_vlan(port=int_port, lvid=lvid, 

                                    segmentation_id=segmentation_id) 

 

    def provision_local_vlan(self, net_uuid, network_type, physical_network,
                             segmentation_id, local_vlan=None):
        '''Provisions a local VLAN.

        :param net_uuid: the uuid of the network associated with this vlan.
        :param network_type: the network type ('gre', 'vxlan', 'vlan', 'flat',
                                               'local')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        :param local_vlan: preferred local vlan id; reused only if it is
                           still in self.available_local_vlans.
        '''

        # On a restart or crash of OVS, the network associated with this VLAN
        # will already be assigned, so check for that here before assigning a
        # new one.
        lvm = self.local_vlan_map.get(net_uuid)
        if lvm:
            lvid = lvm.vlan
        else:
            # Prefer the caller-supplied vlan id when it is still free;
            # otherwise take any free id from the pool.
            if local_vlan in self.available_local_vlans:
                lvid = local_vlan
                self.available_local_vlans.remove(local_vlan)
            else:
                if not self.available_local_vlans:
                    LOG.error(_LE("No local VLAN available for net-id=%s"),
                              net_uuid)
                    return
                lvid = self.available_local_vlans.pop()
            self.local_vlan_map[net_uuid] = LocalVLANMapping(lvid,
                                                             network_type,
                                                             physical_network,
                                                             segmentation_id)

        LOG.info(_LI("Assigning %(vlan_id)s as local vlan for "
                     "net-id=%(net_uuid)s"),
                 {'vlan_id': lvid, 'net_uuid': net_uuid})

        # Install flows appropriate to the network type.
        if network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                # outbound broadcast/multicast
                ofports = list(self.tun_br_ofports[network_type].values())
                if ofports:
                    self.tun_br.install_flood_to_tun(lvid,
                                                     segmentation_id,
                                                     ofports)
                # inbound from tunnels: set lvid in the right table
                # and resubmit to Table LEARN_FROM_TUN for mac learning
                if self.enable_distributed_routing:
                    self.dvr_agent.process_tunneled_network(
                        network_type, lvid, segmentation_id)
                else:
                    self.tun_br.provision_local_vlan(
                        network_type=network_type, lvid=lvid,
                        segmentation_id=segmentation_id)
            else:
                LOG.error(_LE("Cannot provision %(network_type)s network for "
                              "net-id=%(net_uuid)s - tunneling disabled"),
                          {'network_type': network_type,
                           'net_uuid': net_uuid})
        elif network_type == p_const.TYPE_FLAT:
            if physical_network in self.phys_brs:
                self._local_vlan_for_flat(lvid, physical_network)
            else:
                LOG.error(_LE("Cannot provision flat network for "
                              "net-id=%(net_uuid)s - no bridge for "
                              "physical_network %(physical_network)s"),
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == p_const.TYPE_VLAN:
            if physical_network in self.phys_brs:
                self._local_vlan_for_vlan(lvid, physical_network,
                                          segmentation_id)
            else:
                LOG.error(_LE("Cannot provision VLAN network for "
                              "net-id=%(net_uuid)s - no bridge for "
                              "physical_network %(physical_network)s"),
                          {'net_uuid': net_uuid,
                           'physical_network': physical_network})
        elif network_type == p_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_LE("Cannot provision unknown network type "
                          "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': network_type,
                       'net_uuid': net_uuid})

 

    def reclaim_local_vlan(self, net_uuid):
        '''Reclaim a local VLAN.

        Removes the network's LocalVLANMapping, tears down the flows that
        were installed for it, and returns the vlan id to the free pool.

        :param net_uuid: the network uuid associated with this vlan.
        '''
        lvm = self.local_vlan_map.pop(net_uuid, None)
        if lvm is None:
            LOG.debug("Network %s not used on agent.", net_uuid)
            return

        LOG.info(_LI("Reclaiming vlan = %(vlan_id)s from "
                     "net-id = %(net_uuid)s"),
                 {'vlan_id': lvm.vlan, 'net_uuid': net_uuid})

        if lvm.network_type in constants.TUNNEL_NETWORK_TYPES:
            if self.enable_tunneling:
                # Drop flood, unicast and ARP-responder flows for this vlan
                # on the tunnel bridge.
                self.tun_br.reclaim_local_vlan(
                    network_type=lvm.network_type,
                    segmentation_id=lvm.segmentation_id)
                self.tun_br.delete_flood_to_tun(lvm.vlan)
                self.tun_br.delete_unicast_to_tun(lvm.vlan, None)
                self.tun_br.delete_arp_responder(lvm.vlan, None)
                if self.l2_pop:
                    # Try to remove tunnel ports if not used by other networks
                    for ofport in lvm.tun_ofports:
                        self.cleanup_tunnel_port(self.tun_br, ofport,
                                                 lvm.network_type)
        elif lvm.network_type == p_const.TYPE_FLAT:
            if lvm.physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[lvm.physical_network]
                br.reclaim_local_vlan(
                    port=self.phys_ofports[lvm.physical_network],
                    lvid=lvm.vlan)
                # inbound
                br = self.int_br
                br.reclaim_local_vlan(
                    port=self.int_ofports[lvm.physical_network],
                    segmentation_id=None)
        elif lvm.network_type == p_const.TYPE_VLAN:
            if lvm.physical_network in self.phys_brs:
                # outbound
                br = self.phys_brs[lvm.physical_network]
                br.reclaim_local_vlan(
                    port=self.phys_ofports[lvm.physical_network],
                    lvid=lvm.vlan)
                # inbound
                br = self.int_br
                br.reclaim_local_vlan(
                    port=self.int_ofports[lvm.physical_network],
                    segmentation_id=lvm.segmentation_id)
        elif lvm.network_type == p_const.TYPE_LOCAL:
            # no flows needed for local networks
            pass
        else:
            LOG.error(_LE("Cannot reclaim unknown network type "
                          "%(network_type)s for net-id=%(net_uuid)s"),
                      {'network_type': lvm.network_type,
                       'net_uuid': net_uuid})

        # Return the vlan id to the pool for reuse.
        self.available_local_vlans.add(lvm.vlan)

 

    def port_bound(self, port, net_uuid,
                   network_type, physical_network,
                   segmentation_id, fixed_ips, device_owner,
                   ovs_restarted):
        '''Bind port to net_uuid/lsw_id and install flow for inbound traffic
        to vm.

        :param port: a ovs_lib.VifPort object.
        :param net_uuid: the net_uuid this port is to be associated with.
        :param network_type: the network type ('gre', 'vlan', 'flat', 'local')
        :param physical_network: the physical network for 'vlan' or 'flat'
        :param segmentation_id: the VID for 'vlan' or tunnel ID for 'tunnel'
        :param fixed_ips: the ip addresses assigned to this port
        :param device_owner: the string indicative of owner of this port
        :param ovs_restarted: indicates if this is called for an OVS restart.
        '''
        # On an OVS restart the vlan flows must be re-provisioned even if the
        # mapping already exists.
        if net_uuid not in self.local_vlan_map or ovs_restarted:
            self.provision_local_vlan(net_uuid, network_type,
                                      physical_network, segmentation_id)
        lvm = self.local_vlan_map[net_uuid]
        lvm.vif_ports[port.vif_id] = port

        self.dvr_agent.bind_port_to_dvr(port, lvm,
                                        fixed_ips,
                                        device_owner)
        # Persist the binding details into the Port's other_config column in
        # ovsdb, merging with whatever is already stored there.
        port_other_config = self.int_br.db_get_val("Port", port.port_name,
                                                   "other_config")
        vlan_mapping = {'net_uuid': net_uuid,
                        'network_type': network_type,
                        'physical_network': physical_network,
                        'segmentation_id': segmentation_id}
        port_other_config.update(vlan_mapping)
        self.int_br.set_db_attribute("Port", port.port_name, "other_config",
                                     port_other_config)

 

    def _bind_devices(self, need_binding_ports):
        """Tag each port with its local vlan and report status to the plugin.

        :param need_binding_ports: list of port-detail dicts carrying at
            least 'vif_port', 'network_id', 'device' and 'admin_state_up'.
        :raises DeviceListRetrievalError: if the plugin reports failures
            while updating device status.
        """
        devices_up = []
        devices_down = []
        # Fetch current ovsdb tags for all ports in one call.
        port_names = [p['vif_port'].port_name for p in need_binding_ports]
        port_info = self.int_br.get_ports_attributes(
            "Port", columns=["name", "tag"], ports=port_names)
        tags_by_name = {x['name']: x['tag'] for x in port_info}
        for port_detail in need_binding_ports:
            lvm = self.local_vlan_map.get(port_detail['network_id'])
            if not lvm:
                # network for port was deleted. skip this port since it
                # will need to be handled as a DEAD port in the next scan
                continue
            port = port_detail['vif_port']
            device = port_detail['device']
            # Do not bind a port if it's already bound
            cur_tag = tags_by_name.get(port.port_name)
            if cur_tag != lvm.vlan:
                # Clear stale flows before the port is retagged below.
                self.int_br.delete_flows(in_port=port.ofport)
            if self.prevent_arp_spoofing:
                self.setup_arp_spoofing_protection(self.int_br,
                                                   port, port_detail)
            if cur_tag != lvm.vlan:
                self.int_br.set_db_attribute(
                    "Port", port.port_name, "tag", lvm.vlan)

            # update plugin about port status
            # FIXME(salv-orlando): Failures while updating device status
            # must be handled appropriately. Otherwise this might prevent
            # neutron server from sending network-vif-* events to the nova
            # API server, thus possibly preventing instance spawn.
            if port_detail.get('admin_state_up'):
                LOG.debug("Setting status for %s to UP", device)
                devices_up.append(device)
            else:
                LOG.debug("Setting status for %s to DOWN", device)
                devices_down.append(device)
        failed_devices = []
        if devices_up or devices_down:
            devices_set = self.plugin_rpc.update_device_list(
                self.context, devices_up, devices_down, self.agent_id,
                self.conf.host)
            failed_devices = (devices_set.get('failed_devices_up') +
                devices_set.get('failed_devices_down'))
        if failed_devices:
            LOG.error(_LE("Configuration for devices %s failed!"),
                      failed_devices)
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=failed_devices)
        LOG.info(_LI("Configuration for devices up %(up)s and devices "
                     "down %(down)s completed."),
                 {'up': devices_up, 'down': devices_down})

 

    @staticmethod
    def setup_arp_spoofing_protection(bridge, vif, port_details):
        """(Re)install ARP spoofing protection flows for one port.

        :param bridge: bridge to program (integration bridge).
        :param vif: a VifPort-like object with ofport and port_name.
        :param port_details: port dict with 'fixed_ips' and optionally
            'port_security_enabled' and 'allowed_address_pairs'.
        """
        # clear any previous flows related to this port in our ARP table
        bridge.delete_arp_spoofing_protection(port=vif.ofport)
        if not port_details.get('port_security_enabled', True):
            LOG.info(_LI("Skipping ARP spoofing rules for port '%s' because "
                         "it has port security disabled"), vif.port_name)
            return
        # collect all of the addresses and cidrs that belong to the port
        addresses = {f['ip_address'] for f in port_details['fixed_ips']}
        if port_details.get('allowed_address_pairs'):
            addresses |= {p['ip_address']
                          for p in port_details['allowed_address_pairs']}

        if not port_needs_l3_security(port_details):
            return

        # Only IPv4 addresses are protected here (ARP is IPv4-only).
        addresses = {ip for ip in addresses
                     if netaddr.IPNetwork(ip).version == 4}
        if any(netaddr.IPNetwork(ip).prefixlen == 0 for ip in addresses):
            # don't try to install protection because a /0 prefix allows any
            # address anyway and the ARP_SPA can only match on /1 or more.
            return

        bridge.install_arp_spoofing_protection(port=vif.ofport,
                                               ip_addresses=addresses)

 

    def port_unbound(self, vif_id, net_uuid=None): 

        '''Unbind port. 

 

        Removes corresponding local vlan mapping object if this is its last 

        VIF. 

 

        :param vif_id: the id of the vif 

        :param net_uuid: the net_uuid this port is associated with. 

        ''' 

        if net_uuid is None: 

            net_uuid = self.get_net_uuid(vif_id) 

 

        if not self.local_vlan_map.get(net_uuid): 

            LOG.info(_LI('port_unbound(): net_uuid %s not in local_vlan_map'), 

                     net_uuid) 

            return 

 

        lvm = self.local_vlan_map[net_uuid] 

 

        if vif_id in lvm.vif_ports: 

            vif_port = lvm.vif_ports[vif_id] 

            self.dvr_agent.unbind_port_from_dvr(vif_port, lvm) 

        lvm.vif_ports.pop(vif_id, None) 

 

        if not lvm.vif_ports: 

            self.reclaim_local_vlan(net_uuid) 

 

    def port_dead(self, port, log_errors=True): 

        '''Once a port has no binding, put it on the "dead vlan". 

 

        :param port: a ovs_lib.VifPort object. 

        ''' 

        # Don't kill a port if it's already dead 

        cur_tag = self.int_br.db_get_val("Port", port.port_name, "tag", 

                                         log_errors=log_errors) 

        if cur_tag != DEAD_VLAN_TAG: 

            self.int_br.set_db_attribute("Port", port.port_name, "tag", 

                                         DEAD_VLAN_TAG, log_errors=log_errors) 

            self.int_br.drop_port(in_port=port.ofport) 

 

    def setup_integration_br(self):
        '''Setup the integration bridge.

        Creates the bridge if needed, configures controllers, removes the
        leftover tunnel-peer patch port, optionally drops all flows, and
        installs the default table.
        '''
        self.int_br.set_agent_uuid_stamp(self.agent_uuid_stamp)
        # Ensure the integration bridge is created.
        # ovs_lib.OVSBridge.create() will run
        #   ovs-vsctl -- --may-exist add-br BRIDGE_NAME
        # which does nothing if bridge already exists.
        self.int_br.create()
        self.int_br.set_secure_mode()
        self.int_br.setup_controllers(self.conf)

        # Remove any stale patch port toward the tunnel bridge; it is
        # recreated by setup_tunnel_br() when tunneling is enabled.
        self.int_br.delete_port(self.conf.OVS.int_peer_patch_port)
        if self.conf.AGENT.drop_flows_on_start:
            self.int_br.delete_flows()
        self.int_br.setup_default_table()

 

    def setup_ancillary_bridges(self, integ_br, tun_br): 

        '''Setup ancillary bridges - for example br-ex.''' 

        ovs = ovs_lib.BaseOVS() 

        ovs_bridges = set(ovs.get_bridges()) 

        # Remove all known bridges 

        ovs_bridges.remove(integ_br) 

910        if self.enable_tunneling: 

            ovs_bridges.remove(tun_br) 

        br_names = [self.phys_brs[physical_network].br_name for 

                    physical_network in self.phys_brs] 

        ovs_bridges.difference_update(br_names) 

        # Filter list of bridges to those that have external 

        # bridge-id's configured 

        br_names = [] 

        for bridge in ovs_bridges: 

            bridge_id = ovs.get_bridge_external_bridge_id(bridge) 

920            if bridge_id != bridge: 

                br_names.append(bridge) 

        ovs_bridges.difference_update(br_names) 

        ancillary_bridges = [] 

        for bridge in ovs_bridges: 

            br = ovs_lib.OVSBridge(bridge) 

            LOG.info(_LI('Adding %s to list of bridges.'), bridge) 

            ancillary_bridges.append(br) 

        return ancillary_bridges 

 

    def setup_tunnel_br(self, tun_br_name=None): 

        '''(re)initialize the tunnel bridge. 

 

        Creates tunnel bridge, and links it to the integration bridge 

        using a patch port. 

 

        :param tun_br_name: the name of the tunnel bridge. 

        ''' 

938        if not self.tun_br: 

            self.tun_br = self.br_tun_cls(tun_br_name) 

        self.tun_br.set_agent_uuid_stamp(self.agent_uuid_stamp) 

 

        if not self.tun_br.bridge_exists(self.tun_br.br_name): 

            self.tun_br.create(secure_mode=True) 

        self.tun_br.setup_controllers(self.conf) 

        if (not self.int_br.port_exists(self.conf.OVS.int_peer_patch_port) or 

                self.patch_tun_ofport == ovs_lib.INVALID_OFPORT): 

            self.patch_tun_ofport = self.int_br.add_patch_port( 

                self.conf.OVS.int_peer_patch_port, 

                self.conf.OVS.tun_peer_patch_port) 

        if (not self.tun_br.port_exists(self.conf.OVS.tun_peer_patch_port) or 

                self.patch_int_ofport == ovs_lib.INVALID_OFPORT): 

            self.patch_int_ofport = self.tun_br.add_patch_port( 

                self.conf.OVS.tun_peer_patch_port, 

                self.conf.OVS.int_peer_patch_port) 

956        if ovs_lib.INVALID_OFPORT in (self.patch_tun_ofport, 

                                      self.patch_int_ofport): 

            LOG.error(_LE("Failed to create OVS patch port. Cannot have " 

                          "tunneling enabled on this agent, since this " 

                          "version of OVS does not support tunnels or patch " 

                          "ports. Agent terminated!")) 

            exit(1) 

        if self.conf.AGENT.drop_flows_on_start: 

            self.tun_br.delete_flows() 

 

    def setup_tunnel_br_flows(self):
        '''Setup the tunnel bridge.

        Add all flows to the tunnel bridge.
        '''
        # Default table wiring depends on whether the local ARP responder
        # (l2pop) is enabled.
        self.tun_br.setup_default_table(self.patch_int_ofport,
                                        self.arp_responder_enabled)

 

    def get_peer_name(self, prefix, name): 

        """Construct a peer name based on the prefix and name. 

 

        The peer name can not exceed the maximum length allowed for a linux 

        device. Longer names are hashed to help ensure uniqueness. 

        """ 

        if len(prefix + name) <= n_const.DEVICE_NAME_MAX_LEN: 

            return prefix + name 

        # We can't just truncate because bridges may be distinguished 

        # by an ident at the end. A hash over the name should be unique. 

        # Leave part of the bridge name on for easier identification 

        hashlen = 6 

        namelen = n_const.DEVICE_NAME_MAX_LEN - len(prefix) - hashlen 

        new_name = ('%(prefix)s%(truncated)s%(hash)s' % 

                    {'prefix': prefix, 'truncated': name[0:namelen], 

                     'hash': hashlib.sha1(name).hexdigest()[0:hashlen]}) 

        LOG.warning(_LW("Creating an interface named %(name)s exceeds the " 

                        "%(limit)d character limitation. It was shortened to " 

                        "%(new_name)s to fit."), 

                    {'name': name, 'limit': n_const.DEVICE_NAME_MAX_LEN, 

                     'new_name': new_name}) 

        return new_name 

 

    def setup_physical_bridges(self, bridge_mappings):
        '''Setup the physical network bridges.

        Creates physical network bridges and links them to the
        integration bridge using veths or patch ports.

        Populates self.phys_brs, self.int_ofports and self.phys_ofports,
        keyed by physical network name. Terminates the agent if a mapped
        bridge does not exist.

        :param bridge_mappings: map physical network names to bridge names.
        '''
        self.phys_brs = {}
        self.int_ofports = {}
        self.phys_ofports = {}
        ip_wrapper = ip_lib.IPWrapper()
        ovs = ovs_lib.BaseOVS()
        ovs_bridges = ovs.get_bridges()
        for physical_network, bridge in six.iteritems(bridge_mappings):
            LOG.info(_LI("Mapping physical network %(physical_network)s to "
                         "bridge %(bridge)s"),
                     {'physical_network': physical_network,
                      'bridge': bridge})
            # setup physical bridge
            if bridge not in ovs_bridges:
                LOG.error(_LE("Bridge %(bridge)s for physical network "
                              "%(physical_network)s does not exist. Agent "
                              "terminated!"),
                          {'physical_network': physical_network,
                           'bridge': bridge})
                sys.exit(1)
            br = self.br_phys_cls(bridge)
            br.setup_controllers(self.conf)
            br.setup_default_table()
            self.phys_brs[physical_network] = br

            # interconnect physical and integration bridges using veth/patchs
            int_if_name = self.get_peer_name(constants.PEER_INTEGRATION_PREFIX,
                                             bridge)
            phys_if_name = self.get_peer_name(constants.PEER_PHYSICAL_PREFIX,
                                              bridge)
            # Interface type of port for physical and integration bridges must
            # be same, so check only one of them.
            int_type = self.int_br.db_get_val("Interface", int_if_name, "type")
            if self.use_veth_interconnection:
                # Drop ports if the interface types doesn't match the
                # configuration value.
                if int_type == 'patch':
                    self.int_br.delete_port(int_if_name)
                    br.delete_port(phys_if_name)
                # Remove a pre-existing veth device before recreating it.
                if ip_lib.device_exists(int_if_name):
                    ip_lib.IPDevice(int_if_name).link.delete()
                    # Give udev a chance to process its rules here, to avoid
                    # race conditions between commands launched by udev rules
                    # and the subsequent call to ip_wrapper.add_veth
                    utils.execute(['udevadm', 'settle', '--timeout=10'])
                int_veth, phys_veth = ip_wrapper.add_veth(int_if_name,
                                                          phys_if_name)
                int_ofport = self.int_br.add_port(int_veth)
                phys_ofport = br.add_port(phys_veth)
            else:
                # Drop ports if the interface type doesn't match the
                # configuration value
                if int_type == 'veth':
                    self.int_br.delete_port(int_if_name)
                    br.delete_port(phys_if_name)
                # Create patch ports without associating them in order to block
                # untranslated traffic before association
                int_ofport = self.int_br.add_patch_port(
                    int_if_name, constants.NONEXISTENT_PEER)
                phys_ofport = br.add_patch_port(
                    phys_if_name, constants.NONEXISTENT_PEER)

            self.int_ofports[physical_network] = int_ofport
            self.phys_ofports[physical_network] = phys_ofport

            # block all untranslated traffic between bridges
            self.int_br.drop_port(in_port=int_ofport)
            br.drop_port(in_port=phys_ofport)

            if self.use_veth_interconnection:
                # enable veth to pass traffic
                int_veth.link.set_up()
                phys_veth.link.set_up()
                if self.veth_mtu:
                    # set up mtu size for veth interfaces
                    int_veth.link.set_mtu(self.veth_mtu)
                    phys_veth.link.set_mtu(self.veth_mtu)
            else:
                # associate patch ports to pass traffic
                self.int_br.set_db_attribute('Interface', int_if_name,
                                             'options:peer', phys_if_name)
                br.set_db_attribute('Interface', phys_if_name,
                                    'options:peer', int_if_name)

 

    def update_stale_ofport_rules(self): 

        # right now the ARP spoofing rules are the only thing that utilizes 

        # ofport-based rules, so make arp_spoofing protection a conditional 

        # until something else uses ofport 

1091        if not self.prevent_arp_spoofing: 

            return [] 

        previous = self.vifname_to_ofport_map 

        current = self.int_br.get_vif_port_to_ofport_map() 

 

        # if any ofport numbers have changed, re-process the devices as 

        # added ports so any rules based on ofport numbers are updated. 

        moved_ports = self._get_ofport_moves(current, previous) 

 

        # delete any stale rules based on removed ofports 

        ofports_deleted = set(previous.values()) - set(current.values()) 

        for ofport in ofports_deleted: 

            self.int_br.delete_arp_spoofing_protection(port=ofport) 

 

        # store map for next iteration 

        self.vifname_to_ofport_map = current 

        return moved_ports 

 

    @staticmethod 

    def _get_ofport_moves(current, previous): 

        """Returns a list of moved ports. 

 

        Takes two port->ofport maps and returns a list ports that moved to a 

        different ofport. Deleted ports are not included. 

        """ 

        port_moves = [] 

        for name, ofport in previous.items(): 

            if name not in current: 

                continue 

            current_ofport = current[name] 

            if ofport != current_ofport: 

                port_moves.append(name) 

        return port_moves 

 

    def _get_port_info(self, registered_ports, cur_ports): 

        port_info = {'current': cur_ports} 

        # FIXME(salv-orlando): It's not really necessary to return early 

        # if nothing has changed. 

        if cur_ports == registered_ports: 

            # No added or removed ports to set, just return here 

            return port_info 

        port_info['added'] = cur_ports - registered_ports 

        # Remove all the known ports not found on the integration bridge 

        port_info['removed'] = registered_ports - cur_ports 

        return port_info 

 

    def scan_ports(self, registered_ports, updated_ports=None): 

        cur_ports = self.int_br.get_vif_port_set() 

        self.int_br_device_count = len(cur_ports) 

        port_info = self._get_port_info(registered_ports, cur_ports) 

        if updated_ports is None: 

            updated_ports = set() 

        updated_ports.update(self.check_changed_vlans(registered_ports)) 

        if updated_ports: 

            # Some updated ports might have been removed in the 

            # meanwhile, and therefore should not be processed. 

            # In this case the updated port won't be found among 

            # current ports. 

            updated_ports &= cur_ports 

1151            if updated_ports: 

                port_info['updated'] = updated_ports 

        return port_info 

 

    def scan_ancillary_ports(self, registered_ports): 

        cur_ports = set() 

        for bridge in self.ancillary_brs: 

            cur_ports |= bridge.get_vif_port_set() 

        return self._get_port_info(registered_ports, cur_ports) 

 

    def check_changed_vlans(self, registered_ports): 

        """Return ports which have lost their vlan tag. 

 

        The returned value is a set of port ids of the ports concerned by a 

        vlan tag loss. 

        """ 

        port_tags = self.int_br.get_port_tag_dict() 

        changed_ports = set() 

        for lvm in self.local_vlan_map.values(): 

            for port in registered_ports: 

                if ( 

                    port in lvm.vif_ports 

                    and lvm.vif_ports[port].port_name in port_tags 

                    and port_tags[lvm.vif_ports[port].port_name] != lvm.vlan 

                ): 

                    LOG.info( 

                        _LI("Port '%(port_name)s' has lost " 

                            "its vlan tag '%(vlan_tag)d'!"), 

                        {'port_name': lvm.vif_ports[port].port_name, 

                         'vlan_tag': lvm.vlan} 

                    ) 

                    changed_ports.add(port) 

        return changed_ports 

 

    def treat_vif_port(self, vif_port, port_id, network_id, network_type, 

                       physical_network, segmentation_id, admin_state_up, 

                       fixed_ips, device_owner, ovs_restarted): 

        # When this function is called for a port, the port should have 

        # an OVS ofport configured, as only these ports were considered 

        # for being treated. If that does not happen, it is a potential 

        # error condition of which operators should be aware 

        port_needs_binding = True 

        if not vif_port.ofport: 

            LOG.warn(_LW("VIF port: %s has no ofport configured, " 

                         "and might not be able to transmit"), vif_port.vif_id) 

        if vif_port: 

            if admin_state_up: 

                self.port_bound(vif_port, network_id, network_type, 

                                physical_network, segmentation_id, 

                                fixed_ips, device_owner, ovs_restarted) 

            else: 

                self.port_dead(vif_port) 

                port_needs_binding = False 

        else: 

            LOG.debug("No VIF port for port %s defined on agent.", port_id) 

        return port_needs_binding 

 

    def _setup_tunnel_port(self, br, port_name, remote_ip, tunnel_type): 

        ofport = br.add_tunnel_port(port_name, 

                                    remote_ip, 

                                    self.local_ip, 

                                    tunnel_type, 

                                    self.vxlan_udp_port, 

                                    self.dont_fragment) 

        if ofport == ovs_lib.INVALID_OFPORT: 

            LOG.error(_LE("Failed to set-up %(type)s tunnel port to %(ip)s"), 

                      {'type': tunnel_type, 'ip': remote_ip}) 

            return 0 

 

        self.tun_br_ofports[tunnel_type][remote_ip] = ofport 

        # Add flow in default table to resubmit to the right 

        # tunnelling table (lvid will be set in the latter) 

        br.setup_tunnel_port(tunnel_type, ofport) 

 

        ofports = self.tun_br_ofports[tunnel_type].values() 

1231        if ofports and not self.l2_pop: 

            # Update flooding flows to include the new tunnel 

            for vlan_mapping in list(self.local_vlan_map.values()): 

1226                if vlan_mapping.network_type == tunnel_type: 

                    br.install_flood_to_tun(vlan_mapping.vlan, 

                                            vlan_mapping.segmentation_id, 

                                            ofports) 

        return ofport 

 

    def setup_tunnel_port(self, br, remote_ip, network_type): 

        remote_ip_hex = self.get_ip_in_hex(remote_ip) 

1236        if not remote_ip_hex: 

            return 0 

        port_name = '%s-%s' % (network_type, remote_ip_hex) 

        ofport = self._setup_tunnel_port(br, 

                                         port_name, 

                                         remote_ip, 

                                         network_type) 

        return ofport 

 

    def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type): 

        # Check if this tunnel port is still used 

        for lvm in self.local_vlan_map.values(): 

            if tun_ofport in lvm.tun_ofports: 

                break 

        # If not, remove it 

        else: 

            items = list(self.tun_br_ofports[tunnel_type].items()) 

            for remote_ip, ofport in items: 

                if ofport == tun_ofport: 

                    port_name = '%s-%s' % (tunnel_type, 

                                           self.get_ip_in_hex(remote_ip)) 

                    br.delete_port(port_name) 

                    br.cleanup_tunnel_port(ofport) 

                    self.tun_br_ofports[tunnel_type].pop(remote_ip, None) 

 

    def treat_devices_added_or_updated(self, devices, ovs_restarted):
        """Fetch details for *devices* from the plugin and wire each VIF.

        :param devices: iterable of device ids to process
        :param ovs_restarted: True when OVS restarted this iteration; passed
            through to treat_vif_port
        :returns: tuple (skipped_devices, need_binding_devices,
            security_disabled_devices) where skipped devices vanished from
            the bridge, need-binding entries are detail dicts awaiting
            _bind_devices, and security-disabled devices should not get
            security-group filters
        :raises DeviceListRetrievalError: when the plugin reports failed
            devices during detail retrieval
        """
        skipped_devices = []
        need_binding_devices = []
        security_disabled_devices = []
        devices_details_list = (
            self.plugin_rpc.get_devices_details_list_and_failed_devices(
                self.context,
                devices,
                self.agent_id,
                self.conf.host))
        if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=devices)

        devices = devices_details_list.get('devices')
        # Resolve all VIF ports in one bridge query rather than per device.
        vif_by_id = self.int_br.get_vifs_by_ids(
            [vif['device'] for vif in devices])
        for details in devices:
            device = details['device']
            LOG.debug("Processing port: %s", device)
            port = vif_by_id.get(device)
            if not port:
                # The port disappeared and cannot be processed
                LOG.info(_LI("Port %s was not found on the integration bridge "
                             "and will therefore not be processed"), device)
                skipped_devices.append(device)
                continue

            # 'port_id' present means the plugin knows this port.
            if 'port_id' in details:
                LOG.info(_LI("Port %(device)s updated. Details: %(details)s"),
                         {'device': device, 'details': details})
                details['vif_port'] = port
                need_binding = self.treat_vif_port(port, details['port_id'],
                                                   details['network_id'],
                                                   details['network_type'],
                                                   details['physical_network'],
                                                   details['segmentation_id'],
                                                   details['admin_state_up'],
                                                   details['fixed_ips'],
                                                   details['device_owner'],
                                                   ovs_restarted)
                if need_binding:
                    need_binding_devices.append(details)

                # Ports with port security disabled or without security
                # groups are excluded from security-group filtering.
                port_security = details['port_security_enabled']
                has_sgs = 'security_groups' in details
                if not port_security or not has_sgs:
                    security_disabled_devices.append(device)

                self.ext_manager.handle_port(self.context, details)
            else:
                LOG.warn(_LW("Device %s not defined on plugin"), device)
                if (port and port.ofport != -1):
                    self.port_dead(port)
        return skipped_devices, need_binding_devices, security_disabled_devices

 

    def treat_ancillary_devices_added(self, devices):
        """Fetch details for ancillary *devices* and mark them up.

        :param devices: iterable of ancillary device ids
        :raises DeviceListRetrievalError: when detail retrieval or the
            status update reports failed devices
        """
        devices_details_list = (
            self.plugin_rpc.get_devices_details_list_and_failed_devices(
                self.context,
                devices,
                self.agent_id,
                self.conf.host))
        if devices_details_list.get('failed_devices'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            raise DeviceListRetrievalError(devices=devices)
        devices_added = [
            d['device'] for d in devices_details_list.get('devices')]
        LOG.info(_LI("Ancillary Ports %s added"), devices_added)

        # update plugin about port status
        devices_set_up = (
            self.plugin_rpc.update_device_list(self.context,
                                               devices_added,
                                               [],
                                               self.agent_id,
                                               self.conf.host))
        if devices_set_up.get('failed_devices_up'):
            #TODO(rossella_s) handle better the resync in next patches,
            # this is just to preserve the current behavior
            # NOTE(review): raised without the 'devices' argument here,
            # unlike the call above — confirm the exception class accepts
            # being constructed with no arguments.
            raise DeviceListRetrievalError()

 

    def treat_devices_removed(self, devices): 

        resync = False 

        self.sg_agent.remove_devices_filter(devices) 

        LOG.info(_LI("Ports %s removed"), devices) 

        devices_down = self.plugin_rpc.update_device_list(self.context, 

                                                          [], 

                                                          devices, 

                                                          self.agent_id, 

                                                          self.conf.host) 

        failed_devices = devices_down.get('failed_devices_down') 

1355        if failed_devices: 

            LOG.debug("Port removal failed for %(devices)s ", failed_devices) 

            resync = True 

        for device in devices: 

            self.port_unbound(device) 

        return resync 

 

    def treat_ancillary_devices_removed(self, devices): 

        resync = False 

        LOG.info(_LI("Ancillary ports %s removed"), devices) 

        devices_down = self.plugin_rpc.update_device_list(self.context, 

                                                          [], 

                                                          devices, 

                                                          self.agent_id, 

                                                          self.conf.host) 

        failed_devices = devices_down.get('failed_devices_down') 

        if failed_devices: 

            LOG.debug("Port removal failed for %(devices)s ", failed_devices) 

            resync = True 

        for detail in devices_down.get('devices_down'): 

            if detail['exists']: 

                LOG.info(_LI("Port %s updated."), detail['device']) 

                # Nothing to do regarding local networking 

            else: 

                LOG.debug("Device %s not defined on plugin", detail['device']) 

        return resync 

 

    def process_network_ports(self, port_info, ovs_restarted):
        """Process added/updated/removed ports for one rpc_loop iteration.

        :param port_info: dict with 'current' and optional 'added',
            'updated', 'removed' sets as built by scan_ports
        :param ovs_restarted: True when OVS restarted this iteration
        :returns: True when a resync with the plugin is required
        """
        resync_a = False
        resync_b = False
        # TODO(salv-orlando): consider a solution for ensuring notifications
        # are processed exactly in the same order in which they were
        # received. This is tricky because there are two notification
        # sources: the neutron server, and the ovs db monitor process
        # If there is an exception while processing security groups ports
        # will not be wired anyway, and a resync will be triggered
        # VIF wiring needs to be performed always for 'new' devices.
        # For updated ports, re-wiring is not needed in most cases, but needs
        # to be performed anyway when the admin state of a device is changed.
        # A device might be both in the 'added' and 'updated'
        # list at the same time; avoid processing it twice.
        devices_added_updated = (port_info.get('added', set()) |
                                 port_info.get('updated', set()))
        need_binding_devices = []
        security_disabled_ports = []
        if devices_added_updated:
            start = time.time()
            try:
                (skipped_devices, need_binding_devices,
                    security_disabled_ports) = (
                    self.treat_devices_added_or_updated(
                        devices_added_updated, ovs_restarted))
                LOG.debug("process_network_ports - iteration:%(iter_num)d - "
                          "treat_devices_added_or_updated completed. "
                          "Skipped %(num_skipped)d devices of "
                          "%(num_current)d devices currently available. "
                          "Time elapsed: %(elapsed).3f",
                          {'iter_num': self.iter_num,
                           'num_skipped': len(skipped_devices),
                           'num_current': len(port_info['current']),
                           'elapsed': time.time() - start})
                # Update the list of current ports storing only those which
                # have been actually processed.
                port_info['current'] = (port_info['current'] -
                                        set(skipped_devices))
            except DeviceListRetrievalError:
                # Need to resync as there was an error with server
                # communication.
                LOG.exception(_LE("process_network_ports - iteration:%d - "
                                  "failure while retrieving port details "
                                  "from server"), self.iter_num)
                resync_a = True

        # TODO(salv-orlando): Optimize avoiding applying filters
        # unnecessarily, (eg: when there are no IP address changes)
        # Ports with security disabled are excluded from filter setup.
        added_ports = port_info.get('added', set())
        if security_disabled_ports:
            added_ports -= set(security_disabled_ports)
        self.sg_agent.setup_port_filters(added_ports,
                                         port_info.get('updated', set()))
        self._bind_devices(need_binding_devices)

        if 'removed' in port_info and port_info['removed']:
            start = time.time()
            resync_b = self.treat_devices_removed(port_info['removed'])
            LOG.debug("process_network_ports - iteration:%(iter_num)d - "
                      "treat_devices_removed completed in %(elapsed).3f",
                      {'iter_num': self.iter_num,
                       'elapsed': time.time() - start})
        # If one of the above operations fails => resync with plugin
        return (resync_a | resync_b)

 

    def process_ancillary_network_ports(self, port_info): 

        resync_a = False 

        resync_b = False 

        if 'added' in port_info and port_info['added']: 

            start = time.time() 

            try: 

                self.treat_ancillary_devices_added(port_info['added']) 

                LOG.debug("process_ancillary_network_ports - iteration: " 

                          "%(iter_num)d - treat_ancillary_devices_added " 

                          "completed in %(elapsed).3f", 

                          {'iter_num': self.iter_num, 

                           'elapsed': time.time() - start}) 

            except DeviceListRetrievalError: 

                # Need to resync as there was an error with server 

                # communication. 

                LOG.exception(_LE("process_ancillary_network_ports - " 

                                  "iteration:%d - failure while retrieving " 

                                  "port details from server"), self.iter_num) 

                resync_a = True 

        if 'removed' in port_info and port_info['removed']: 

            start = time.time() 

            resync_b = self.treat_ancillary_devices_removed( 

                port_info['removed']) 

            LOG.debug("process_ancillary_network_ports - iteration: " 

                      "%(iter_num)d - treat_ancillary_devices_removed " 

                      "completed in %(elapsed).3f", 

                      {'iter_num': self.iter_num, 

                       'elapsed': time.time() - start}) 

 

        # If one of the above operations fails => resync with plugin 

        return (resync_a | resync_b) 

 

    def get_ip_in_hex(self, ip_address):
        """Render *ip_address* as 8 lowercase hex digits (IPv4 only).

        Used to build unique tunnel port names.  Returns None implicitly
        and logs a warning when the address is not a valid IPv4 address.
        """
        try:
            return '%08x' % netaddr.IPAddress(ip_address, version=4)
        except Exception:
            # Broad catch is deliberate: any parse/format failure simply
            # means the remote IP cannot be used for a tunnel name.
            LOG.warn(_LW("Invalid remote IP: %s"), ip_address)
            return

 

    def tunnel_sync(self): 

        try: 

            for tunnel_type in self.tunnel_types: 

                details = self.plugin_rpc.tunnel_sync(self.context, 

                                                      self.local_ip, 

                                                      tunnel_type, 

                                                      self.conf.host) 

1487                if not self.l2_pop: 

                    tunnels = details['tunnels'] 

                    for tunnel in tunnels: 

1494                        if self.local_ip != tunnel['ip_address']: 

                            remote_ip = tunnel['ip_address'] 

                            remote_ip_hex = self.get_ip_in_hex(remote_ip) 

                            if not remote_ip_hex: 

                                continue 

                            tun_name = '%s-%s' % (tunnel_type, remote_ip_hex) 

                            self._setup_tunnel_port(self.tun_br, 

                                                    tun_name, 

                                                    tunnel['ip_address'], 

                                                    tunnel_type) 

        except Exception as e: 

            LOG.debug("Unable to sync tunnel IP %(local_ip)s: %(e)s", 

                      {'local_ip': self.local_ip, 'e': e}) 

            return True 

        return False 

 

    def _agent_has_updates(self, polling_manager): 

        return (polling_manager.is_polling_required or 

                self.updated_ports or 

                self.sg_agent.firewall_refresh_needed()) 

 

    def _port_info_has_changes(self, port_info): 

        return (port_info.get('added') or 

                port_info.get('removed') or 

                port_info.get('updated')) 

 

    def check_ovs_status(self): 

        # Check for the canary flow 

        status = self.int_br.check_canary_table() 

        if status == constants.OVS_RESTARTED: 

            LOG.warn(_LW("OVS is restarted. OVSNeutronAgent will reset " 

                         "bridges and recover ports.")) 

        elif status == constants.OVS_DEAD: 

            LOG.warn(_LW("OVS is dead. OVSNeutronAgent will keep running " 

                         "and checking OVS status periodically.")) 

        return status 

 

    def loop_count_and_wait(self, start_time, port_stats): 

        # sleep till end of polling interval 

        elapsed = time.time() - start_time 

        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d " 

                  "completed. Processed ports statistics: " 

                  "%(port_stats)s. Elapsed:%(elapsed).3f", 

                  {'iter_num': self.iter_num, 

                   'port_stats': port_stats, 

                   'elapsed': elapsed}) 

1544        if elapsed < self.polling_interval: 

            time.sleep(self.polling_interval - elapsed) 

        else: 

            LOG.debug("Loop iteration exceeded interval " 

                      "(%(polling_interval)s vs. %(elapsed)s)!", 

                      {'polling_interval': self.polling_interval, 

                       'elapsed': elapsed}) 

        self.iter_num = self.iter_num + 1 

 

    def get_port_stats(self, port_info, ancillary_port_info): 

        port_stats = { 

            'regular': { 

                'added': len(port_info.get('added', [])), 

                'updated': len(port_info.get('updated', [])), 

                'removed': len(port_info.get('removed', []))}} 

        if self.ancillary_brs: 

            port_stats['ancillary'] = { 

                'added': len(ancillary_port_info.get('added', [])), 

                'removed': len(ancillary_port_info.get('removed', []))} 

        return port_stats 

 

    def cleanup_stale_flows(self): 

exit        if self.iter_num == 0: 

            bridges = [self.int_br] 

1566            if self.enable_tunneling: 

                bridges.append(self.tun_br) 

            for bridge in bridges: 

                LOG.info(_LI("Cleaning stale %s flows"), bridge.br_name) 

                bridge.cleanup_flows() 

 

    def rpc_loop(self, polling_manager=None):
        """Main agent loop: scan ports, wire VIFs, and sync with the plugin.

        Runs until _check_and_handle_signal returns False (SIGTERM).  Each
        iteration checks OVS health, optionally resyncs tunnels, scans for
        port changes, and processes them; any failure flips ``sync`` so the
        next iteration starts from a clean full resync.

        :param polling_manager: optional polling manager; a non-minimizing
            one is created when not supplied
        """
        if not polling_manager:
            polling_manager = polling.get_polling_manager(
                minimize_polling=False)

        sync = True
        ports = set()
        updated_ports_copy = set()
        ancillary_ports = set()
        tunnel_sync = True
        ovs_restarted = False
        while self._check_and_handle_signal():
            port_info = {}
            ancillary_port_info = {}
            start = time.time()
            LOG.debug("Agent rpc_loop - iteration:%d started",
                      self.iter_num)
            if sync:
                LOG.info(_LI("Agent out of sync with plugin!"))
                # Dropping the known-port sets forces a full re-scan below.
                ports.clear()
                ancillary_ports.clear()
                sync = False
                polling_manager.force_polling()
            ovs_status = self.check_ovs_status()
            if ovs_status == constants.OVS_RESTARTED:
                # Rebuild all bridge wiring from scratch after a restart.
                self.setup_integration_br()
                self.setup_physical_bridges(self.bridge_mappings)
                if self.enable_tunneling:
                    self.setup_tunnel_br()
                    self.setup_tunnel_br_flows()
                    tunnel_sync = True
                if self.enable_distributed_routing:
                    self.dvr_agent.reset_ovs_parameters(self.int_br,
                                                 self.tun_br,
                                                 self.patch_int_ofport,
                                                 self.patch_tun_ofport)
                    self.dvr_agent.reset_dvr_parameters()
                    self.dvr_agent.setup_dvr_flows()
            elif ovs_status == constants.OVS_DEAD:
                # Agent doesn't apply any operations when ovs is dead, to
                # prevent unexpected failure or crash. Sleep and continue
                # loop in which ovs status will be checked periodically.
                port_stats = self.get_port_stats({}, {})
                self.loop_count_and_wait(start, port_stats)
                continue
            # Notify the plugin of tunnel IP
            if self.enable_tunneling and tunnel_sync:
                LOG.info(_LI("Agent tunnel out of sync with plugin!"))
                try:
                    tunnel_sync = self.tunnel_sync()
                except Exception:
                    LOG.exception(_LE("Error while synchronizing tunnels"))
                    tunnel_sync = True
            ovs_restarted |= (ovs_status == constants.OVS_RESTARTED)
            if self._agent_has_updates(polling_manager) or ovs_restarted:
                try:
                    LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                              "starting polling. Elapsed:%(elapsed).3f",
                              {'iter_num': self.iter_num,
                               'elapsed': time.time() - start})
                    # Save updated ports dict to perform rollback in
                    # case resync would be needed, and then clear
                    # self.updated_ports. As the greenthread should not yield
                    # between these two statements, this will be thread-safe
                    updated_ports_copy = self.updated_ports
                    self.updated_ports = set()
                    reg_ports = (set() if ovs_restarted else ports)
                    port_info = self.scan_ports(reg_ports, updated_ports_copy)
                    self.process_deleted_ports(port_info)
                    ofport_changed_ports = self.update_stale_ofport_rules()
                    if ofport_changed_ports:
                        port_info.setdefault('updated', set()).update(
                            ofport_changed_ports)
                    LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                              "port information retrieved. "
                              "Elapsed:%(elapsed).3f",
                              {'iter_num': self.iter_num,
                               'elapsed': time.time() - start})

                    # Treat ancillary devices if they exist
                    if self.ancillary_brs:
                        ancillary_port_info = self.scan_ancillary_ports(
                            ancillary_ports)
                        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                                  "ancillary port info retrieved. "
                                  "Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                    # Secure and wire/unwire VIFs and update their status
                    # on Neutron server
                    if (self._port_info_has_changes(port_info) or
                        self.sg_agent.firewall_refresh_needed() or
                        ovs_restarted):
                        LOG.debug("Starting to process devices in:%s",
                                  port_info)
                        # If treat devices fails - must resync with plugin
                        sync = self.process_network_ports(port_info,
                                                          ovs_restarted)
                        self.cleanup_stale_flows()
                        LOG.debug("Agent rpc_loop - iteration:%(iter_num)d - "
                                  "ports processed. Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})

                    ports = port_info['current']

                    if self.ancillary_brs:
                        sync |= self.process_ancillary_network_ports(
                            ancillary_port_info)
                        LOG.debug("Agent rpc_loop - iteration: "
                                  "%(iter_num)d - ancillary ports "
                                  "processed. Elapsed:%(elapsed).3f",
                                  {'iter_num': self.iter_num,
                                   'elapsed': time.time() - start})
                        ancillary_ports = ancillary_port_info['current']

                    polling_manager.polling_completed()
                    # Keep this flag in the last line of "try" block,
                    # so we can sure that no other Exception occurred.
                    if not sync:
                        ovs_restarted = False
                except Exception:
                    LOG.exception(_LE("Error while processing VIF ports"))
                    # Put the ports back in self.updated_port
                    self.updated_ports |= updated_ports_copy
                    sync = True
            port_stats = self.get_port_stats(port_info, ancillary_port_info)
            self.loop_count_and_wait(start, port_stats)

 

    def daemon_loop(self): 

        # Start everything. 

        LOG.info(_LI("Agent initialized successfully, now running... ")) 

        signal.signal(signal.SIGTERM, self._handle_sigterm) 

        signal.signal(signal.SIGHUP, self._handle_sighup) 

        with polling.get_polling_manager( 

            self.minimize_polling, 

            self.ovsdb_monitor_respawn_interval) as pm: 

 

            self.rpc_loop(polling_manager=pm) 

 

    def _handle_sigterm(self, signum, frame):
        """SIGTERM handler: ask the rpc loop to exit at the next iteration."""
        self.catch_sigterm = True
        # Shrink RPC timeouts so shutdown is not blocked waiting on the
        # server; a falsy quitting_rpc_timeout leaves timeouts untouched.
        if self.quitting_rpc_timeout:
            self.set_rpc_timeout(self.quitting_rpc_timeout)

 

    def _handle_sighup(self, signum, frame):
        """SIGHUP handler: flag a config reload, acted on in the rpc loop."""
        self.catch_sighup = True

 

    def _check_and_handle_signal(self): 

1721        if self.catch_sigterm: 

            LOG.info(_LI("Agent caught SIGTERM, quitting daemon loop.")) 

            self.run_daemon_loop = False 

            self.catch_sigterm = False 

1725        if self.catch_sighup: 

            LOG.info(_LI("Agent caught SIGHUP, resetting.")) 

            self.conf.reload_config_files() 

            config.setup_logging() 

            LOG.debug('Full set of CONF:') 

            self.conf.log_opt_values(LOG, logging.DEBUG) 

            self.catch_sighup = False 

        return self.run_daemon_loop 

 

    def set_rpc_timeout(self, timeout): 

        for rpc_api in (self.plugin_rpc, self.sg_plugin_rpc, 

                        self.dvr_plugin_rpc, self.state_rpc): 

            rpc_api.client.timeout = timeout 

 

    def _check_agent_configurations(self): 

        if (self.enable_distributed_routing and self.enable_tunneling 

            and not self.l2_pop): 

            raise ValueError(_("DVR deployments for VXLAN/GRE underlays " 

                               "require L2-pop to be enabled, in both the " 

                               "Agent and Server side.")) 

 

 

def create_agent_config_map(config):
    """Create a map of agent config parameters.

    :param config: an instance of cfg.CONF
    :returns: a map of agent configuration parameters
    :raises ValueError: on unparsable bridge_mappings, an invalid tunnel
        type, or tunneling requested without a local_ip
    """
    try:
        bridge_mappings = n_utils.parse_mappings(config.OVS.bridge_mappings)
    except ValueError as e:
        raise ValueError(_("Parsing bridge_mappings failed: %s.") % e)

    kwargs = dict(
        integ_br=config.OVS.integration_bridge,
        tun_br=config.OVS.tunnel_bridge,
        local_ip=config.OVS.local_ip,
        bridge_mappings=bridge_mappings,
        polling_interval=config.AGENT.polling_interval,
        minimize_polling=config.AGENT.minimize_polling,
        tunnel_types=config.AGENT.tunnel_types,
        veth_mtu=config.AGENT.veth_mtu,
        enable_distributed_routing=config.AGENT.enable_distributed_routing,
        l2_population=config.AGENT.l2_population,
        arp_responder=config.AGENT.arp_responder,
        prevent_arp_spoofing=config.AGENT.prevent_arp_spoofing,
        use_veth_interconnection=config.OVS.use_veth_interconnection,
        quitting_rpc_timeout=config.AGENT.quitting_rpc_timeout
    )

    # Verify the tunnel_types specified are valid
    for tun in kwargs['tunnel_types']:
        if tun not in constants.TUNNEL_NETWORK_TYPES:
            # Interpolate with %, not a comma: the previous code built a
            # (message, tun) tuple, so the ValueError carried a malformed
            # message instead of naming the bad tunnel type.
            msg = _('Invalid tunnel type specified: %s') % tun
            raise ValueError(msg)
        if not kwargs['local_ip']:
            msg = _('Tunneling cannot be enabled without a valid local_ip.')
            raise ValueError(msg)

    return kwargs

 

 

def validate_local_ip(local_ip):
    """If tunneling is enabled, verify if the ip exists on the agent's host."""
    if not cfg.CONF.AGENT.tunnel_types:
        # Tunneling disabled; nothing to validate.
        return

    if ip_lib.IPWrapper().get_device_by_ip(local_ip):
        return
    LOG.error(_LE("Tunneling can't be enabled with invalid local_ip '%s'."
                  " IP couldn't be found on this host's interfaces."),
              local_ip)
    raise SystemExit(1)

 

 

def prepare_xen_compute():
    """Adjust ip_lib defaults when running on a XenServer compute host."""
    if 'rootwrap-xen-dom0' not in cfg.CONF.AGENT.root_helper:
        return
    # Force ip_lib to always use the root helper to ensure that ip
    # commands target xen dom0 rather than domU.
    cfg.CONF.register_opts(ip_lib.OPTS)
    cfg.CONF.set_default('ip_lib_force_root', True)

 

 

def main(bridge_classes):
    """Entry point: build the OVS agent from cfg.CONF and run its loop.

    :param bridge_classes: bridge implementations passed to OVSNeutronAgent
    :raises SystemExit: when configuration parsing or agent construction
        fails
    """
    try:
        agent_config = create_agent_config_map(cfg.CONF)
    except ValueError:
        LOG.exception(_LE("Agent failed to create agent config map"))
        raise SystemExit(1)
    prepare_xen_compute()
    validate_local_ip(agent_config['local_ip'])
    try:
        agent = OVSNeutronAgent(bridge_classes, **agent_config)
    except (RuntimeError, ValueError) as e:
        LOG.error(_LE("%s Agent terminated!"), e)
        sys.exit(1)
    agent.daemon_loop()