Line data Source code
1 : // SPDX-License-Identifier: GPL-2.0
2 : /*
3 : * Support routines for initializing a PCI subsystem
4 : *
5 : * Extruded from code written by
6 : * Dave Rusling (david.rusling@reo.mts.dec.com)
7 : * David Mosberger (davidm@cs.arizona.edu)
8 : * David Miller (davem@redhat.com)
9 : *
10 : * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
11 : * PCI-PCI bridges cleanup, sorted resource allocation.
12 : * Feb 2002, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
13 : * Converted to allocation in 3 passes, which gives
14 : * tighter packing. Prefetchable range support.
15 : */
16 :
17 : #include <linux/init.h>
18 : #include <linux/kernel.h>
19 : #include <linux/module.h>
20 : #include <linux/pci.h>
21 : #include <linux/errno.h>
22 : #include <linux/ioport.h>
23 : #include <linux/cache.h>
24 : #include <linux/slab.h>
25 : #include <linux/acpi.h>
26 : #include "pci.h"
27 :
28 : unsigned int pci_flags;
29 : EXPORT_SYMBOL_GPL(pci_flags);
30 :
31 : struct pci_dev_resource {
32 : struct list_head list;
33 : struct resource *res;
34 : struct pci_dev *dev;
35 : resource_size_t start;
36 : resource_size_t end;
37 : resource_size_t add_size;
38 : resource_size_t min_align;
39 : unsigned long flags;
40 : };
41 :
42 0 : static void free_list(struct list_head *head)
43 : {
44 : struct pci_dev_resource *dev_res, *tmp;
45 :
46 0 : list_for_each_entry_safe(dev_res, tmp, head, list) {
47 0 : list_del(&dev_res->list);
48 0 : kfree(dev_res);
49 : }
50 0 : }
51 :
52 : /**
53 : * add_to_list() - Add a new resource tracker to the list
54 : * @head: Head of the list
55 : * @dev: Device to which the resource belongs
56 : * @res: Resource to be tracked
57 : * @add_size: Additional size to be optionally added to the resource
58 : * @min_align: Minimum memory window alignment
59 : */
60 0 : static int add_to_list(struct list_head *head, struct pci_dev *dev,
61 : struct resource *res, resource_size_t add_size,
62 : resource_size_t min_align)
63 : {
64 : struct pci_dev_resource *tmp;
65 :
66 0 : tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
67 0 : if (!tmp)
68 : return -ENOMEM;
69 :
70 0 : tmp->res = res;
71 0 : tmp->dev = dev;
72 0 : tmp->start = res->start;
73 0 : tmp->end = res->end;
74 0 : tmp->flags = res->flags;
75 0 : tmp->add_size = add_size;
76 0 : tmp->min_align = min_align;
77 :
78 0 : list_add(&tmp->list, head);
79 :
80 0 : return 0;
81 : }
82 :
83 0 : static void remove_from_list(struct list_head *head, struct resource *res)
84 : {
85 : struct pci_dev_resource *dev_res, *tmp;
86 :
87 0 : list_for_each_entry_safe(dev_res, tmp, head, list) {
88 0 : if (dev_res->res == res) {
89 0 : list_del(&dev_res->list);
90 0 : kfree(dev_res);
91 0 : break;
92 : }
93 : }
94 0 : }
95 :
96 : static struct pci_dev_resource *res_to_dev_res(struct list_head *head,
97 : struct resource *res)
98 : {
99 : struct pci_dev_resource *dev_res;
100 :
101 0 : list_for_each_entry(dev_res, head, list) {
102 0 : if (dev_res->res == res)
103 : return dev_res;
104 : }
105 :
106 : return NULL;
107 : }
108 :
109 : static resource_size_t get_res_add_size(struct list_head *head,
110 : struct resource *res)
111 : {
112 : struct pci_dev_resource *dev_res;
113 :
114 0 : dev_res = res_to_dev_res(head, res);
115 0 : return dev_res ? dev_res->add_size : 0;
116 : }
117 :
118 : static resource_size_t get_res_add_align(struct list_head *head,
119 : struct resource *res)
120 : {
121 : struct pci_dev_resource *dev_res;
122 :
123 0 : dev_res = res_to_dev_res(head, res);
124 0 : return dev_res ? dev_res->min_align : 0;
125 : }
126 :
127 :
128 : /* Sort resources by alignment */
129 0 : static void pdev_sort_resources(struct pci_dev *dev, struct list_head *head)
130 : {
131 : int i;
132 :
133 0 : for (i = 0; i < PCI_NUM_RESOURCES; i++) {
134 : struct resource *r;
135 : struct pci_dev_resource *dev_res, *tmp;
136 : resource_size_t r_align;
137 : struct list_head *n;
138 :
139 0 : r = &dev->resource[i];
140 :
141 0 : if (r->flags & IORESOURCE_PCI_FIXED)
142 0 : continue;
143 :
144 0 : if (!(r->flags) || r->parent)
145 0 : continue;
146 :
147 0 : r_align = pci_resource_alignment(dev, r);
148 0 : if (!r_align) {
149 0 : pci_warn(dev, "BAR %d: %pR has bogus alignment\n",
150 : i, r);
151 0 : continue;
152 : }
153 :
154 0 : tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
155 0 : if (!tmp)
156 0 : panic("%s: kzalloc() failed!\n", __func__);
157 0 : tmp->res = r;
158 0 : tmp->dev = dev;
159 :
160 : /* Fall back to the tail: smallest alignment so far, or the list is empty */
161 0 : n = head;
162 0 : list_for_each_entry(dev_res, head, list) {
163 : resource_size_t align;
164 :
165 0 : align = pci_resource_alignment(dev_res->dev,
166 : dev_res->res);
167 :
168 0 : if (r_align > align) {
169 : n = &dev_res->list;
170 : break;
171 : }
172 : }
173 : /* Insert it just before n */
174 0 : list_add_tail(&tmp->list, n);
175 : }
176 0 : }
177 :
178 0 : static void __dev_sort_resources(struct pci_dev *dev, struct list_head *head)
179 : {
180 0 : u16 class = dev->class >> 8;
181 :
182 : /* Don't touch classless devices or host bridges or IOAPICs */
183 0 : if (class == PCI_CLASS_NOT_DEFINED || class == PCI_CLASS_BRIDGE_HOST)
184 : return;
185 :
186 : /* Don't touch IOAPIC devices already enabled by firmware */
187 0 : if (class == PCI_CLASS_SYSTEM_PIC) {
188 : u16 command;
189 0 : pci_read_config_word(dev, PCI_COMMAND, &command);
190 0 : if (command & (PCI_COMMAND_IO | PCI_COMMAND_MEMORY))
191 0 : return;
192 : }
193 :
194 0 : pdev_sort_resources(dev, head);
195 : }
196 :
197 : static inline void reset_resource(struct resource *res)
198 : {
199 0 : res->start = 0;
200 0 : res->end = 0;
201 0 : res->flags = 0;
202 : }
203 :
204 : /**
205 : * reassign_resources_sorted() - Satisfy any additional resource requests
206 : *
207 : * @realloc_head: Head of the list tracking requests requiring
208 : * additional resources
209 : * @head: Head of the list tracking requests with allocated
210 : * resources
211 : *
212 : * Walk through each element of the realloc_head and try to procure additional
213 : * resources for the element, provided the element is in the head list.
214 : */
215 0 : static void reassign_resources_sorted(struct list_head *realloc_head,
216 : struct list_head *head)
217 : {
218 : struct resource *res;
219 : struct pci_dev_resource *add_res, *tmp;
220 : struct pci_dev_resource *dev_res;
221 : resource_size_t add_size, align;
222 : int idx;
223 :
224 0 : list_for_each_entry_safe(add_res, tmp, realloc_head, list) {
225 0 : bool found_match = false;
226 :
227 0 : res = add_res->res;
228 : /* Skip resource that has been reset */
229 0 : if (!res->flags)
230 : goto out;
231 :
232 : /* Skip this resource if not found in head list */
233 0 : list_for_each_entry(dev_res, head, list) {
234 0 : if (dev_res->res == res) {
235 : found_match = true;
236 : break;
237 : }
238 : }
239 0 : if (!found_match) /* Just skip */
240 0 : continue;
241 :
242 0 : idx = res - &add_res->dev->resource[0];
243 0 : add_size = add_res->add_size;
244 0 : align = add_res->min_align;
245 0 : if (!resource_size(res)) {
246 0 : res->start = align;
247 0 : res->end = res->start + add_size - 1;
248 0 : if (pci_assign_resource(add_res->dev, idx))
249 : reset_resource(res);
250 : } else {
251 0 : res->flags |= add_res->flags &
252 : (IORESOURCE_STARTALIGN|IORESOURCE_SIZEALIGN);
253 0 : if (pci_reassign_resource(add_res->dev, idx,
254 : add_size, align))
255 0 : pci_info(add_res->dev, "failed to add %llx res[%d]=%pR\n",
256 : (unsigned long long) add_size, idx,
257 : res);
258 : }
259 : out:
260 0 : list_del(&add_res->list);
261 0 : kfree(add_res);
262 : }
263 0 : }
264 :
265 : /**
266 : * assign_requested_resources_sorted() - Satisfy resource requests
267 : *
268 : * @head: Head of the list tracking requests for resources
269 : * @fail_head: Head of the list tracking requests that could not be
270 : * allocated
271 : *
272 : * Satisfy resource requests of each element in the list. Add requests that
273 : * could not be satisfied to the failed_list.
274 : */
275 0 : static void assign_requested_resources_sorted(struct list_head *head,
276 : struct list_head *fail_head)
277 : {
278 : struct resource *res;
279 : struct pci_dev_resource *dev_res;
280 : int idx;
281 :
282 0 : list_for_each_entry(dev_res, head, list) {
283 0 : res = dev_res->res;
284 0 : idx = res - &dev_res->dev->resource[0];
285 0 : if (resource_size(res) &&
286 0 : pci_assign_resource(dev_res->dev, idx)) {
287 0 : if (fail_head) {
288 : /*
289 : * If the failed resource is a ROM BAR and
290 : * it will be enabled later, don't add it
291 : * to the list.
292 : */
293 0 : if (!((idx == PCI_ROM_RESOURCE) &&
294 0 : (!(res->flags & IORESOURCE_ROM_ENABLE))))
295 0 : add_to_list(fail_head,
296 : dev_res->dev, res,
297 : 0 /* don't care */,
298 : 0 /* don't care */);
299 : }
300 : reset_resource(res);
301 : }
302 : }
303 0 : }
304 :
305 : static unsigned long pci_fail_res_type_mask(struct list_head *fail_head)
306 : {
307 : struct pci_dev_resource *fail_res;
308 0 : unsigned long mask = 0;
309 :
310 : /* Check failed type */
311 0 : list_for_each_entry(fail_res, fail_head, list)
312 0 : mask |= fail_res->flags;
313 :
314 : /*
315 : * A failed prefetchable resource also sets IORESOURCE_MEM, since
316 : * prefetchable BARs can be allocated from a non-prefetchable range.
317 : * That bit then causes all assigned non-pref siblings to be released.
318 : */
319 0 : return mask & (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH);
320 : }
321 :
322 0 : static bool pci_need_to_release(unsigned long mask, struct resource *res)
323 : {
324 0 : if (res->flags & IORESOURCE_IO)
325 0 : return !!(mask & IORESOURCE_IO);
326 :
327 : /* Check pref at first */
328 0 : if (res->flags & IORESOURCE_PREFETCH) {
329 0 : if (mask & IORESOURCE_PREFETCH)
330 : return true;
331 : /* Count pref if its parent is non-pref */
332 0 : else if ((mask & IORESOURCE_MEM) &&
333 0 : !(res->parent->flags & IORESOURCE_PREFETCH))
334 : return true;
335 : else
336 : return false;
337 : }
338 :
339 0 : if (res->flags & IORESOURCE_MEM)
340 0 : return !!(mask & IORESOURCE_MEM);
341 :
342 : return false; /* Should not get here */
343 : }
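A worked example of how the mask from pci_fail_res_type_mask() drives pci_need_to_release(); the scenario is made up for illustration.

/*
 * Example: suppose only a 32-bit prefetchable BAR failed to be assigned.
 * Its flags contain IORESOURCE_MEM | IORESOURCE_PREFETCH, so the mask is
 * IORESOURCE_MEM | IORESOURCE_PREFETCH, and pci_need_to_release() reports:
 *
 *   assigned I/O window:           false (IORESOURCE_IO not in the mask)
 *   assigned pref MMIO window:     true  (IORESOURCE_PREFETCH is set)
 *   assigned non-pref MMIO window: true  (IORESOURCE_MEM is set)
 *
 * so both memory windows are released and retried while I/O is kept.
 */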
344 :
345 0 : static void __assign_resources_sorted(struct list_head *head,
346 : struct list_head *realloc_head,
347 : struct list_head *fail_head)
348 : {
349 : /*
350 : * We should not assign the requested sizes first: the resources could
351 : * end up adjacent, and a later reassign then cannot grow them one by
352 : * one within the parent resource window.
353 : *
354 : * So first try to assign requested + add_size. If that succeeds, we
355 : * can get out early. If it does not, fall back to assigning the
356 : * requested sizes first, then try to reassign add_size for some resources.
357 : *
358 : * The three resource types are checked separately to decide what must
359 : * be released after the requested + add_size attempt:
360 : *
361 : * 1. If I/O port assignment fails, release the assigned I/O
362 : * ports.
363 : * 2. If pref MMIO assignment fails, release the assigned pref
364 : * MMIO. If an assigned pref MMIO's parent is non-pref MMIO
365 : * and the non-pref MMIO assignment fails, release that
366 : * assigned pref MMIO as well.
367 : * 3. If non-pref MMIO assignment fails or pref MMIO
368 : * assignment fails, release the assigned non-pref MMIO.
369 : */
370 0 : LIST_HEAD(save_head);
371 0 : LIST_HEAD(local_fail_head);
372 : struct pci_dev_resource *save_res;
373 : struct pci_dev_resource *dev_res, *tmp_res, *dev_res2;
374 : unsigned long fail_type;
375 : resource_size_t add_align, align;
376 :
377 : /* Check if optional add_size is there */
378 0 : if (!realloc_head || list_empty(realloc_head))
379 : goto requested_and_reassign;
380 :
381 : /* Save original start, end, flags etc at first */
382 0 : list_for_each_entry(dev_res, head, list) {
383 0 : if (add_to_list(&save_head, dev_res->dev, dev_res->res, 0, 0)) {
384 0 : free_list(&save_head);
385 0 : goto requested_and_reassign;
386 : }
387 : }
388 :
389 : /* Update res in head list with add_size in realloc_head list */
390 0 : list_for_each_entry_safe(dev_res, tmp_res, head, list) {
391 0 : dev_res->res->end += get_res_add_size(realloc_head,
392 : dev_res->res);
393 :
394 : /*
395 : * There are two kinds of additional resources in the list:
396 : * 1. bridge resource -- IORESOURCE_STARTALIGN
397 : * 2. SR-IOV resource -- IORESOURCE_SIZEALIGN
398 : * Here just fix the additional alignment for bridge
399 : */
400 0 : if (!(dev_res->res->flags & IORESOURCE_STARTALIGN))
401 0 : continue;
402 :
403 0 : add_align = get_res_add_align(realloc_head, dev_res->res);
404 :
405 : /*
406 : * The "head" list is sorted by alignment so resources with
407 : * bigger alignment will be assigned first. After we
408 : * change the alignment of a dev_res in "head" list, we
409 : * need to reorder the list by alignment to make it
410 : * consistent.
411 : */
412 0 : if (add_align > dev_res->res->start) {
413 0 : resource_size_t r_size = resource_size(dev_res->res);
414 :
415 0 : dev_res->res->start = add_align;
416 0 : dev_res->res->end = add_align + r_size - 1;
417 :
418 0 : list_for_each_entry(dev_res2, head, list) {
419 0 : align = pci_resource_alignment(dev_res2->dev,
420 : dev_res2->res);
421 0 : if (add_align > align) {
422 0 : list_move_tail(&dev_res->list,
423 : &dev_res2->list);
424 : break;
425 : }
426 : }
427 : }
428 :
429 : }
430 :
431 : /* Try updated head list with add_size added */
432 0 : assign_requested_resources_sorted(head, &local_fail_head);
433 :
434 : /* All assigned with add_size? */
435 0 : if (list_empty(&local_fail_head)) {
436 : /* Remove head list from realloc_head list */
437 0 : list_for_each_entry(dev_res, head, list)
438 0 : remove_from_list(realloc_head, dev_res->res);
439 0 : free_list(&save_head);
440 0 : free_list(head);
441 0 : return;
442 : }
443 :
444 : /* Check failed type */
445 0 : fail_type = pci_fail_res_type_mask(&local_fail_head);
446 : /* Remove assigned resources that need not be released from head list etc. */
447 0 : list_for_each_entry_safe(dev_res, tmp_res, head, list)
448 0 : if (dev_res->res->parent &&
449 0 : !pci_need_to_release(fail_type, dev_res->res)) {
450 : /* Remove it from realloc_head list */
451 0 : remove_from_list(realloc_head, dev_res->res);
452 0 : remove_from_list(&save_head, dev_res->res);
453 0 : list_del(&dev_res->list);
454 0 : kfree(dev_res);
455 : }
456 :
457 0 : free_list(&local_fail_head);
458 : /* Release assigned resource */
459 0 : list_for_each_entry(dev_res, head, list)
460 0 : if (dev_res->res->parent)
461 0 : release_resource(dev_res->res);
462 : /* Restore start/end/flags from saved list */
463 0 : list_for_each_entry(save_res, &save_head, list) {
464 0 : struct resource *res = save_res->res;
465 :
466 0 : res->start = save_res->start;
467 0 : res->end = save_res->end;
468 0 : res->flags = save_res->flags;
469 : }
470 0 : free_list(&save_head);
471 :
472 : requested_and_reassign:
473 : /* Satisfy the must-have resource requests */
474 0 : assign_requested_resources_sorted(head, fail_head);
475 :
476 : /* Try to satisfy any additional optional resource requests */
477 0 : if (realloc_head)
478 0 : reassign_resources_sorted(realloc_head, head);
479 0 : free_list(head);
480 : }
481 :
482 0 : static void pdev_assign_resources_sorted(struct pci_dev *dev,
483 : struct list_head *add_head,
484 : struct list_head *fail_head)
485 : {
486 0 : LIST_HEAD(head);
487 :
488 0 : __dev_sort_resources(dev, &head);
489 0 : __assign_resources_sorted(&head, add_head, fail_head);
490 :
491 0 : }
492 :
493 0 : static void pbus_assign_resources_sorted(const struct pci_bus *bus,
494 : struct list_head *realloc_head,
495 : struct list_head *fail_head)
496 : {
497 : struct pci_dev *dev;
498 0 : LIST_HEAD(head);
499 :
500 0 : list_for_each_entry(dev, &bus->devices, bus_list)
501 0 : __dev_sort_resources(dev, &head);
502 :
503 0 : __assign_resources_sorted(&head, realloc_head, fail_head);
504 0 : }
505 :
506 0 : void pci_setup_cardbus(struct pci_bus *bus)
507 : {
508 0 : struct pci_dev *bridge = bus->self;
509 : struct resource *res;
510 : struct pci_bus_region region;
511 :
512 0 : pci_info(bridge, "CardBus bridge to %pR\n",
513 : &bus->busn_res);
514 :
515 0 : res = bus->resource[0];
516 0 : pcibios_resource_to_bus(bridge->bus, &region, res);
517 0 : if (res->flags & IORESOURCE_IO) {
518 : /*
519 : * The IO resource is allocated a range twice as large as it
520 : * would normally need. This allows us to set both IO regs.
521 : */
522 0 : pci_info(bridge, " bridge window %pR\n", res);
523 0 : pci_write_config_dword(bridge, PCI_CB_IO_BASE_0,
524 0 : region.start);
525 0 : pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_0,
526 0 : region.end);
527 : }
528 :
529 0 : res = bus->resource[1];
530 0 : pcibios_resource_to_bus(bridge->bus, &region, res);
531 0 : if (res->flags & IORESOURCE_IO) {
532 0 : pci_info(bridge, " bridge window %pR\n", res);
533 0 : pci_write_config_dword(bridge, PCI_CB_IO_BASE_1,
534 0 : region.start);
535 0 : pci_write_config_dword(bridge, PCI_CB_IO_LIMIT_1,
536 0 : region.end);
537 : }
538 :
539 0 : res = bus->resource[2];
540 0 : pcibios_resource_to_bus(bridge->bus, &region, res);
541 0 : if (res->flags & IORESOURCE_MEM) {
542 0 : pci_info(bridge, " bridge window %pR\n", res);
543 0 : pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_0,
544 0 : region.start);
545 0 : pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_0,
546 0 : region.end);
547 : }
548 :
549 0 : res = bus->resource[3];
550 0 : pcibios_resource_to_bus(bridge->bus, &region, res);
551 0 : if (res->flags & IORESOURCE_MEM) {
552 0 : pci_info(bridge, " bridge window %pR\n", res);
553 0 : pci_write_config_dword(bridge, PCI_CB_MEMORY_BASE_1,
554 0 : region.start);
555 0 : pci_write_config_dword(bridge, PCI_CB_MEMORY_LIMIT_1,
556 0 : region.end);
557 : }
558 0 : }
559 : EXPORT_SYMBOL(pci_setup_cardbus);
560 :
561 : /*
562 : * Initialize bridges with base/limit values we have collected. PCI-to-PCI
563 : * Bridge Architecture Specification rev. 1.1 (1998) requires that if there
564 : * are no I/O ports or memory behind the bridge, the corresponding range
565 : * must be turned off by writing base value greater than limit to the
566 : * bridge's base/limit registers.
567 : *
568 : * Note: care must be taken when updating I/O base/limit registers of
569 : * bridges which support 32-bit I/O. This update requires two config space
570 : * writes, so it's quite possible that an I/O window of the bridge will
571 : * have some undesirable address (e.g. 0) after the first write. Ditto
572 : * 64-bit prefetchable MMIO.
573 : */
574 0 : static void pci_setup_bridge_io(struct pci_dev *bridge)
575 : {
576 : struct resource *res;
577 : struct pci_bus_region region;
578 : unsigned long io_mask;
579 : u8 io_base_lo, io_limit_lo;
580 : u16 l;
581 : u32 io_upper16;
582 :
583 0 : io_mask = PCI_IO_RANGE_MASK;
584 0 : if (bridge->io_window_1k)
585 0 : io_mask = PCI_IO_1K_RANGE_MASK;
586 :
587 : /* Set up the top and bottom of the PCI I/O segment for this bus */
588 0 : res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
589 0 : pcibios_resource_to_bus(bridge->bus, &region, res);
590 0 : if (res->flags & IORESOURCE_IO) {
591 0 : pci_read_config_word(bridge, PCI_IO_BASE, &l);
592 0 : io_base_lo = (region.start >> 8) & io_mask;
593 0 : io_limit_lo = (region.end >> 8) & io_mask;
594 0 : l = ((u16) io_limit_lo << 8) | io_base_lo;
595 : /* Set up upper 16 bits of I/O base/limit */
596 0 : io_upper16 = (region.end & 0xffff0000) | (region.start >> 16);
597 0 : pci_info(bridge, " bridge window %pR\n", res);
598 : } else {
599 : /* Clear upper 16 bits of I/O base/limit */
600 0 : io_upper16 = 0;
601 0 : l = 0x00f0;
602 : }
603 : /* Temporarily disable the I/O range before updating PCI_IO_BASE */
604 0 : pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, 0x0000ffff);
605 : /* Update lower 16 bits of I/O base/limit */
606 0 : pci_write_config_word(bridge, PCI_IO_BASE, l);
607 : /* Update upper 16 bits of I/O base/limit */
608 0 : pci_write_config_dword(bridge, PCI_IO_BASE_UPPER16, io_upper16);
609 0 : }
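A worked example of the I/O base/limit encoding performed above, assuming the standard 4K granularity (io_mask == PCI_IO_RANGE_MASK); the window values are made up.

/*
 * Bridge I/O window 0x2000-0x5fff, io_mask = 0xf0:
 *
 *   io_base_lo  = (0x2000 >> 8) & 0xf0 = 0x20
 *   io_limit_lo = (0x5fff >> 8) & 0xf0 = 0x50
 *   l           = (0x50 << 8) | 0x20   = 0x5020   (PCI_IO_BASE/PCI_IO_LIMIT)
 *   io_upper16  = (0x5fff & 0xffff0000) | (0x2000 >> 16) = 0
 *
 * The limit's low 12 bits are implied as 0xfff, so the bridge forwards
 * I/O addresses 0x2000-0x5fff.
 */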
610 :
611 0 : static void pci_setup_bridge_mmio(struct pci_dev *bridge)
612 : {
613 : struct resource *res;
614 : struct pci_bus_region region;
615 : u32 l;
616 :
617 : /* Set up the top and bottom of the PCI Memory segment for this bus */
618 0 : res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
619 0 : pcibios_resource_to_bus(bridge->bus, &region, res);
620 0 : if (res->flags & IORESOURCE_MEM) {
621 0 : l = (region.start >> 16) & 0xfff0;
622 0 : l |= region.end & 0xfff00000;
623 0 : pci_info(bridge, " bridge window %pR\n", res);
624 : } else {
625 : l = 0x0000fff0;
626 : }
627 0 : pci_write_config_dword(bridge, PCI_MEMORY_BASE, l);
628 0 : }
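The same kind of worked example for the non-prefetchable memory window; the addresses are illustrative.

/*
 * Bridge memory window 0xe0000000-0xe03fffff:
 *
 *   l  = (0xe0000000 >> 16) & 0xfff0 = 0x0000e000       (base field)
 *   l |=  0xe03fffff & 0xfff00000    = 0xe0300000       (limit field)
 *   l  = 0xe030e000                    -> PCI_MEMORY_BASE dword
 *
 * The low 20 bits of the limit are implied as 0xfffff, so this decodes
 * back to base 0xe0000000, limit 0xe03fffff.
 */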
629 :
630 0 : static void pci_setup_bridge_mmio_pref(struct pci_dev *bridge)
631 : {
632 : struct resource *res;
633 : struct pci_bus_region region;
634 : u32 l, bu, lu;
635 :
636 : /*
637 : * Clear out the upper 32 bits of PREF limit. If
638 : * PCI_PREF_BASE_UPPER32 was non-zero, this temporarily disables
639 : * PREF range, which is ok.
640 : */
641 0 : pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, 0);
642 :
643 : /* Set up PREF base/limit */
644 0 : bu = lu = 0;
645 0 : res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
646 0 : pcibios_resource_to_bus(bridge->bus, &region, res);
647 0 : if (res->flags & IORESOURCE_PREFETCH) {
648 0 : l = (region.start >> 16) & 0xfff0;
649 0 : l |= region.end & 0xfff00000;
650 0 : if (res->flags & IORESOURCE_MEM_64) {
651 0 : bu = upper_32_bits(region.start);
652 0 : lu = upper_32_bits(region.end);
653 : }
654 0 : pci_info(bridge, " bridge window %pR\n", res);
655 : } else {
656 : l = 0x0000fff0;
657 : }
658 0 : pci_write_config_dword(bridge, PCI_PREF_MEMORY_BASE, l);
659 :
660 : /* Set the upper 32 bits of PREF base & limit */
661 0 : pci_write_config_dword(bridge, PCI_PREF_BASE_UPPER32, bu);
662 0 : pci_write_config_dword(bridge, PCI_PREF_LIMIT_UPPER32, lu);
663 0 : }
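And for a 64-bit prefetchable window (IORESOURCE_MEM_64 set); the 1 GB window below is made up.

/*
 * Prefetchable window 0x38_0000_0000 - 0x38_3fff_ffff:
 *
 *   l  = (start >> 16) & 0xfff0 = 0x0000
 *   l |=  end & 0xfff00000      = 0x3ff00000   -> PCI_PREF_MEMORY_BASE dword
 *   bu = upper_32_bits(start)   = 0x38         -> PCI_PREF_BASE_UPPER32
 *   lu = upper_32_bits(end)     = 0x38         -> PCI_PREF_LIMIT_UPPER32
 *
 * which decodes back to exactly the original 1 GB window.
 */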
664 :
665 0 : static void __pci_setup_bridge(struct pci_bus *bus, unsigned long type)
666 : {
667 0 : struct pci_dev *bridge = bus->self;
668 :
669 0 : pci_info(bridge, "PCI bridge to %pR\n",
670 : &bus->busn_res);
671 :
672 0 : if (type & IORESOURCE_IO)
673 0 : pci_setup_bridge_io(bridge);
674 :
675 0 : if (type & IORESOURCE_MEM)
676 0 : pci_setup_bridge_mmio(bridge);
677 :
678 0 : if (type & IORESOURCE_PREFETCH)
679 0 : pci_setup_bridge_mmio_pref(bridge);
680 :
681 0 : pci_write_config_word(bridge, PCI_BRIDGE_CONTROL, bus->bridge_ctl);
682 0 : }
683 :
684 0 : void __weak pcibios_setup_bridge(struct pci_bus *bus, unsigned long type)
685 : {
686 0 : }
687 :
688 0 : void pci_setup_bridge(struct pci_bus *bus)
689 : {
690 0 : unsigned long type = IORESOURCE_IO | IORESOURCE_MEM |
691 : IORESOURCE_PREFETCH;
692 :
693 0 : pcibios_setup_bridge(bus, type);
694 0 : __pci_setup_bridge(bus, type);
695 0 : }
696 :
697 :
698 0 : int pci_claim_bridge_resource(struct pci_dev *bridge, int i)
699 : {
700 0 : if (i < PCI_BRIDGE_RESOURCES || i > PCI_BRIDGE_RESOURCE_END)
701 : return 0;
702 :
703 0 : if (pci_claim_resource(bridge, i) == 0)
704 : return 0; /* Claimed the window */
705 :
706 0 : if ((bridge->class >> 8) != PCI_CLASS_BRIDGE_PCI)
707 : return 0;
708 :
709 0 : if (!pci_bus_clip_resource(bridge, i))
710 : return -EINVAL; /* Clipping didn't change anything */
711 :
712 0 : switch (i) {
713 : case PCI_BRIDGE_IO_WINDOW:
714 0 : pci_setup_bridge_io(bridge);
715 0 : break;
716 : case PCI_BRIDGE_MEM_WINDOW:
717 0 : pci_setup_bridge_mmio(bridge);
718 0 : break;
719 : case PCI_BRIDGE_PREF_MEM_WINDOW:
720 0 : pci_setup_bridge_mmio_pref(bridge);
721 0 : break;
722 : default:
723 : return -EINVAL;
724 : }
725 :
726 0 : if (pci_claim_resource(bridge, i) == 0)
727 : return 0; /* Claimed a smaller window */
728 :
729 0 : return -EINVAL;
730 : }
731 :
732 : /*
733 : * Check whether the bridge supports optional I/O and prefetchable memory
734 : * ranges. If not, the respective base/limit registers must be read-only
735 : * and read as 0.
736 : */
737 0 : static void pci_bridge_check_ranges(struct pci_bus *bus)
738 : {
739 0 : struct pci_dev *bridge = bus->self;
740 : struct resource *b_res;
741 :
742 0 : b_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
743 0 : b_res->flags |= IORESOURCE_MEM;
744 :
745 0 : if (bridge->io_window) {
746 0 : b_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
747 0 : b_res->flags |= IORESOURCE_IO;
748 : }
749 :
750 0 : if (bridge->pref_window) {
751 0 : b_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
752 0 : b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH;
753 0 : if (bridge->pref_64_window) {
754 0 : b_res->flags |= IORESOURCE_MEM_64 |
755 : PCI_PREF_RANGE_TYPE_64;
756 : }
757 : }
758 0 : }
759 :
760 : /*
761 : * Helper function for sizing routines. Assigned resources have non-NULL
762 : * parent resource.
763 : *
764 : * Return first unassigned resource of the correct type. If there is none,
765 : * return first assigned resource of the correct type. If none of the
766 : * above, return NULL.
767 : *
768 : * Returning an assigned resource of the correct type allows the caller to
769 : * distinguish between already assigned and no resource of the correct type.
770 : */
771 0 : static struct resource *find_bus_resource_of_type(struct pci_bus *bus,
772 : unsigned long type_mask,
773 : unsigned long type)
774 : {
775 0 : struct resource *r, *r_assigned = NULL;
776 : int i;
777 :
778 0 : pci_bus_for_each_resource(bus, r, i) {
779 0 : if (r == &ioport_resource || r == &iomem_resource)
780 0 : continue;
781 0 : if (r && (r->flags & type_mask) == type && !r->parent)
782 : return r;
783 0 : if (r && (r->flags & type_mask) == type && !r_assigned)
784 0 : r_assigned = r;
785 : }
786 : return r_assigned;
787 : }
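A hypothetical sketch of how the sizing code below uses this helper to pick bridge windows; the function name is ours and not part of this file.

static void example_pick_windows(struct pci_bus *bus)
{
	struct resource *mem, *pref64;

	/* Non-prefetchable memory window: MEM set, PREFETCH clear */
	mem = find_bus_resource_of_type(bus,
			IORESOURCE_MEM | IORESOURCE_PREFETCH, IORESOURCE_MEM);

	/* 64-bit prefetchable window: MEM, PREFETCH and MEM_64 all set */
	pref64 = find_bus_resource_of_type(bus,
			IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64,
			IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64);

	/* NULL: no such window; res->parent set: window already assigned */
	if (mem && !mem->parent)
		pr_info("unassigned non-pref window: %pR\n", mem);
	if (!pref64)
		pr_info("no 64-bit prefetchable window on this bus\n");
}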
788 :
789 : static resource_size_t calculate_iosize(resource_size_t size,
790 : resource_size_t min_size,
791 : resource_size_t size1,
792 : resource_size_t add_size,
793 : resource_size_t children_add_size,
794 : resource_size_t old_size,
795 : resource_size_t align)
796 : {
797 0 : if (size < min_size)
798 0 : size = min_size;
799 0 : if (old_size == 1)
800 0 : old_size = 0;
801 : /*
802 : * To be fixed in 2.5: we should have some sort of HAVE_ISA flag in
803 : * struct pci_bus.
804 : */
805 : #if defined(CONFIG_ISA) || defined(CONFIG_EISA)
806 : size = (size & 0xff) + ((size & ~0xffUL) << 2);
807 : #endif
808 0 : size = size + size1;
809 0 : if (size < old_size)
810 0 : size = old_size;
811 :
812 0 : size = ALIGN(max(size, add_size) + children_add_size, align);
813 : return size;
814 : }
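A worked example of the ISA-aliasing expansion above, with an illustrative sum of small I/O BARs.

/*
 * If the small (< 0x400) I/O BARs on the bus add up to size = 0x300:
 *
 *   (0x300 & 0xff) + ((0x300 & ~0xffUL) << 2) = 0x000 + 0xc00 = 0xc00
 *
 * Each full 256-byte chunk is stretched to 1K, leaving room to place the
 * devices in the ISA-alias-free low quarter of each 1K block.
 */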
815 :
816 : static resource_size_t calculate_memsize(resource_size_t size,
817 : resource_size_t min_size,
818 : resource_size_t add_size,
819 : resource_size_t children_add_size,
820 : resource_size_t old_size,
821 : resource_size_t align)
822 : {
823 0 : if (size < min_size)
824 0 : size = min_size;
825 0 : if (old_size == 1)
826 0 : old_size = 0;
827 0 : if (size < old_size)
828 0 : size = old_size;
829 :
830 0 : size = ALIGN(max(size, add_size) + children_add_size, align);
831 : return size;
832 : }
833 :
834 0 : resource_size_t __weak pcibios_window_alignment(struct pci_bus *bus,
835 : unsigned long type)
836 : {
837 0 : return 1;
838 : }
839 :
840 : #define PCI_P2P_DEFAULT_MEM_ALIGN 0x100000 /* 1MiB */
841 : #define PCI_P2P_DEFAULT_IO_ALIGN 0x1000 /* 4KiB */
842 : #define PCI_P2P_DEFAULT_IO_ALIGN_1K 0x400 /* 1KiB */
843 :
844 0 : static resource_size_t window_alignment(struct pci_bus *bus, unsigned long type)
845 : {
846 0 : resource_size_t align = 1, arch_align;
847 :
848 0 : if (type & IORESOURCE_MEM)
849 : align = PCI_P2P_DEFAULT_MEM_ALIGN;
850 0 : else if (type & IORESOURCE_IO) {
851 : /*
852 : * Per spec, I/O windows are 4K-aligned, but some bridges have
853 : * an extension to support 1K alignment.
854 : */
855 0 : if (bus->self && bus->self->io_window_1k)
856 : align = PCI_P2P_DEFAULT_IO_ALIGN_1K;
857 : else
858 0 : align = PCI_P2P_DEFAULT_IO_ALIGN;
859 : }
860 :
861 0 : arch_align = pcibios_window_alignment(bus, type);
862 0 : return max(align, arch_align);
863 : }
864 :
865 : /**
866 : * pbus_size_io() - Size the I/O window of a given bus
867 : *
868 : * @bus: The bus
869 : * @min_size: The minimum I/O window that must be allocated
870 : * @add_size: Additional optional I/O window
871 : * @realloc_head: Track the additional I/O window on this list
872 : *
873 : * Sizing the I/O windows of the PCI-PCI bridge is trivial, since these
874 : * windows have 1K or 4K granularity and the I/O ranges of non-bridge PCI
875 : * devices are limited to 256 bytes. We must be careful with the ISA
876 : * aliasing though.
877 : */
878 0 : static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size,
879 : resource_size_t add_size,
880 : struct list_head *realloc_head)
881 : {
882 : struct pci_dev *dev;
883 0 : struct resource *b_res = find_bus_resource_of_type(bus, IORESOURCE_IO,
884 : IORESOURCE_IO);
885 0 : resource_size_t size = 0, size0 = 0, size1 = 0;
886 0 : resource_size_t children_add_size = 0;
887 : resource_size_t min_align, align;
888 :
889 0 : if (!b_res)
890 : return;
891 :
892 : /* If resource is already assigned, nothing more to do */
893 0 : if (b_res->parent)
894 : return;
895 :
896 0 : min_align = window_alignment(bus, IORESOURCE_IO);
897 0 : list_for_each_entry(dev, &bus->devices, bus_list) {
898 : int i;
899 :
900 0 : for (i = 0; i < PCI_NUM_RESOURCES; i++) {
901 0 : struct resource *r = &dev->resource[i];
902 : unsigned long r_size;
903 :
904 0 : if (r->parent || !(r->flags & IORESOURCE_IO))
905 0 : continue;
906 0 : r_size = resource_size(r);
907 :
908 0 : if (r_size < 0x400)
909 : /* Might be re-aligned for ISA */
910 0 : size += r_size;
911 : else
912 0 : size1 += r_size;
913 :
914 0 : align = pci_resource_alignment(dev, r);
915 0 : if (align > min_align)
916 0 : min_align = align;
917 :
918 0 : if (realloc_head)
919 0 : children_add_size += get_res_add_size(realloc_head, r);
920 : }
921 : }
922 :
923 0 : size0 = calculate_iosize(size, min_size, size1, 0, 0,
924 : resource_size(b_res), min_align);
925 0 : size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
926 0 : calculate_iosize(size, min_size, size1, add_size, children_add_size,
927 : resource_size(b_res), min_align);
928 0 : if (!size0 && !size1) {
929 0 : if (bus->self && (b_res->start || b_res->end))
930 0 : pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
931 : b_res, &bus->busn_res);
932 0 : b_res->flags = 0;
933 0 : return;
934 : }
935 :
936 0 : b_res->start = min_align;
937 0 : b_res->end = b_res->start + size0 - 1;
938 0 : b_res->flags |= IORESOURCE_STARTALIGN;
939 0 : if (bus->self && size1 > size0 && realloc_head) {
940 0 : add_to_list(realloc_head, bus->self, b_res, size1-size0,
941 : min_align);
942 0 : pci_info(bus->self, "bridge window %pR to %pR add_size %llx\n",
943 : b_res, &bus->busn_res,
944 : (unsigned long long) size1 - size0);
945 : }
946 : }
947 :
948 0 : static inline resource_size_t calculate_mem_align(resource_size_t *aligns,
949 : int max_order)
950 : {
951 0 : resource_size_t align = 0;
952 0 : resource_size_t min_align = 0;
953 : int order;
954 :
955 0 : for (order = 0; order <= max_order; order++) {
956 0 : resource_size_t align1 = 1;
957 :
958 0 : align1 <<= (order + 20);
959 :
960 0 : if (!align)
961 : min_align = align1;
962 0 : else if (ALIGN(align + min_align, min_align) < align1)
963 0 : min_align = align1 >> 1;
964 0 : align += aligns[order];
965 : }
966 :
967 0 : return min_align;
968 : }
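A worked trace of calculate_mem_align() for an illustrative mix of BAR alignments.

/*
 * Suppose the bus has one BAR needing 1 MB alignment and one needing 4 MB,
 * so aligns[] = { 1 MB, 0, 4 MB } and max_order = 2:
 *
 *   order 0: align == 0               -> min_align = 1 MB;  align = 1 MB
 *   order 1: ALIGN(2 MB, 1 MB) = 2 MB, not < 2 MB           -> unchanged
 *   order 2: ALIGN(2 MB, 1 MB) = 2 MB < 4 MB -> min_align = 4 MB >> 1 = 2 MB
 *            align = 5 MB
 *
 * The window is therefore given 2 MB minimum alignment rather than the
 * full 4 MB; the heuristic trades a perfectly aligned window for tighter
 * overall packing.
 */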
969 :
970 : /**
971 : * pbus_size_mem() - Size the memory window of a given bus
972 : *
973 : * @bus: The bus
974 : * @mask: Mask the resource flag, then compare it with type
975 : * @type: The type of free resource from bridge
976 : * @type2: Second match type
977 : * @type3: Third match type
978 : * @min_size: The minimum memory window that must be allocated
979 : * @add_size: Additional optional memory window
980 : * @realloc_head: Track the additional memory window on this list
981 : *
982 : * Calculate the size of the bus and minimal alignment which guarantees
983 : * that all child resources fit in this size.
984 : *
985 : * Return -ENOSPC if there's no available bus resource of the desired
986 : * type. Otherwise, set the bus resource start/end to indicate the
987 : * required size, add things to realloc_head (if supplied), and return 0.
988 : */
989 0 : static int pbus_size_mem(struct pci_bus *bus, unsigned long mask,
990 : unsigned long type, unsigned long type2,
991 : unsigned long type3, resource_size_t min_size,
992 : resource_size_t add_size,
993 : struct list_head *realloc_head)
994 : {
995 : struct pci_dev *dev;
996 : resource_size_t min_align, align, size, size0, size1;
997 : resource_size_t aligns[24]; /* Alignments from 1MB to 8TB */
998 : int order, max_order;
999 0 : struct resource *b_res = find_bus_resource_of_type(bus,
1000 : mask | IORESOURCE_PREFETCH, type);
1001 0 : resource_size_t children_add_size = 0;
1002 0 : resource_size_t children_add_align = 0;
1003 0 : resource_size_t add_align = 0;
1004 :
1005 0 : if (!b_res)
1006 : return -ENOSPC;
1007 :
1008 : /* If resource is already assigned, nothing more to do */
1009 0 : if (b_res->parent)
1010 : return 0;
1011 :
1012 0 : memset(aligns, 0, sizeof(aligns));
1013 0 : max_order = 0;
1014 0 : size = 0;
1015 :
1016 0 : list_for_each_entry(dev, &bus->devices, bus_list) {
1017 : int i;
1018 :
1019 0 : for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1020 0 : struct resource *r = &dev->resource[i];
1021 : resource_size_t r_size;
1022 :
1023 0 : if (r->parent || (r->flags & IORESOURCE_PCI_FIXED) ||
1024 0 : ((r->flags & mask) != type &&
1025 0 : (r->flags & mask) != type2 &&
1026 : (r->flags & mask) != type3))
1027 0 : continue;
1028 0 : r_size = resource_size(r);
1029 : #ifdef CONFIG_PCI_IOV
1030 : /* Put SRIOV requested res to the optional list */
1031 : if (realloc_head && i >= PCI_IOV_RESOURCES &&
1032 : i <= PCI_IOV_RESOURCE_END) {
1033 : add_align = max(pci_resource_alignment(dev, r), add_align);
1034 : r->end = r->start - 1;
1035 : add_to_list(realloc_head, dev, r, r_size, 0 /* Don't care */);
1036 : children_add_size += r_size;
1037 : continue;
1038 : }
1039 : #endif
1040 : /*
1041 : * aligns[0] is for 1MB (since bridge memory
1042 : * windows are always at least 1MB aligned), so
1043 : * keep "order" from being negative for smaller
1044 : * resources.
1045 : */
1046 0 : align = pci_resource_alignment(dev, r);
1047 0 : order = __ffs(align) - 20;
1048 0 : if (order < 0)
1049 0 : order = 0;
1050 0 : if (order >= ARRAY_SIZE(aligns)) {
1051 0 : pci_warn(dev, "disabling BAR %d: %pR (bad alignment %#llx)\n",
1052 : i, r, (unsigned long long) align);
1053 0 : r->flags = 0;
1054 0 : continue;
1055 : }
1056 0 : size += max(r_size, align);
1057 : /*
1058 : * Exclude ranges with size > align from calculation of
1059 : * the alignment.
1060 : */
1061 0 : if (r_size <= align)
1062 0 : aligns[order] += align;
1063 0 : if (order > max_order)
1064 0 : max_order = order;
1065 :
1066 0 : if (realloc_head) {
1067 0 : children_add_size += get_res_add_size(realloc_head, r);
1068 0 : children_add_align = get_res_add_align(realloc_head, r);
1069 0 : add_align = max(add_align, children_add_align);
1070 : }
1071 : }
1072 : }
1073 :
1074 0 : min_align = calculate_mem_align(aligns, max_order);
1075 0 : min_align = max(min_align, window_alignment(bus, b_res->flags));
1076 0 : size0 = calculate_memsize(size, min_size, 0, 0, resource_size(b_res), min_align);
1077 0 : add_align = max(min_align, add_align);
1078 0 : size1 = (!realloc_head || (realloc_head && !add_size && !children_add_size)) ? size0 :
1079 0 : calculate_memsize(size, min_size, add_size, children_add_size,
1080 : resource_size(b_res), add_align);
1081 0 : if (!size0 && !size1) {
1082 0 : if (bus->self && (b_res->start || b_res->end))
1083 0 : pci_info(bus->self, "disabling bridge window %pR to %pR (unused)\n",
1084 : b_res, &bus->busn_res);
1085 0 : b_res->flags = 0;
1086 0 : return 0;
1087 : }
1088 0 : b_res->start = min_align;
1089 0 : b_res->end = size0 + min_align - 1;
1090 0 : b_res->flags |= IORESOURCE_STARTALIGN;
1091 0 : if (bus->self && size1 > size0 && realloc_head) {
1092 0 : add_to_list(realloc_head, bus->self, b_res, size1-size0, add_align);
1093 0 : pci_info(bus->self, "bridge window %pR to %pR add_size %llx add_align %llx\n",
1094 : b_res, &bus->busn_res,
1095 : (unsigned long long) (size1 - size0),
1096 : (unsigned long long) add_align);
1097 : }
1098 : return 0;
1099 : }
1100 :
1101 0 : unsigned long pci_cardbus_resource_alignment(struct resource *res)
1102 : {
1103 0 : if (res->flags & IORESOURCE_IO)
1104 0 : return pci_cardbus_io_size;
1105 0 : if (res->flags & IORESOURCE_MEM)
1106 0 : return pci_cardbus_mem_size;
1107 : return 0;
1108 : }
1109 :
1110 0 : static void pci_bus_size_cardbus(struct pci_bus *bus,
1111 : struct list_head *realloc_head)
1112 : {
1113 0 : struct pci_dev *bridge = bus->self;
1114 : struct resource *b_res;
1115 0 : resource_size_t b_res_3_size = pci_cardbus_mem_size * 2;
1116 : u16 ctrl;
1117 :
1118 0 : b_res = &bridge->resource[PCI_CB_BRIDGE_IO_0_WINDOW];
1119 0 : if (b_res->parent)
1120 : goto handle_b_res_1;
1121 : /*
1122 : * Reserve some resources for CardBus. We reserve a fixed amount
1123 : * of bus space for CardBus bridges.
1124 : */
1125 0 : b_res->start = pci_cardbus_io_size;
1126 0 : b_res->end = b_res->start + pci_cardbus_io_size - 1;
1127 0 : b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
1128 0 : if (realloc_head) {
1129 0 : b_res->end -= pci_cardbus_io_size;
1130 0 : add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
1131 : pci_cardbus_io_size);
1132 : }
1133 :
1134 : handle_b_res_1:
1135 0 : b_res = &bridge->resource[PCI_CB_BRIDGE_IO_1_WINDOW];
1136 0 : if (b_res->parent)
1137 : goto handle_b_res_2;
1138 0 : b_res->start = pci_cardbus_io_size;
1139 0 : b_res->end = b_res->start + pci_cardbus_io_size - 1;
1140 0 : b_res->flags |= IORESOURCE_IO | IORESOURCE_STARTALIGN;
1141 0 : if (realloc_head) {
1142 0 : b_res->end -= pci_cardbus_io_size;
1143 0 : add_to_list(realloc_head, bridge, b_res, pci_cardbus_io_size,
1144 : pci_cardbus_io_size);
1145 : }
1146 :
1147 : handle_b_res_2:
1148 : /* MEM1 must not be pref MMIO */
1149 0 : pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1150 0 : if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM1) {
1151 0 : ctrl &= ~PCI_CB_BRIDGE_CTL_PREFETCH_MEM1;
1152 0 : pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
1153 0 : pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1154 : }
1155 :
1156 : /* Check whether prefetchable memory is supported by this bridge. */
1157 0 : pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1158 0 : if (!(ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0)) {
1159 0 : ctrl |= PCI_CB_BRIDGE_CTL_PREFETCH_MEM0;
1160 0 : pci_write_config_word(bridge, PCI_CB_BRIDGE_CONTROL, ctrl);
1161 0 : pci_read_config_word(bridge, PCI_CB_BRIDGE_CONTROL, &ctrl);
1162 : }
1163 :
1164 0 : b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_0_WINDOW];
1165 0 : if (b_res->parent)
1166 : goto handle_b_res_3;
1167 : /*
1168 : * If we have prefetchable memory support, allocate two regions.
1169 : * Otherwise, allocate one region of twice the size.
1170 : */
1171 0 : if (ctrl & PCI_CB_BRIDGE_CTL_PREFETCH_MEM0) {
1172 0 : b_res->start = pci_cardbus_mem_size;
1173 0 : b_res->end = b_res->start + pci_cardbus_mem_size - 1;
1174 0 : b_res->flags |= IORESOURCE_MEM | IORESOURCE_PREFETCH |
1175 : IORESOURCE_STARTALIGN;
1176 0 : if (realloc_head) {
1177 0 : b_res->end -= pci_cardbus_mem_size;
1178 0 : add_to_list(realloc_head, bridge, b_res,
1179 : pci_cardbus_mem_size, pci_cardbus_mem_size);
1180 : }
1181 :
1182 : /* Reduce that to half */
1183 0 : b_res_3_size = pci_cardbus_mem_size;
1184 : }
1185 :
1186 : handle_b_res_3:
1187 0 : b_res = &bridge->resource[PCI_CB_BRIDGE_MEM_1_WINDOW];
1188 0 : if (b_res->parent)
1189 : goto handle_done;
1190 0 : b_res->start = pci_cardbus_mem_size;
1191 0 : b_res->end = b_res->start + b_res_3_size - 1;
1192 0 : b_res->flags |= IORESOURCE_MEM | IORESOURCE_STARTALIGN;
1193 0 : if (realloc_head) {
1194 0 : b_res->end -= b_res_3_size;
1195 0 : add_to_list(realloc_head, bridge, b_res, b_res_3_size,
1196 : pci_cardbus_mem_size);
1197 : }
1198 :
1199 : handle_done:
1200 : ;
1201 0 : }
1202 :
1203 0 : void __pci_bus_size_bridges(struct pci_bus *bus, struct list_head *realloc_head)
1204 : {
1205 : struct pci_dev *dev;
1206 0 : unsigned long mask, prefmask, type2 = 0, type3 = 0;
1207 0 : resource_size_t additional_io_size = 0, additional_mmio_size = 0,
1208 0 : additional_mmio_pref_size = 0;
1209 : struct resource *pref;
1210 : struct pci_host_bridge *host;
1211 : int hdr_type, i, ret;
1212 :
1213 0 : list_for_each_entry(dev, &bus->devices, bus_list) {
1214 0 : struct pci_bus *b = dev->subordinate;
1215 0 : if (!b)
1216 0 : continue;
1217 :
1218 0 : switch (dev->hdr_type) {
1219 : case PCI_HEADER_TYPE_CARDBUS:
1220 0 : pci_bus_size_cardbus(b, realloc_head);
1221 0 : break;
1222 :
1223 : case PCI_HEADER_TYPE_BRIDGE:
1224 : default:
1225 0 : __pci_bus_size_bridges(b, realloc_head);
1226 0 : break;
1227 : }
1228 : }
1229 :
1230 : /* The root bus? */
1231 0 : if (pci_is_root_bus(bus)) {
1232 0 : host = to_pci_host_bridge(bus->bridge);
1233 0 : if (!host->size_windows)
1234 : return;
1235 0 : pci_bus_for_each_resource(bus, pref, i)
1236 0 : if (pref && (pref->flags & IORESOURCE_PREFETCH))
1237 : break;
1238 : hdr_type = -1; /* Intentionally invalid - not a PCI device. */
1239 : } else {
1240 0 : pref = &bus->self->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1241 0 : hdr_type = bus->self->hdr_type;
1242 : }
1243 :
1244 0 : switch (hdr_type) {
1245 : case PCI_HEADER_TYPE_CARDBUS:
1246 : /* Don't size CardBuses yet */
1247 : break;
1248 :
1249 : case PCI_HEADER_TYPE_BRIDGE:
1250 0 : pci_bridge_check_ranges(bus);
1251 0 : if (bus->self->is_hotplug_bridge) {
1252 0 : additional_io_size = pci_hotplug_io_size;
1253 0 : additional_mmio_size = pci_hotplug_mmio_size;
1254 0 : additional_mmio_pref_size = pci_hotplug_mmio_pref_size;
1255 : }
1256 : fallthrough;
1257 : default:
1258 0 : pbus_size_io(bus, realloc_head ? 0 : additional_io_size,
1259 : additional_io_size, realloc_head);
1260 :
1261 : /*
1262 : * If there's a 64-bit prefetchable MMIO window, compute
1263 : * the size required to put all 64-bit prefetchable
1264 : * resources in it.
1265 : */
1266 0 : mask = IORESOURCE_MEM;
1267 0 : prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH;
1268 0 : if (pref && (pref->flags & IORESOURCE_MEM_64)) {
1269 0 : prefmask |= IORESOURCE_MEM_64;
1270 0 : ret = pbus_size_mem(bus, prefmask, prefmask,
1271 : prefmask, prefmask,
1272 : realloc_head ? 0 : additional_mmio_pref_size,
1273 : additional_mmio_pref_size, realloc_head);
1274 :
1275 : /*
1276 : * If successful, all non-prefetchable resources
1277 : * and any 32-bit prefetchable resources will go in
1278 : * the non-prefetchable window.
1279 : */
1280 0 : if (ret == 0) {
1281 0 : mask = prefmask;
1282 0 : type2 = prefmask & ~IORESOURCE_MEM_64;
1283 0 : type3 = prefmask & ~IORESOURCE_PREFETCH;
1284 : }
1285 : }
1286 :
1287 : /*
1288 : * If there is no 64-bit prefetchable window, compute the
1289 : * size required to put all prefetchable resources in the
1290 : * 32-bit prefetchable window (if there is one).
1291 : */
1292 0 : if (!type2) {
1293 0 : prefmask &= ~IORESOURCE_MEM_64;
1294 0 : ret = pbus_size_mem(bus, prefmask, prefmask,
1295 : prefmask, prefmask,
1296 : realloc_head ? 0 : additional_mmio_pref_size,
1297 : additional_mmio_pref_size, realloc_head);
1298 :
1299 : /*
1300 : * If successful, only non-prefetchable resources
1301 : * will go in the non-prefetchable window.
1302 : */
1303 0 : if (ret == 0)
1304 : mask = prefmask;
1305 : else
1306 0 : additional_mmio_size += additional_mmio_pref_size;
1307 :
1308 : type2 = type3 = IORESOURCE_MEM;
1309 : }
1310 :
1311 : /*
1312 : * Compute the size required to put everything else in the
1313 : * non-prefetchable window. This includes:
1314 : *
1315 : * - all non-prefetchable resources
1316 : * - 32-bit prefetchable resources if there's a 64-bit
1317 : * prefetchable window or no prefetchable window at all
1318 : * - 64-bit prefetchable resources if there's no prefetchable
1319 : * window at all
1320 : *
1321 : * Note that the strategy in __pci_assign_resource() must match
1322 : * that used here. Specifically, we cannot put a 32-bit
1323 : * prefetchable resource in a 64-bit prefetchable window.
1324 : */
1325 0 : pbus_size_mem(bus, mask, IORESOURCE_MEM, type2, type3,
1326 : realloc_head ? 0 : additional_mmio_size,
1327 : additional_mmio_size, realloc_head);
1328 0 : break;
1329 : }
1330 : }
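For reference, a concrete expansion of the mask/type combinations set up above when the bridge has a 64-bit prefetchable window and the first pbus_size_mem() call succeeds.

/*
 * prefmask = IORESOURCE_MEM | IORESOURCE_PREFETCH | IORESOURCE_MEM_64
 * mask     = prefmask
 * type     = IORESOURCE_MEM                          32-bit non-pref BARs
 * type2    = IORESOURCE_MEM | IORESOURCE_PREFETCH    32-bit pref BARs
 * type3    = IORESOURCE_MEM | IORESOURCE_MEM_64      64-bit non-pref BARs
 *
 * The second pbus_size_mem() call therefore sizes the non-prefetchable
 * window for everything except 64-bit prefetchable BARs, matching the
 * placement strategy in __pci_assign_resource() noted above.
 */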
1331 :
1332 0 : void pci_bus_size_bridges(struct pci_bus *bus)
1333 : {
1334 0 : __pci_bus_size_bridges(bus, NULL);
1335 0 : }
1336 : EXPORT_SYMBOL(pci_bus_size_bridges);
1337 :
1338 0 : static void assign_fixed_resource_on_bus(struct pci_bus *b, struct resource *r)
1339 : {
1340 : int i;
1341 : struct resource *parent_r;
1342 0 : unsigned long mask = IORESOURCE_IO | IORESOURCE_MEM |
1343 : IORESOURCE_PREFETCH;
1344 :
1345 0 : pci_bus_for_each_resource(b, parent_r, i) {
1346 0 : if (!parent_r)
1347 0 : continue;
1348 :
1349 0 : if ((r->flags & mask) == (parent_r->flags & mask) &&
1350 0 : resource_contains(parent_r, r))
1351 0 : request_resource(parent_r, r);
1352 : }
1353 0 : }
1354 :
1355 : /*
1356 : * Try to assign any resources marked as IORESOURCE_PCI_FIXED, as they are
1357 : * skipped by pbus_assign_resources_sorted().
1358 : */
1359 0 : static void pdev_assign_fixed_resources(struct pci_dev *dev)
1360 : {
1361 : int i;
1362 :
1363 0 : for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1364 : struct pci_bus *b;
1365 0 : struct resource *r = &dev->resource[i];
1366 :
1367 0 : if (r->parent || !(r->flags & IORESOURCE_PCI_FIXED) ||
1368 0 : !(r->flags & (IORESOURCE_IO | IORESOURCE_MEM)))
1369 0 : continue;
1370 :
1371 0 : b = dev->bus;
1372 0 : while (b && !r->parent) {
1373 0 : assign_fixed_resource_on_bus(b, r);
1374 0 : b = b->parent;
1375 : }
1376 : }
1377 0 : }
1378 :
1379 0 : void __pci_bus_assign_resources(const struct pci_bus *bus,
1380 : struct list_head *realloc_head,
1381 : struct list_head *fail_head)
1382 : {
1383 : struct pci_bus *b;
1384 : struct pci_dev *dev;
1385 :
1386 0 : pbus_assign_resources_sorted(bus, realloc_head, fail_head);
1387 :
1388 0 : list_for_each_entry(dev, &bus->devices, bus_list) {
1389 0 : pdev_assign_fixed_resources(dev);
1390 :
1391 0 : b = dev->subordinate;
1392 0 : if (!b)
1393 0 : continue;
1394 :
1395 0 : __pci_bus_assign_resources(b, realloc_head, fail_head);
1396 :
1397 0 : switch (dev->hdr_type) {
1398 : case PCI_HEADER_TYPE_BRIDGE:
1399 0 : if (!pci_is_enabled(dev))
1400 : pci_setup_bridge(b);
1401 : break;
1402 :
1403 : case PCI_HEADER_TYPE_CARDBUS:
1404 0 : pci_setup_cardbus(b);
1405 0 : break;
1406 :
1407 : default:
1408 0 : pci_info(dev, "not setting up bridge for bus %04x:%02x\n",
1409 : pci_domain_nr(b), b->number);
1410 0 : break;
1411 : }
1412 : }
1413 0 : }
1414 :
1415 0 : void pci_bus_assign_resources(const struct pci_bus *bus)
1416 : {
1417 0 : __pci_bus_assign_resources(bus, NULL, NULL);
1418 0 : }
1419 : EXPORT_SYMBOL(pci_bus_assign_resources);
1420 :
1421 0 : static void pci_claim_device_resources(struct pci_dev *dev)
1422 : {
1423 : int i;
1424 :
1425 0 : for (i = 0; i < PCI_BRIDGE_RESOURCES; i++) {
1426 0 : struct resource *r = &dev->resource[i];
1427 :
1428 0 : if (!r->flags || r->parent)
1429 0 : continue;
1430 :
1431 0 : pci_claim_resource(dev, i);
1432 : }
1433 0 : }
1434 :
1435 0 : static void pci_claim_bridge_resources(struct pci_dev *dev)
1436 : {
1437 : int i;
1438 :
1439 0 : for (i = PCI_BRIDGE_RESOURCES; i < PCI_NUM_RESOURCES; i++) {
1440 0 : struct resource *r = &dev->resource[i];
1441 :
1442 0 : if (!r->flags || r->parent)
1443 0 : continue;
1444 :
1445 0 : pci_claim_bridge_resource(dev, i);
1446 : }
1447 0 : }
1448 :
1449 0 : static void pci_bus_allocate_dev_resources(struct pci_bus *b)
1450 : {
1451 : struct pci_dev *dev;
1452 : struct pci_bus *child;
1453 :
1454 0 : list_for_each_entry(dev, &b->devices, bus_list) {
1455 0 : pci_claim_device_resources(dev);
1456 :
1457 0 : child = dev->subordinate;
1458 0 : if (child)
1459 0 : pci_bus_allocate_dev_resources(child);
1460 : }
1461 0 : }
1462 :
1463 0 : static void pci_bus_allocate_resources(struct pci_bus *b)
1464 : {
1465 : struct pci_bus *child;
1466 :
1467 : /*
1468 : * Carry out a depth-first search on the PCI bus tree to allocate
1469 : * bridge apertures. Read the programmed bridge bases and
1470 : * recursively claim the respective bridge resources.
1471 : */
1472 0 : if (b->self) {
1473 0 : pci_read_bridge_bases(b);
1474 0 : pci_claim_bridge_resources(b->self);
1475 : }
1476 :
1477 0 : list_for_each_entry(child, &b->children, node)
1478 0 : pci_bus_allocate_resources(child);
1479 0 : }
1480 :
1481 0 : void pci_bus_claim_resources(struct pci_bus *b)
1482 : {
1483 0 : pci_bus_allocate_resources(b);
1484 0 : pci_bus_allocate_dev_resources(b);
1485 0 : }
1486 : EXPORT_SYMBOL(pci_bus_claim_resources);
1487 :
1488 0 : static void __pci_bridge_assign_resources(const struct pci_dev *bridge,
1489 : struct list_head *add_head,
1490 : struct list_head *fail_head)
1491 : {
1492 : struct pci_bus *b;
1493 :
1494 0 : pdev_assign_resources_sorted((struct pci_dev *)bridge,
1495 : add_head, fail_head);
1496 :
1497 0 : b = bridge->subordinate;
1498 0 : if (!b)
1499 : return;
1500 :
1501 0 : __pci_bus_assign_resources(b, add_head, fail_head);
1502 :
1503 0 : switch (bridge->class >> 8) {
1504 : case PCI_CLASS_BRIDGE_PCI:
1505 : pci_setup_bridge(b);
1506 : break;
1507 :
1508 : case PCI_CLASS_BRIDGE_CARDBUS:
1509 0 : pci_setup_cardbus(b);
1510 0 : break;
1511 :
1512 : default:
1513 0 : pci_info(bridge, "not setting up bridge for bus %04x:%02x\n",
1514 : pci_domain_nr(b), b->number);
1515 0 : break;
1516 : }
1517 : }
1518 :
1519 : #define PCI_RES_TYPE_MASK \
1520 : (IORESOURCE_IO | IORESOURCE_MEM | IORESOURCE_PREFETCH |\
1521 : IORESOURCE_MEM_64)
1522 :
1523 0 : static void pci_bridge_release_resources(struct pci_bus *bus,
1524 : unsigned long type)
1525 : {
1526 0 : struct pci_dev *dev = bus->self;
1527 : struct resource *r;
1528 : unsigned int old_flags;
1529 : struct resource *b_res;
1530 0 : int idx = 1;
1531 :
1532 0 : b_res = &dev->resource[PCI_BRIDGE_RESOURCES];
1533 :
1534 : /*
1535 : * 1. If IO port assignment fails, release bridge IO port.
1536 : * 2. If non pref MMIO assignment fails, release bridge nonpref MMIO.
1537 : * 3. If 64bit pref MMIO assignment fails, and bridge pref is 64bit,
1538 : * release bridge pref MMIO.
1539 : * 4. If pref MMIO assignment fails, and bridge pref is 32bit,
1540 : * release bridge pref MMIO.
1541 : * 5. If pref MMIO assignment fails, and bridge pref is not
1542 : * assigned, release bridge nonpref MMIO.
1543 : */
1544 0 : if (type & IORESOURCE_IO)
1545 : idx = 0;
1546 0 : else if (!(type & IORESOURCE_PREFETCH))
1547 : idx = 1;
1548 0 : else if ((type & IORESOURCE_MEM_64) &&
1549 0 : (b_res[2].flags & IORESOURCE_MEM_64))
1550 : idx = 2;
1551 0 : else if (!(b_res[2].flags & IORESOURCE_MEM_64) &&
1552 : (b_res[2].flags & IORESOURCE_PREFETCH))
1553 : idx = 2;
1554 : else
1555 0 : idx = 1;
1556 :
1557 0 : r = &b_res[idx];
1558 :
1559 0 : if (!r->parent)
1560 : return;
1561 :
1562 : /* If there are children, release them all */
1563 0 : release_child_resources(r);
1564 0 : if (!release_resource(r)) {
1565 0 : type = old_flags = r->flags & PCI_RES_TYPE_MASK;
1566 0 : pci_info(dev, "resource %d %pR released\n",
1567 : PCI_BRIDGE_RESOURCES + idx, r);
1568 : /* Keep the old size */
1569 0 : r->end = resource_size(r) - 1;
1570 0 : r->start = 0;
1571 0 : r->flags = 0;
1572 :
1573 : /* Avoid touching the one without PREF */
1574 0 : if (type & IORESOURCE_PREFETCH)
1575 0 : type = IORESOURCE_PREFETCH;
1576 0 : __pci_setup_bridge(bus, type);
1577 : /* For next child res under same bridge */
1578 0 : r->flags = old_flags;
1579 : }
1580 : }
1581 :
1582 : enum release_type {
1583 : leaf_only,
1584 : whole_subtree,
1585 : };
1586 :
1587 : /*
1588 : * Try to release PCI bridge resources from leaf bridge, so we can allocate
1589 : * a larger window later.
1590 : */
1591 0 : static void pci_bus_release_bridge_resources(struct pci_bus *bus,
1592 : unsigned long type,
1593 : enum release_type rel_type)
1594 : {
1595 : struct pci_dev *dev;
1596 0 : bool is_leaf_bridge = true;
1597 :
1598 0 : list_for_each_entry(dev, &bus->devices, bus_list) {
1599 0 : struct pci_bus *b = dev->subordinate;
1600 0 : if (!b)
1601 0 : continue;
1602 :
1603 0 : is_leaf_bridge = false;
1604 :
1605 0 : if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1606 0 : continue;
1607 :
1608 0 : if (rel_type == whole_subtree)
1609 0 : pci_bus_release_bridge_resources(b, type,
1610 : whole_subtree);
1611 : }
1612 :
1613 0 : if (pci_is_root_bus(bus))
1614 : return;
1615 :
1616 0 : if ((bus->self->class >> 8) != PCI_CLASS_BRIDGE_PCI)
1617 : return;
1618 :
1619 0 : if ((rel_type == whole_subtree) || is_leaf_bridge)
1620 0 : pci_bridge_release_resources(bus, type);
1621 : }
1622 :
1623 0 : static void pci_bus_dump_res(struct pci_bus *bus)
1624 : {
1625 : struct resource *res;
1626 : int i;
1627 :
1628 0 : pci_bus_for_each_resource(bus, res, i) {
1629 0 : if (!res || !res->end || !res->flags)
1630 0 : continue;
1631 :
1632 0 : dev_info(&bus->dev, "resource %d %pR\n", i, res);
1633 : }
1634 0 : }
1635 :
1636 0 : static void pci_bus_dump_resources(struct pci_bus *bus)
1637 : {
1638 : struct pci_bus *b;
1639 : struct pci_dev *dev;
1640 :
1641 :
1642 0 : pci_bus_dump_res(bus);
1643 :
1644 0 : list_for_each_entry(dev, &bus->devices, bus_list) {
1645 0 : b = dev->subordinate;
1646 0 : if (!b)
1647 0 : continue;
1648 :
1649 0 : pci_bus_dump_resources(b);
1650 : }
1651 0 : }
1652 :
1653 0 : static int pci_bus_get_depth(struct pci_bus *bus)
1654 : {
1655 0 : int depth = 0;
1656 : struct pci_bus *child_bus;
1657 :
1658 0 : list_for_each_entry(child_bus, &bus->children, node) {
1659 : int ret;
1660 :
1661 0 : ret = pci_bus_get_depth(child_bus);
1662 0 : if (ret + 1 > depth)
1663 0 : depth = ret + 1;
1664 : }
1665 :
1666 0 : return depth;
1667 : }
1668 :
1669 : /*
1670 : * -1: undefined, will auto detect later
1671 : * 0: disabled by user
1672 : * 1: disabled by auto detect
1673 : * 2: enabled by user
1674 : * 3: enabled by auto detect
1675 : */
1676 : enum enable_type {
1677 : undefined = -1,
1678 : user_disabled,
1679 : auto_disabled,
1680 : user_enabled,
1681 : auto_enabled,
1682 : };
1683 :
1684 : static enum enable_type pci_realloc_enable = undefined;
1685 0 : void __init pci_realloc_get_opt(char *str)
1686 : {
1687 0 : if (!strncmp(str, "off", 3))
1688 0 : pci_realloc_enable = user_disabled;
1689 0 : else if (!strncmp(str, "on", 2))
1690 0 : pci_realloc_enable = user_enabled;
1691 0 : }
1692 : static bool pci_realloc_enabled(enum enable_type enable)
1693 : {
1694 : return enable >= user_enabled;
1695 : }
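A usage note, assuming the usual "pci=realloc" boot parameter is what routes its argument to pci_realloc_get_opt() (the command-line wiring lives elsewhere in the PCI core).

/*
 *   pci=realloc=off  ->  pci_realloc_enable = user_disabled
 *   pci=realloc=on   ->  pci_realloc_enable = user_enabled
 *   (nothing given)  ->  undefined; auto-detected later from unassigned
 *                        SR-IOV BARs, see pci_realloc_detect() below
 */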
1696 :
1697 : #if defined(CONFIG_PCI_IOV) && defined(CONFIG_PCI_REALLOC_ENABLE_AUTO)
1698 : static int iov_resources_unassigned(struct pci_dev *dev, void *data)
1699 : {
1700 : int i;
1701 : bool *unassigned = data;
1702 :
1703 : for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
1704 : struct resource *r = &dev->resource[i + PCI_IOV_RESOURCES];
1705 : struct pci_bus_region region;
1706 :
1707 : /* Not assigned or rejected by kernel? */
1708 : if (!r->flags)
1709 : continue;
1710 :
1711 : pcibios_resource_to_bus(dev->bus, &region, r);
1712 : if (!region.start) {
1713 : *unassigned = true;
1714 : return 1; /* Return early from pci_walk_bus() */
1715 : }
1716 : }
1717 :
1718 : return 0;
1719 : }
1720 :
1721 : static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1722 : enum enable_type enable_local)
1723 : {
1724 : bool unassigned = false;
1725 : struct pci_host_bridge *host;
1726 :
1727 : if (enable_local != undefined)
1728 : return enable_local;
1729 :
1730 : host = pci_find_host_bridge(bus);
1731 : if (host->preserve_config)
1732 : return auto_disabled;
1733 :
1734 : pci_walk_bus(bus, iov_resources_unassigned, &unassigned);
1735 : if (unassigned)
1736 : return auto_enabled;
1737 :
1738 : return enable_local;
1739 : }
1740 : #else
1741 : static enum enable_type pci_realloc_detect(struct pci_bus *bus,
1742 : enum enable_type enable_local)
1743 : {
1744 : return enable_local;
1745 : }
1746 : #endif
1747 :
1748 0 : static void adjust_bridge_window(struct pci_dev *bridge, struct resource *res,
1749 : struct list_head *add_list,
1750 : resource_size_t new_size)
1751 : {
1752 0 : resource_size_t add_size, size = resource_size(res);
1753 :
1754 0 : if (res->parent)
1755 : return;
1756 :
1757 0 : if (!new_size)
1758 : return;
1759 :
1760 0 : if (new_size > size) {
1761 : add_size = new_size - size;
1762 : pci_dbg(bridge, "bridge window %pR extended by %pa\n", res,
1763 : &add_size);
1764 0 : } else if (new_size < size) {
1765 : add_size = size - new_size;
1766 : pci_dbg(bridge, "bridge window %pR shrunken by %pa\n", res,
1767 : &add_size);
1768 : } else {
1769 : return;
1770 : }
1771 :
1772 0 : res->end = res->start + new_size - 1;
1773 :
1774 : /* If the resource is part of the add_list, remove it now */
1775 0 : if (add_list)
1776 0 : remove_from_list(add_list, res);
1777 : }
1778 :
1779 0 : static void remove_dev_resource(struct resource *avail, struct pci_dev *dev,
1780 : struct resource *res)
1781 : {
1782 : resource_size_t size, align, tmp;
1783 :
1784 0 : size = resource_size(res);
1785 0 : if (!size)
1786 : return;
1787 :
1788 0 : align = pci_resource_alignment(dev, res);
1789 0 : align = align ? ALIGN(avail->start, align) - avail->start : 0;
1790 0 : tmp = align + size;
1791 0 : avail->start = min(avail->start + tmp, avail->end + 1);
1792 : }
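/*
 * Worked example for the arithmetic above (illustrative numbers): with
 * avail = [0x1100, 0x4fff], a resource of size 0x200 and alignment 0x1000,
 * the aligned placement starts at ALIGN(0x1100, 0x1000) = 0x2000, so 0xf00
 * of padding is consumed in addition to the 0x200 of payload. avail->start
 * therefore advances by 0x1100 to 0x2200, clamped to avail->end + 1 if it
 * would overshoot.
 */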
1793 :
1794 0 : static void remove_dev_resources(struct pci_dev *dev, struct resource *io,
1795 : struct resource *mmio,
1796 : struct resource *mmio_pref)
1797 : {
1798 : int i;
1799 :
1800 0 : for (i = 0; i < PCI_NUM_RESOURCES; i++) {
1801 0 : struct resource *res = &dev->resource[i];
1802 :
1803 0 : if (resource_type(res) == IORESOURCE_IO) {
1804 0 : remove_dev_resource(io, dev, res);
1805 0 : } else if (resource_type(res) == IORESOURCE_MEM) {
1806 :
1807 : /*
1808 : * Make sure prefetchable memory is reduced from
1809 : * the correct resource. Specifically, we put 32-bit
1810 : * prefetchable memory in the non-prefetchable window
1811 : * if there is a 64-bit prefetchable window.
1812 : *
1813 : * See comments in __pci_bus_size_bridges() for
1814 : * more information.
1815 : */
1816 0 : if ((res->flags & IORESOURCE_PREFETCH) &&
1817 0 : ((res->flags & IORESOURCE_MEM_64) ==
1818 0 : (mmio_pref->flags & IORESOURCE_MEM_64)))
1819 0 : remove_dev_resource(mmio_pref, dev, res);
1820 : else
1821 0 : remove_dev_resource(mmio, dev, res);
1822 : }
1823 : }
1824 0 : }
1825 :
1826 : /*
1827 : * io, mmio and mmio_pref contain the total amount of bridge window space
1828 : * available. This includes the minimal space needed to cover all the
1829 : * existing devices on the bus and the possible extra space that can be
1830 : * shared with the bridges.
1831 : */
1832 0 : static void pci_bus_distribute_available_resources(struct pci_bus *bus,
1833 : struct list_head *add_list,
1834 : struct resource io,
1835 : struct resource mmio,
1836 : struct resource mmio_pref)
1837 : {
1838 0 : unsigned int normal_bridges = 0, hotplug_bridges = 0;
1839 : struct resource *io_res, *mmio_res, *mmio_pref_res;
1840 0 : struct pci_dev *dev, *bridge = bus->self;
1841 : resource_size_t io_per_b, mmio_per_b, mmio_pref_per_b, align;
1842 :
1843 0 : io_res = &bridge->resource[PCI_BRIDGE_IO_WINDOW];
1844 0 : mmio_res = &bridge->resource[PCI_BRIDGE_MEM_WINDOW];
1845 0 : mmio_pref_res = &bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1846 :
1847 : /*
1848 : * This bridge's own window alignment has not been accounted for yet,
1849 : * so do that now, before extending its bridge windows.
1850 : */
1851 0 : align = pci_resource_alignment(bridge, io_res);
1852 0 : if (!io_res->parent && align)
1853 0 : io.start = min(ALIGN(io.start, align), io.end + 1);
1854 :
1855 0 : align = pci_resource_alignment(bridge, mmio_res);
1856 0 : if (!mmio_res->parent && align)
1857 0 : mmio.start = min(ALIGN(mmio.start, align), mmio.end + 1);
1858 :
1859 0 : align = pci_resource_alignment(bridge, mmio_pref_res);
1860 0 : if (!mmio_pref_res->parent && align)
1861 0 : mmio_pref.start = min(ALIGN(mmio_pref.start, align),
1862 : mmio_pref.end + 1);
1863 :
1864 : /*
1865 : * Now that we have adjusted for alignment, update the bridge window
1866 : * resources to fill as much remaining resource space as possible.
1867 : */
1868 0 : adjust_bridge_window(bridge, io_res, add_list, resource_size(&io));
1869 0 : adjust_bridge_window(bridge, mmio_res, add_list, resource_size(&mmio));
1870 0 : adjust_bridge_window(bridge, mmio_pref_res, add_list,
1871 : resource_size(&mmio_pref));
1872 :
1873 : /*
1874 : * Calculate how many hotplug bridges and normal bridges there
1875 : * are on this bus. We will distribute the additional available
1876 : * resources between hotplug bridges.
1877 : */
1878 0 : for_each_pci_bridge(dev, bus) {
1879 0 : if (dev->is_hotplug_bridge)
1880 0 : hotplug_bridges++;
1881 : else
1882 0 : normal_bridges++;
1883 : }
1884 :
1885 0 : if (!(hotplug_bridges + normal_bridges))
1886 : return;
1887 :
1888 : /*
1889 : * Calculate the amount of space we can forward from "bus" to any
1890 : * downstream buses, i.e., the space left over after assigning the
1891 : * BARs and windows on "bus".
1892 : */
1893 0 : list_for_each_entry(dev, &bus->devices, bus_list) {
1894 0 : if (!dev->is_virtfn)
1895 0 : remove_dev_resources(dev, &io, &mmio, &mmio_pref);
1896 : }
1897 :
1898 : /*
1899 : * If there is at least one hotplug bridge on this bus, the extra
1900 : * resource space left after the reductions above is split between
1901 : * the hotplug bridges.
1902 : *
1903 : * If there are no hotplug bridges, the extra resource space is
1904 : * split between the non-hotplug bridges instead, so that possible
1905 : * hotplug bridges below them can get the extra space as well.
1906 : */
1907 0 : if (hotplug_bridges) {
1908 0 : io_per_b = div64_ul(resource_size(&io), hotplug_bridges);
1909 0 : mmio_per_b = div64_ul(resource_size(&mmio), hotplug_bridges);
1910 0 : mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
1911 : hotplug_bridges);
1912 : } else {
1913 0 : io_per_b = div64_ul(resource_size(&io), normal_bridges);
1914 0 : mmio_per_b = div64_ul(resource_size(&mmio), normal_bridges);
1915 0 : mmio_pref_per_b = div64_ul(resource_size(&mmio_pref),
1916 : normal_bridges);
1917 : }
1918 :
1919 0 : for_each_pci_bridge(dev, bus) {
1920 : struct resource *res;
1921 : struct pci_bus *b;
1922 :
1923 0 : b = dev->subordinate;
1924 0 : if (!b)
1925 0 : continue;
1926 0 : if (hotplug_bridges && !dev->is_hotplug_bridge)
1927 0 : continue;
1928 :
1929 0 : res = &dev->resource[PCI_BRIDGE_IO_WINDOW];
1930 :
1931 : /*
1932 : * Make sure the split resource space is properly aligned
1933 : * for bridge windows (align it down to avoid going above
1934 : * what is available).
1935 : */
1936 0 : align = pci_resource_alignment(dev, res);
1937 0 : io.end = align ? io.start + ALIGN_DOWN(io_per_b, align) - 1
1938 0 : : io.start + io_per_b - 1;
1939 :
1940 : /*
1941 : * The x_per_b values hold only the extra space that can be
1942 : * added for each bridge; the minimum required space has
1943 : * already been reserved, so move x.start back down so the
1944 : * window covers both the minimum and the extra share.
1945 : */
1946 0 : io.start -= resource_size(res);
1947 :
1948 0 : res = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
1949 0 : align = pci_resource_alignment(dev, res);
1950 0 : mmio.end = align ? mmio.start + ALIGN_DOWN(mmio_per_b, align) - 1
1951 0 : : mmio.start + mmio_per_b - 1;
1952 0 : mmio.start -= resource_size(res);
1953 :
1954 0 : res = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1955 0 : align = pci_resource_alignment(dev, res);
1956 0 : mmio_pref.end = align ? mmio_pref.start +
1957 0 : ALIGN_DOWN(mmio_pref_per_b, align) - 1
1958 0 : : mmio_pref.start + mmio_pref_per_b - 1;
1959 0 : mmio_pref.start -= resource_size(res);
1960 :
1961 0 : pci_bus_distribute_available_resources(b, add_list, io, mmio,
1962 : mmio_pref);
1963 :
1964 0 : io.start += io.end + 1;
1965 0 : mmio.start += mmio.end + 1;
1966 0 : mmio_pref.start += mmio_pref.end + 1;
1967 : }
1968 : }
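/*
 * Worked example for the split above (illustrative numbers): suppose "bus"
 * has two hotplug bridges and 2 MB of extra non-prefetchable memory space
 * remains after remove_dev_resources(). Each hotplug bridge is then offered
 * mmio_per_b = 1 MB, aligned down to its window alignment (typically 1 MB
 * for memory windows), and the range handed to the recursive call is also
 * widened to cover the minimum already reserved for that bridge's existing
 * window. Non-hotplug bridges get nothing in this case.
 */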
1969 :
1970 0 : static void pci_bridge_distribute_available_resources(struct pci_dev *bridge,
1971 : struct list_head *add_list)
1972 : {
1973 : struct resource available_io, available_mmio, available_mmio_pref;
1974 :
1975 0 : if (!bridge->is_hotplug_bridge)
1976 0 : return;
1977 :
1978 : pci_dbg(bridge, "distributing available resources\n");
1979 :
1980 : /* Take the initial extra resources from the hotplug port */
1981 0 : available_io = bridge->resource[PCI_BRIDGE_IO_WINDOW];
1982 0 : available_mmio = bridge->resource[PCI_BRIDGE_MEM_WINDOW];
1983 0 : available_mmio_pref = bridge->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
1984 :
1985 0 : pci_bus_distribute_available_resources(bridge->subordinate,
1986 : add_list, available_io,
1987 : available_mmio,
1988 : available_mmio_pref);
1989 : }
1990 :
1991 : static bool pci_bridge_resources_not_assigned(struct pci_dev *dev)
1992 : {
1993 : const struct resource *r;
1994 :
1995 : /*
1996 : * If the child device's resources are not yet assigned, it means we
1997 : * (not the boot firmware) are configuring them, so we should be
1998 : * able to extend the upstream bridge resources in the same way we
1999 : * do in the normal hotplug case.
2000 : */
2001 0 : r = &dev->resource[PCI_BRIDGE_IO_WINDOW];
2002 0 : if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2003 : return false;
2004 0 : r = &dev->resource[PCI_BRIDGE_MEM_WINDOW];
2005 0 : if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2006 : return false;
2007 0 : r = &dev->resource[PCI_BRIDGE_PREF_MEM_WINDOW];
2008 0 : if (r->flags && !(r->flags & IORESOURCE_STARTALIGN))
2009 : return false;
2010 :
2011 : return true;
2012 : }
2013 :
2014 : static void
2015 0 : pci_root_bus_distribute_available_resources(struct pci_bus *bus,
2016 : struct list_head *add_list)
2017 : {
2018 0 : struct pci_dev *dev, *bridge = bus->self;
2019 :
2020 0 : for_each_pci_bridge(dev, bus) {
2021 : struct pci_bus *b;
2022 :
2023 0 : b = dev->subordinate;
2024 0 : if (!b)
2025 0 : continue;
2026 :
2027 : /*
2028 : * Need to check "bridge" here too because it is NULL
2029 : * in the case of the root bus.
2030 : */
2031 0 : if (bridge && pci_bridge_resources_not_assigned(dev))
2032 0 : pci_bridge_distribute_available_resources(bridge,
2033 : add_list);
2034 : else
2035 0 : pci_root_bus_distribute_available_resources(b, add_list);
2036 : }
2037 0 : }
2038 :
2039 : /*
2040 : * The first try will not touch PCI bridge resources.
2041 : * The second and later tries will release small leaf bridge resources.
2042 : * We keep retrying up to the maximum bus depth if no good fit is found.
2043 : */
2044 0 : void pci_assign_unassigned_root_bus_resources(struct pci_bus *bus)
2045 : {
2046 0 : LIST_HEAD(realloc_head);
2047 : /* List of resources that want additional resources */
2048 0 : struct list_head *add_list = NULL;
2049 0 : int tried_times = 0;
2050 0 : enum release_type rel_type = leaf_only;
2051 0 : LIST_HEAD(fail_head);
2052 : struct pci_dev_resource *fail_res;
2053 0 : int pci_try_num = 1;
2054 : enum enable_type enable_local;
2055 :
2056 : /* Don't realloc if asked to do so */
2057 0 : enable_local = pci_realloc_detect(bus, pci_realloc_enable);
2058 0 : if (pci_realloc_enabled(enable_local)) {
2059 0 : int max_depth = pci_bus_get_depth(bus);
2060 :
2061 0 : pci_try_num = max_depth + 1;
2062 0 : dev_info(&bus->dev, "max bus depth: %d pci_try_num: %d\n",
2063 : max_depth, pci_try_num);
2064 : }
2065 :
2066 : again:
2067 : /*
2068 : * Only the last try uses add_list; earlier tries treat the optional
2069 : * sizes as mandatory so parent bridge resources can be reallocated.
2070 : */
2071 0 : if (tried_times + 1 == pci_try_num)
2072 0 : add_list = &realloc_head;
2073 : /*
2074 : * Depth first, calculate sizes and alignments of all subordinate buses.
2075 : */
2076 0 : __pci_bus_size_bridges(bus, add_list);
2077 :
2078 0 : pci_root_bus_distribute_available_resources(bus, add_list);
2079 :
2080 : /* Depth last, allocate resources and update the hardware. */
2081 0 : __pci_bus_assign_resources(bus, add_list, &fail_head);
2082 0 : if (add_list)
2083 0 : BUG_ON(!list_empty(add_list));
2084 0 : tried_times++;
2085 :
2086 : /* Any device complain? */
2087 0 : if (list_empty(&fail_head))
2088 : goto dump;
2089 :
2090 0 : if (tried_times >= pci_try_num) {
2091 0 : if (enable_local == undefined)
2092 0 : dev_info(&bus->dev, "Some PCI device resources are unassigned, try booting with pci=realloc\n");
2093 0 : else if (enable_local == auto_enabled)
2094 0 : dev_info(&bus->dev, "Automatically enabled pci realloc, if you have problem, try booting with pci=realloc=off\n");
2095 :
2096 0 : free_list(&fail_head);
2097 0 : goto dump;
2098 : }
2099 :
2100 0 : dev_info(&bus->dev, "No. %d try to assign unassigned res\n",
2101 : tried_times + 1);
2102 :
2103 : /* From the third try on, don't restrict the release to leaf bridges */
2104 0 : if ((tried_times + 1) > 2)
2105 0 : rel_type = whole_subtree;
2106 :
2107 : /*
2108 : * Try to release leaf bridge resources that are too small to fit the
2109 : * resources of the child devices under that bridge.
2110 : */
2111 0 : list_for_each_entry(fail_res, &fail_head, list)
2112 0 : pci_bus_release_bridge_resources(fail_res->dev->bus,
2113 0 : fail_res->flags & PCI_RES_TYPE_MASK,
2114 : rel_type);
2115 :
2116 : /* Restore size and flags */
2117 0 : list_for_each_entry(fail_res, &fail_head, list) {
2118 0 : struct resource *res = fail_res->res;
2119 : int idx;
2120 :
2121 0 : res->start = fail_res->start;
2122 0 : res->end = fail_res->end;
2123 0 : res->flags = fail_res->flags;
2124 :
2125 0 : if (pci_is_bridge(fail_res->dev)) {
2126 0 : idx = res - &fail_res->dev->resource[0];
2127 0 : if (idx >= PCI_BRIDGE_RESOURCES &&
2128 : idx <= PCI_BRIDGE_RESOURCE_END)
2129 0 : res->flags = 0;
2130 : }
2131 : }
2132 0 : free_list(&fail_head);
2133 :
2134 0 : goto again;
2135 :
2136 : dump:
2137 : /* Dump the resources on all buses */
2138 0 : pci_bus_dump_resources(bus);
2139 0 : }
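/*
 * Worked example for the retry loop above (illustrative numbers): with
 * realloc enabled and a maximum bus depth of 2, pci_try_num is 3. The first
 * two tries run with add_list == NULL, so optional sizes are treated as
 * mandatory. After the first failed try only leaf bridge windows are
 * released; after the second (and any later) failed try whole subtrees may
 * be released. Only the final try passes &realloc_head as add_list, applying
 * the optional sizes on top of the mandatory ones.
 */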
2140 :
2141 0 : void __init pci_assign_unassigned_resources(void)
2142 : {
2143 : struct pci_bus *root_bus;
2144 :
2145 0 : list_for_each_entry(root_bus, &pci_root_buses, node) {
2146 0 : pci_assign_unassigned_root_bus_resources(root_bus);
2147 :
2148 : /* Make sure the root bridge has a companion ACPI device */
2149 : if (ACPI_HANDLE(root_bus->bridge))
2150 : acpi_ioapic_add(ACPI_HANDLE(root_bus->bridge));
2151 : }
2152 0 : }
2153 :
2154 0 : void pci_assign_unassigned_bridge_resources(struct pci_dev *bridge)
2155 : {
2156 0 : struct pci_bus *parent = bridge->subordinate;
2157 : /* List of resources that want additional resources */
2158 0 : LIST_HEAD(add_list);
2159 :
2160 0 : int tried_times = 0;
2161 0 : LIST_HEAD(fail_head);
2162 : struct pci_dev_resource *fail_res;
2163 : int retval;
2164 :
2165 : again:
2166 0 : __pci_bus_size_bridges(parent, &add_list);
2167 :
2168 : /*
2169 : * Distribute remaining resources (if any) equally between hotplug
2170 : * bridges below. This makes it possible to extend the hierarchy
2171 : * later without running out of resources.
2172 : */
2173 0 : pci_bridge_distribute_available_resources(bridge, &add_list);
2174 :
2175 0 : __pci_bridge_assign_resources(bridge, &add_list, &fail_head);
2176 0 : BUG_ON(!list_empty(&add_list));
2177 0 : tried_times++;
2178 :
2179 0 : if (list_empty(&fail_head))
2180 : goto enable_all;
2181 :
2182 0 : if (tried_times >= 2) {
2183 : /* Still failing; no point in trying again */
2184 0 : free_list(&fail_head);
2185 0 : goto enable_all;
2186 : }
2187 :
2188 0 : printk(KERN_DEBUG "PCI: No. %d try to assign unassigned res\n",
2189 : tried_times + 1);
2190 :
2191 : /*
2192 : * Try to release leaf bridge's resources that aren't big enough
2193 : * to contain child device resources.
2194 : */
2195 0 : list_for_each_entry(fail_res, &fail_head, list)
2196 0 : pci_bus_release_bridge_resources(fail_res->dev->bus,
2197 0 : fail_res->flags & PCI_RES_TYPE_MASK,
2198 : whole_subtree);
2199 :
2200 : /* Restore size and flags */
2201 0 : list_for_each_entry(fail_res, &fail_head, list) {
2202 0 : struct resource *res = fail_res->res;
2203 : int idx;
2204 :
2205 0 : res->start = fail_res->start;
2206 0 : res->end = fail_res->end;
2207 0 : res->flags = fail_res->flags;
2208 :
2209 0 : if (pci_is_bridge(fail_res->dev)) {
2210 0 : idx = res - &fail_res->dev->resource[0];
2211 0 : if (idx >= PCI_BRIDGE_RESOURCES &&
2212 : idx <= PCI_BRIDGE_RESOURCE_END)
2213 0 : res->flags = 0;
2214 : }
2215 : }
2216 0 : free_list(&fail_head);
2217 :
2218 0 : goto again;
2219 :
2220 : enable_all:
2221 0 : retval = pci_reenable_device(bridge);
2222 0 : if (retval)
2223 0 : pci_err(bridge, "Error reenabling bridge (%d)\n", retval);
2224 0 : pci_set_master(bridge);
2225 0 : }
2226 : EXPORT_SYMBOL_GPL(pci_assign_unassigned_bridge_resources);
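/*
 * Illustrative sketch, not part of this driver: how a hotplug driver might
 * use the export above after a card is plugged in below "bridge". The
 * helper name example_hotplug_rescan() is hypothetical and simplified; real
 * users such as the PCIe native hotplug path perform additional steps.
 */
static void example_hotplug_rescan(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;

	/* Discover the newly inserted device(s) behind the bridge */
	if (pci_scan_slot(parent, PCI_DEVFN(0, 0)) == 0)
		return;		/* nothing found in the slot */

	/* Size and assign resources for everything found below the bridge */
	pci_assign_unassigned_bridge_resources(bridge);

	/* Bind drivers to the new devices */
	pci_bus_add_devices(parent);
}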
2227 :
2228 0 : int pci_reassign_bridge_resources(struct pci_dev *bridge, unsigned long type)
2229 : {
2230 : struct pci_dev_resource *dev_res;
2231 : struct pci_dev *next;
2232 0 : LIST_HEAD(saved);
2233 0 : LIST_HEAD(added);
2234 0 : LIST_HEAD(failed);
2235 : unsigned int i;
2236 : int ret;
2237 :
2238 0 : down_read(&pci_bus_sem);
2239 :
2240 : /* Walk up towards the root bus, releasing bridge BARs when possible */
2241 0 : next = bridge;
2242 : do {
2243 0 : bridge = next;
2244 0 : for (i = PCI_BRIDGE_RESOURCES; i < PCI_BRIDGE_RESOURCE_END;
2245 0 : i++) {
2246 0 : struct resource *res = &bridge->resource[i];
2247 :
2248 0 : if ((res->flags ^ type) & PCI_RES_TYPE_MASK)
2249 0 : continue;
2250 :
2251 : /* Ignore BARs which are still in use */
2252 0 : if (res->child)
2253 0 : continue;
2254 :
2255 0 : ret = add_to_list(&saved, bridge, res, 0, 0);
2256 0 : if (ret)
2257 : goto cleanup;
2258 :
2259 0 : pci_info(bridge, "BAR %d: releasing %pR\n",
2260 : i, res);
2261 :
2262 0 : if (res->parent)
2263 0 : release_resource(res);
2264 0 : res->start = 0;
2265 0 : res->end = 0;
2266 0 : break;
2267 : }
2268 0 : if (i == PCI_BRIDGE_RESOURCE_END)
2269 : break;
2270 :
2271 0 : next = bridge->bus ? bridge->bus->self : NULL;
2272 0 : } while (next);
2273 :
2274 0 : if (list_empty(&saved)) {
2275 0 : up_read(&pci_bus_sem);
2276 0 : return -ENOENT;
2277 : }
2278 :
2279 0 : __pci_bus_size_bridges(bridge->subordinate, &added);
2280 0 : __pci_bridge_assign_resources(bridge, &added, &failed);
2281 0 : BUG_ON(!list_empty(&added));
2282 :
2283 0 : if (!list_empty(&failed)) {
2284 : ret = -ENOSPC;
2285 : goto cleanup;
2286 : }
2287 :
2288 0 : list_for_each_entry(dev_res, &saved, list) {
2289 : /* Skip the bridge we just assigned resources for */
2290 0 : if (bridge == dev_res->dev)
2291 0 : continue;
2292 :
2293 0 : bridge = dev_res->dev;
2294 0 : pci_setup_bridge(bridge->subordinate);
2295 : }
2296 :
2297 0 : free_list(&saved);
2298 0 : up_read(&pci_bus_sem);
2299 0 : return 0;
2300 :
2301 : cleanup:
2302 : /* Restore size and flags */
2303 0 : list_for_each_entry(dev_res, &failed, list) {
2304 0 : struct resource *res = dev_res->res;
2305 :
2306 0 : res->start = dev_res->start;
2307 0 : res->end = dev_res->end;
2308 0 : res->flags = dev_res->flags;
2309 : }
2310 0 : free_list(&failed);
2311 :
2312 : /* Revert to the old configuration */
2313 0 : list_for_each_entry(dev_res, &saved, list) {
2314 0 : struct resource *res = dev_res->res;
2315 :
2316 0 : bridge = dev_res->dev;
2317 0 : i = res - bridge->resource;
2318 :
2319 0 : res->start = dev_res->start;
2320 0 : res->end = dev_res->end;
2321 0 : res->flags = dev_res->flags;
2322 :
2323 0 : pci_claim_resource(bridge, i);
2324 0 : pci_setup_bridge(bridge->subordinate);
2325 : }
2326 0 : free_list(&saved);
2327 0 : up_read(&pci_bus_sem);
2328 :
2329 0 : return ret;
2330 : }
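/*
 * Illustrative sketch, not part of this driver: a caller that needs a larger
 * BAR (in the spirit of a resizable-BAR resize) can drop the current
 * assignment and then ask for the upstream bridge windows of that type to be
 * released, re-sized and re-assigned. example_release_and_regrow() is
 * hypothetical; the real resizable BAR path performs additional checks.
 */
static int example_release_and_regrow(struct pci_dev *dev, int resno)
{
	struct resource *res = &dev->resource[resno];

	/* Devices directly on a root bus have no upstream bridge to grow */
	if (!dev->bus->self)
		return -ENOENT;

	/* Drop the current assignment of this BAR, if any */
	if (res->parent)
		release_resource(res);

	/* Re-size and re-assign the upstream bridge windows of this type */
	return pci_reassign_bridge_resources(dev->bus->self,
					     res->flags & PCI_RES_TYPE_MASK);
}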
2331 :
2332 0 : void pci_assign_unassigned_bus_resources(struct pci_bus *bus)
2333 : {
2334 : struct pci_dev *dev;
2335 : /* List of resources that want additional resources */
2336 0 : LIST_HEAD(add_list);
2337 :
2338 0 : down_read(&pci_bus_sem);
2339 0 : for_each_pci_bridge(dev, bus)
2340 0 : if (pci_has_subordinate(dev))
2341 0 : __pci_bus_size_bridges(dev->subordinate, &add_list);
2342 0 : up_read(&pci_bus_sem);
2343 0 : __pci_bus_assign_resources(bus, &add_list, NULL);
2344 0 : BUG_ON(!list_empty(&add_list));
2345 0 : }
2346 : EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources);
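/*
 * Illustrative sketch, not part of this driver: a typical bus rescan pairs
 * the export above with a child-bus scan and device registration, in the
 * spirit of pci_rescan_bus(). The wrapper name example_rescan_bus() is
 * hypothetical.
 */
static unsigned int example_rescan_bus(struct pci_bus *bus)
{
	unsigned int max;

	/* Find devices added behind this bus since the last scan */
	max = pci_scan_child_bus(bus);

	/* Size bridges below and assign whatever is still unassigned */
	pci_assign_unassigned_bus_resources(bus);

	/* Make the new devices visible to drivers */
	pci_bus_add_devices(bus);

	return max;
}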