Commit 0405a5cec3406f19e69da07c8111a6bf1088ac29

Author: Rob Herring
Committer: Tony Luck
Parent: 7741892164

pstore/ram: avoid atomic accesses for ioremapped regions

For persistent RAM outside of main memory, the memory may have limitations
on the accesses it supports. On the highbank platform, internal RAM does not
support exclusive accesses, and attempting one will hang the system, so
atomic_cmpxchg cannot be used. This commit instead protects buffer size and
start updates on ioremapped regions with a spinlock.

Signed-off-by: Rob Herring <rob.herring@calxeda.com>
Acked-by: Anton Vorontsov <anton@enomsg.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
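
For context on the hang described above: on ARMv6+ the kernel builds
atomic_cmpxchg() from exclusive load/store pairs (LDREX/STREX), and a
store-exclusive can only succeed when the target memory is covered by an
exclusive monitor. The stand-alone model below is a hypothetical sketch, not
kernel code; load_exclusive(), store_exclusive(), and monitor_present are
invented names that mimic the shape of the real retry loop to show why it
spins forever on memory with no monitor.

#include <stdio.h>

/* Hypothetical stand-in for LDREX: load and arm the exclusive monitor. */
static int load_exclusive(const int *p)
{
	return *p;
}

/* Hypothetical stand-in for STREX: returns 0 on success, nonzero on
 * failure.  On memory with no exclusive monitor (e.g. highbank's
 * internal RAM), the store-exclusive can never succeed. */
static int store_exclusive(int *p, int val, int monitor_present)
{
	if (!monitor_present)
		return 1;
	*p = val;
	return 0;
}

/* The retry loop has the same shape as an LDREX/STREX-based cmpxchg. */
static int model_cmpxchg(int *p, int old, int new, int monitor_present)
{
	int oldval, failed;

	do {
		oldval = load_exclusive(p);
		failed = (oldval == old)
			? store_exclusive(p, new, monitor_present)
			: 0;
	} while (failed);	/* never exits if the store always fails */

	return oldval;
}

int main(void)
{
	int x = 5;
	int old;

	/* Normal RAM: monitor present, the exchange completes. */
	old = model_cmpxchg(&x, 5, 7, 1);
	printf("old = %d, x = %d\n", old, x);

	/* model_cmpxchg(&x, 7, 9, 0) would spin forever -- the hang
	 * this commit avoids by taking a spinlock instead. */
	return 0;
}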

Showing 1 changed file with 52 additions and 2 deletions

fs/pstore/ram_core.c
@@ -46,7 +46,7 @@
 }
 
 /* increase and wrap the start pointer, returning the old value */
-static inline size_t buffer_start_add(struct persistent_ram_zone *prz, size_t a)
+static size_t buffer_start_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
 	int old;
 	int new;
@@ -62,7 +62,7 @@
 }
 
 /* increase the size counter until it hits the max size */
-static inline void buffer_size_add(struct persistent_ram_zone *prz, size_t a)
+static void buffer_size_add_atomic(struct persistent_ram_zone *prz, size_t a)
 {
 	size_t old;
 	size_t new;
@@ -78,6 +78,53 @@
 	} while (atomic_cmpxchg(&prz->buffer->size, old, new) != old);
 }
 
+static DEFINE_RAW_SPINLOCK(buffer_lock);
+
+/* increase and wrap the start pointer, returning the old value */
+static size_t buffer_start_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+	int old;
+	int new;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&buffer_lock, flags);
+
+	old = atomic_read(&prz->buffer->start);
+	new = old + a;
+	while (unlikely(new > prz->buffer_size))
+		new -= prz->buffer_size;
+	atomic_set(&prz->buffer->start, new);
+
+	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+
+	return old;
+}
+
+/* increase the size counter until it hits the max size */
+static void buffer_size_add_locked(struct persistent_ram_zone *prz, size_t a)
+{
+	size_t old;
+	size_t new;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&buffer_lock, flags);
+
+	old = atomic_read(&prz->buffer->size);
+	if (old == prz->buffer_size)
+		goto exit;
+
+	new = old + a;
+	if (new > prz->buffer_size)
+		new = prz->buffer_size;
+	atomic_set(&prz->buffer->size, new);
+
+exit:
+	raw_spin_unlock_irqrestore(&buffer_lock, flags);
+}
+
+static size_t (*buffer_start_add)(struct persistent_ram_zone *, size_t) = buffer_start_add_atomic;
+static void (*buffer_size_add)(struct persistent_ram_zone *, size_t) = buffer_size_add_atomic;
+
 static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
 	uint8_t *data, size_t len, uint8_t *ecc)
 {
@@ -371,6 +418,9 @@
 			(unsigned long long)size, (unsigned long long)start);
 		return NULL;
 	}
+
+	buffer_start_add = buffer_start_add_locked;
+	buffer_size_add = buffer_size_add_locked;
 
 	return ioremap(start, size);
 }
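
Because the dispatch pointers at the end of the third hunk keep the old
helpers' names and signatures, the write path elsewhere in
fs/pstore/ram_core.c needs no changes. The sketch below is a condensed
paraphrase of that caller, persistent_ram_write(), which is not part of this
diff; treat it as illustrative rather than verbatim.

int notrace persistent_ram_write(struct persistent_ram_zone *prz,
	const void *s, unsigned int count)
{
	int rem, c = count;
	size_t start;

	if (unlikely(c > prz->buffer_size)) {	/* keep only the tail */
		s += c - prz->buffer_size;
		c = prz->buffer_size;
	}

	buffer_size_add(prz, c);		/* via pointer: atomic or locked */
	start = buffer_start_add(prz, c);	/* reserve [start, start + c) */

	rem = prz->buffer_size - start;
	if (unlikely(c > rem)) {		/* wrap around the ring buffer */
		persistent_ram_update(prz, s, start, rem);
		s += rem;
		c -= rem;
		start = 0;
	}
	persistent_ram_update(prz, s, start, c);

	persistent_ram_update_header_ecc(prz);
	return count;
}

One design consequence worth noting: the switch in the last hunk is global
and one-way, so once a single region has been ioremapped, every zone,
including those backed by ordinary kernel RAM, takes the spinlocked path.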