Verified Commit 8355b447 authored by Philippe B. 🏂

Shell/DDB: rename DISTRIBUTED_DB -> CACHE

parent 1181587c
(*****************************************************************************)
(* *)
(* Open Source License *)
(* Copyright (c) 2018 Dynamic Ledger Solutions, Inc. <[email protected]> *)
@@ -23,7 +22,7 @@
(* *)
(*****************************************************************************)
module type DISTRIBUTED_DB = sig
module type CACHE = sig
type t
type key
@@ -153,7 +152,7 @@ end)
(Scheduler : SCHEDULER_EVENTS with type key := Hash.t)
(Precheck : PRECHECK with type key := Hash.t and type value := Disk_table.value) : sig
include
DISTRIBUTED_DB
CACHE
with type key = Hash.t
and type value = Disk_table.value
and type param = Precheck.param
@@ -220,9 +219,9 @@ end = struct
(* Missing data key *)
register_error_kind
`Permanent
~id:("distributed_db." ^ Hash.name ^ ".missing")
~id:("cache." ^ Hash.name ^ ".missing")
~title:("Missing " ^ Hash.name)
~description:("Some " ^ Hash.name ^ " is missing from the distributed db")
~description:("Some " ^ Hash.name ^ " is missing from the chache")
~pp:(fun ppf key ->
Format.fprintf ppf "Missing %s %a" Hash.name Hash.pp key)
(Data_encoding.obj1 (Data_encoding.req "key" Hash.encoding))
@@ -233,7 +232,7 @@ end = struct
`Permanent
~title:("Canceled fetch of a " ^ Hash.name)
~description:("The fetch of a " ^ Hash.name ^ " has been canceled")
~id:("distributed_db." ^ Hash.name ^ ".fetch_canceled")
~id:("cache." ^ Hash.name ^ ".fetch_canceled")
~pp:(fun ppf key ->
Format.fprintf ppf "Fetch of %s %a canceled" Hash.name Hash.pp key)
Data_encoding.(obj1 (req "key" Hash.encoding))
@@ -244,7 +243,7 @@ end = struct
`Permanent
~title:("Timed out fetch of a " ^ Hash.name)
~description:("The fetch of a " ^ Hash.name ^ " has timed out")
~id:("distributed_db." ^ Hash.name ^ ".fetch_timeout")
~id:("cache." ^ Hash.name ^ ".fetch_timeout")
~pp:(fun ppf key ->
Format.fprintf ppf "Fetch of %s %a timed out" Hash.name Hash.pp key)
Data_encoding.(obj1 (req "key" Hash.encoding))
@@ -430,7 +429,7 @@ end)
val memory_table_length : t -> int
end = struct
include Internal_event.Legacy_logging.Make_semantic (struct
let name = "node.distributed_db.scheduler." ^ Hash.name
let name = "node.cache.scheduler." ^ Hash.name
end)
type key = Hash.t
@@ -25,7 +25,7 @@
(** Generic cache / request scheduling service.
This module defines a generic key-value cache service [Distributed_db].
This module defines a generic key-value cache service [Cache].
It is parameterized by abstract services [Disk], [Scheduler], [Memory_table]
and [Precheck].
@@ -36,7 +36,7 @@
The [Scheduler] is also a generic service, parameterized by
the [Memory_table] and [Request] modules. Importantly, the [Memory_table]
must be shared between the [Scheduler] and the [Distributed_db] as it
must be shared between the [Scheduler] and the [Cache] as it
is used to store both pending requests and found values.
TODO: this can maybe be statically enforced by reviewing the set of exported
@@ -46,7 +46,7 @@
The cache is "semi"-readthrough. It sends a request via
[Request.send] to query a value from the network, but it is the
responsibility of the client to *notify the cache* with
[Distributed_db.notify] when the requested value is available.
[Cache.notify] when the requested value is available.
Notified values are validated before being inserted in the cache,
using the [Precheck] module. *)
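
The comment above describes a protocol rather than a plain lookup: a fetch only schedules a network request, and the value only becomes available once the client calls notify and the value passes the [Precheck] validation. The following self-contained OCaml sketch models that flow; the names [fetch], [notify] and [precheck], the string keys and the callback-style interface are illustrative assumptions, not the module's actual (elided) signatures.

(* Sketch of the "semi"-readthrough flow: fetch schedules, notify fulfills. *)
type 'v entry =
  | Pending of ('v -> unit) list  (* callbacks waiting for a notification *)
  | Found of 'v                   (* value already notified and validated *)

type 'v cache = {
  table : (string, 'v entry) Hashtbl.t;  (* memory table shared with the scheduler *)
  precheck : string -> 'v -> bool;       (* stands in for the [Precheck] module *)
  request : string -> unit;              (* stands in for [Request.send] *)
}

(* [fetch] records interest in [key] and sends a network request; the
   actual value arrives later through [notify]. *)
let fetch cache key k =
  match Hashtbl.find_opt cache.table key with
  | Some (Found v) -> k v
  | Some (Pending ks) -> Hashtbl.replace cache.table key (Pending (k :: ks))
  | None ->
      Hashtbl.replace cache.table key (Pending [ k ]);
      cache.request key

(* [notify] is called by the client once the requested value shows up on
   the network; the value is validated before being stored and handed to
   the waiting callbacks. *)
let notify cache key v =
  match Hashtbl.find_opt cache.table key with
  | Some (Pending ks) when cache.precheck key v ->
      Hashtbl.replace cache.table key (Found v);
      List.iter (fun k -> k v) ks
  | _ -> ()  (* unknown key, already found, or precheck failed *)

In this model a caller runs fetch cache "some-hash" print_endline and, when a peer later answers, notify cache "some-hash" data releases the waiting callback.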
@@ -139,7 +139,7 @@ end)
include SCHEDULER_EVENTS with type t := t and type key := Hash.t
end
module type DISTRIBUTED_DB = sig
module type CACHE = sig
type t
(** The index key *)
@@ -274,7 +274,7 @@ end)
(Scheduler : SCHEDULER_EVENTS with type key := Hash.t)
(Precheck : PRECHECK with type key := Hash.t and type value := Disk_table.value) : sig
include
DISTRIBUTED_DB
CACHE
with type key = Hash.t
and type value = Disk_table.value
and type param = Precheck.param
@@ -1083,7 +1083,7 @@ let watch_operation {operation_input; _} =
Lwt_watcher.create_stream operation_input
module Make
(Table : Cache.DISTRIBUTED_DB) (Kind : sig
(Table : Cache.CACHE) (Kind : sig
type t
val proj : t -> Table.t
@@ -1132,7 +1132,7 @@ module Block_header = struct
let proj chain = chain.block_header_db.table
end) :
Cache.DISTRIBUTED_DB
Cache.CACHE
with type t := chain_db
and type key := Block_hash.t
and type value := Block_header.t
@@ -1168,7 +1168,7 @@ module Operation = struct
let proj chain = chain.operation_db.table
end) :
Cache.DISTRIBUTED_DB
Cache.CACHE
with type t := chain_db
and type key := Operation_hash.t
and type value := Operation.t
@@ -1186,7 +1186,7 @@ module Protocol = struct
let proj db = db.protocol_db.table
end) :
Cache.DISTRIBUTED_DB
Cache.CACHE
with type t := db
and type key := Protocol_hash.t
and type value := Protocol.t
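
The [Make] applications above follow a projection pattern: a cache keyed on a bare table ([Table.t]) is re-exposed against the wrapping state record ([chain_db] or [db]) through [Kind.proj]. Below is a reduced sketch of that pattern, shrunk to a single hypothetical [read_opt] operation; the real [CACHE] signature is much wider.

(* [MINI_CACHE] stands in for the full [CACHE] signature. *)
module type MINI_CACHE = sig
  type t
  type key
  type value
  val read_opt : t -> key -> value option
end

(* [Project] turns a cache over [Table.t] into a cache over [Kind.t] by
   projecting the table out of the wrapper before each operation. *)
module Project
    (Table : MINI_CACHE) (Kind : sig
      type t
      val proj : t -> Table.t
    end) :
  MINI_CACHE
    with type t := Kind.t
     and type key := Table.key
     and type value := Table.value = struct
  let read_opt state key = Table.read_opt (Kind.proj state) key
end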
@@ -124,7 +124,7 @@ module Block_header : sig
type t = Block_header.t (* avoid shadowing. *)
include
DISTRIBUTED_DB
CACHE
with type t := chain_db
and type key := Block_hash.t
and type value := Block_header.t
@@ -137,7 +137,7 @@ val read_block_header :
(** Index of all the operations of a given block (per validation pass). *)
module Operations :
DISTRIBUTED_DB
CACHE
with type t := chain_db
and type key = Block_hash.t * int
and type value = Operation.t list
@@ -146,7 +146,7 @@ module Operations :
(** Index of all the hashes of operations of a given block (per
validation pass). *)
module Operation_hashes :
DISTRIBUTED_DB
CACHE
with type t := chain_db
and type key = Block_hash.t * int
and type value = Operation_hash.t list
@@ -183,7 +183,7 @@ module Operation : sig
type t = Operation.t (* avoid shadowing. *)
include
DISTRIBUTED_DB
CACHE
with type t := chain_db
and type key := Operation_hash.t
and type value := Operation.t
@@ -205,7 +205,7 @@ module Protocol : sig
type t = Protocol.t (* avoid shadowing. *)
include
DISTRIBUTED_DB
CACHE
with type t := db
and type key := Protocol_hash.t
and type value := Protocol.t