Iris / RefinedC · Commit 119d3306

make milbeaut_xdmac.c compile

Authored Sep 27, 2021 by Paul
Parent: 349e734e
Pipeline #54226 passed in 15 minutes and 52 seconds
3 changed files
_CoqProject
...
@@ -56,3 +56,5 @@
 -Q _build/default/examples/proofs/pointers refinedc.examples.pointers
 -Q _build/default/linux/casestudies/proofs/pgtable refinedc.linux.casestudies.pgtable
 -Q _build/default/tutorial/proofs/lithium refinedc.tutorial.lithium
+-Q _build/default/linux/casestudies/proofs/bits refinedc.linux.casestudies.bits
+-Q _build/default/linux/casestudies/proofs/milbeaut_xdmac refinedc.linux.casestudies.milbeaut_xdmac
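For context: these -Q flags map dune build directories onto Coq logical namespaces, so each proofs directory under _build/default becomes importable under a refinedc.* prefix. The two added lines register the proof output for the bits and milbeaut_xdmac case studies under the logical paths refinedc.linux.casestudies.bits and refinedc.linux.casestudies.milbeaut_xdmac (the concrete .v module names are generated by RefinedC and are not part of this diff).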
linux/casestudies/bits.h
@@ -5,6 +5,8 @@
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
#define BITS_PER_LONG (sizeof(long) * 8)
...
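The driver below leans on BIT, GENMASK, and FIELD_PREP from bits.h, whose bodies fall outside this hunk. As a hedged sketch, assuming bits.h mirrors the usual Linux definitions (include/linux/bits.h and include/linux/bitfield.h), they behave like:

/* Sketch only: assumed definitions, not shown in this diff. */
#define BIT(nr)         (1UL << (nr))
/* Mask with bits h..l set, e.g. GENMASK(17, 16) == 0x30000. */
#define GENMASK(h, l) \
        (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
/* Shift a value into the field selected by mask; the kernel derives the
 * shift from the mask itself, here approximated with a count-trailing-zeros
 * builtin. */
#define FIELD_PREP(mask, val) \
        (((unsigned long)(val) << __builtin_ctzl(mask)) & (mask))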
linux/casestudies/milbeaut_xdmac.c (new file, mode 100644)
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2019 Linaro Ltd.
// Copyright (C) 2019 Socionext Inc.
#include <stddef.h>
#include "bits.h"
// include/linux/types.h
/*
* A dma_addr_t can hold any valid DMA address, i.e., any address returned
* by the DMA API.
*
* If the DMA API only uses 32-bit addresses, dma_addr_t need only be 32
* bits wide. Bus addresses, e.g., PCI BARs, may be wider than 32 bits,
* but drivers do memory-mapped I/O to ioremapped kernel virtual addresses,
* so they don't care about the size of the actual bus addresses.
*/
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
typedef u64 dma_addr_t;
#else
typedef u32 dma_addr_t;
#endif
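Since the stub's width depends on CONFIG_ARCH_DMA_ADDR_T_64BIT, a compile-time check (my addition, not part of the commit) makes the two cases explicit:

/* Not part of the commit: sanity-check the two configurations. */
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
_Static_assert(sizeof(dma_addr_t) == sizeof(u64), "dma_addr_t is 64-bit");
#else
_Static_assert(sizeof(dma_addr_t) == sizeof(u32), "dma_addr_t is 32-bit");
#endif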
// include/linux/irqreturn.h
/**
* enum irqreturn
* @IRQ_NONE interrupt was not from this device or was not handled
* @IRQ_HANDLED interrupt was handled by this device
* @IRQ_WAKE_THREAD handler requests to wake the handler thread
*/
enum irqreturn {
        IRQ_NONE                = (0 << 0),
        IRQ_HANDLED             = (1 << 0),
        IRQ_WAKE_THREAD         = (1 << 1),
};

typedef enum irqreturn irqreturn_t;
u32 readl(const volatile void /* __iomem */ *addr);
void writel(u32 b, volatile void /* __iomem */ *addr);
u32 readl_relaxed(const volatile void /* __iomem */ *addr);
void writel_relaxed(u32 value, volatile void /* __iomem */ *addr);
// include/linux/dmaengine.h
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
* @slave: ptr to the device using this channel
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
* @name: backlink name for sysfs
* @dbg_client_name: slave name for debugfs in format:
* dev_name(requester's dev):channel name, for example: "2b00000.mcasp:tx"
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
* @table_count: number of appearances in the mem-to-mem allocation table
* @router: pointer to the DMA router structure
* @route_data: channel specific data for the router
* @private: private data for certain client-channel associations
*/
struct dma_chan {
        // struct dma_device *device;
        // struct device *slave;
        // dma_cookie_t cookie;
        // dma_cookie_t completed_cookie;
        /* sysfs */
        int chan_id;
        // struct dma_chan_dev *dev;
        // const char *name;
        // #ifdef CONFIG_DEBUG_FS
        // char *dbg_client_name;
        // #endif
        // struct list_head device_node;
        // struct dma_chan_percpu __percpu *local;
        // int client_count;
        // int table_count;
        // /* DMA router */
        // struct dma_router *router;
        // void *route_data;
        // void *private;
};
// drivers/dma/virt-dma.h
struct virt_dma_desc {
        // struct dma_async_tx_descriptor tx;
        // struct dmaengine_result tx_result;
        // /* protected by vc.lock */
        // struct list_head node;
        void *dummy;
};

struct virt_dma_chan {
        // struct dma_chan chan;
        // struct tasklet_struct task;
        // void (*desc_free)(struct virt_dma_desc *);
        // spinlock_t lock;
        // /* protected by vc.lock */
        // struct list_head desc_allocated;
        // struct list_head desc_submitted;
        // struct list_head desc_issued;
        // struct list_head desc_completed;
        // struct list_head desc_terminated;
        // struct virt_dma_desc *cyclic;
        void *dummy;
};

struct virt_dma_chan *to_virt_chan(struct dma_chan *chan);
/**
* vchan_cookie_complete - report completion of a descriptor
* @vd: virtual descriptor to update
*
* vc.lock must be held by caller
*/
void vchan_cookie_complete(struct virt_dma_desc *vd);
/**
* vchan_terminate_vdesc - Disable pending cyclic callback
* @vd: virtual descriptor to be terminated
*
* vc.lock must be held by caller
*/
void vchan_terminate_vdesc(struct virt_dma_desc *vd);
/* global register */
#define M10V_XDACS 0x00
/* channel local register */
#define M10V_XDTBC 0x10
#define M10V_XDSSA 0x14
#define M10V_XDDSA 0x18
#define M10V_XDSAC 0x1C
#define M10V_XDDAC 0x20
#define M10V_XDDCC 0x24
#define M10V_XDDES 0x28
#define M10V_XDDPC 0x2C
#define M10V_XDDSD 0x30
#define M10V_XDACS_XE BIT(28)
#define M10V_DEFBS 0x3
#define M10V_DEFBL 0xf
#define M10V_XDSAC_SBS GENMASK(17, 16)
#define M10V_XDSAC_SBL GENMASK(11, 8)
#define M10V_XDDAC_DBS GENMASK(17, 16)
#define M10V_XDDAC_DBL GENMASK(11, 8)
#define M10V_XDDES_CE BIT(28)
#define M10V_XDDES_SE BIT(24)
#define M10V_XDDES_SA BIT(15)
#define M10V_XDDES_TF GENMASK(23, 20)
#define M10V_XDDES_EI BIT(1)
#define M10V_XDDES_TI BIT(0)
#define M10V_XDDSD_IS_MASK GENMASK(3, 0)
#define M10V_XDDSD_IS_NORMAL 0x8
#define MLB_XDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
struct milbeaut_xdmac_desc {
        struct virt_dma_desc vd;
        size_t len;
        dma_addr_t src;
        dma_addr_t dst;
};

struct milbeaut_xdmac_chan {
        struct virt_dma_chan vc;
        struct milbeaut_xdmac_desc *md;
        int /* void __iomem */ *reg_ch_base;
};

struct milbeaut_xdmac_device {
        // struct dma_device ddev;
        int /* void __iomem */ *reg_base;
        // struct milbeaut_xdmac_chan channels[];
};

static struct milbeaut_xdmac_chan *to_milbeaut_xdmac_chan(struct virt_dma_chan *vc);
// {
// return container_of(vc, struct milbeaut_xdmac_chan, vc);
// }
static struct milbeaut_xdmac_desc *to_milbeaut_xdmac_desc(struct virt_dma_desc *vd);
// {
// return container_of(vd, struct milbeaut_xdmac_desc, vd);
// }
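The commented-out bodies above use container_of to recover the enclosing struct from a pointer to one of its embedded members. For reference, a minimal sketch of the classic definition (the kernel's version adds type checking; this is the bare pointer arithmetic, with offsetof coming from the stddef.h already included):

/* Sketch of container_of: step back from a member pointer to the
 * address of the struct that contains it. */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))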
/* mc->vc.lock must be held by caller */
static struct milbeaut_xdmac_desc *milbeaut_xdmac_next_desc(struct milbeaut_xdmac_chan *mc);
// {
// struct virt_dma_desc *vd;
// vd = vchan_next_desc(&mc->vc);
// if (!vd) {
// mc->md = NULL;
// return NULL;
// }
// list_del(&vd->node);
// mc->md = to_milbeaut_xdmac_desc(vd);
// return mc->md;
// }
/* mc->vc.lock must be held by caller */
static void milbeaut_chan_start(struct milbeaut_xdmac_chan *mc,
                                struct milbeaut_xdmac_desc *md)
{
        u32 val;

        /* Setup the channel */
        val = md->len - 1;
        writel_relaxed(val, mc->reg_ch_base + M10V_XDTBC);

        val = md->src;
        writel_relaxed(val, mc->reg_ch_base + M10V_XDSSA);

        val = md->dst;
        writel_relaxed(val, mc->reg_ch_base + M10V_XDDSA);

        val = readl_relaxed(mc->reg_ch_base + M10V_XDSAC);
        val &= ~(M10V_XDSAC_SBS | M10V_XDSAC_SBL);
        val |= FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) |
               FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL);
        writel_relaxed(val, mc->reg_ch_base + M10V_XDSAC);

        val = readl_relaxed(mc->reg_ch_base + M10V_XDDAC);
        val &= ~(M10V_XDDAC_DBS | M10V_XDDAC_DBL);
        val |= FIELD_PREP(M10V_XDDAC_DBS, M10V_DEFBS) |
               FIELD_PREP(M10V_XDDAC_DBL, M10V_DEFBL);
        writel_relaxed(val, mc->reg_ch_base + M10V_XDDAC);

        /* Start the channel */
        val = readl_relaxed(mc->reg_ch_base + M10V_XDDES);
        val &= ~(M10V_XDDES_CE | M10V_XDDES_SE | M10V_XDDES_TF |
                 M10V_XDDES_EI | M10V_XDDES_TI);
        val |= FIELD_PREP(M10V_XDDES_CE, 1) |
               FIELD_PREP(M10V_XDDES_SE, 1) |
               FIELD_PREP(M10V_XDDES_TF, 1) |
               FIELD_PREP(M10V_XDDES_EI, 1) |
               FIELD_PREP(M10V_XDDES_TI, 1);
        writel_relaxed(val, mc->reg_ch_base + M10V_XDDES);
}
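To make the read-modify-write above concrete, here is the arithmetic for the source-side burst configuration (my worked example, assuming the GENMASK/FIELD_PREP semantics sketched earlier):

/* Worked example (not part of the commit):
 * M10V_XDSAC_SBS = GENMASK(17, 16) = 0x30000
 * FIELD_PREP(M10V_XDSAC_SBS, M10V_DEFBS) = 0x3 << 16 = 0x30000
 * M10V_XDSAC_SBL = GENMASK(11, 8)  = 0x00f00
 * FIELD_PREP(M10V_XDSAC_SBL, M10V_DEFBL) = 0xf << 8 = 0x00f00
 * so the update clears bits 17:16 and 11:8 of XDSAC and ORs in the
 * defaults, i.e. 0x30f00.
 */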
/* mc->vc.lock must be held by caller */
static void milbeaut_xdmac_start(struct milbeaut_xdmac_chan *mc)
{
        struct milbeaut_xdmac_desc *md;

        md = milbeaut_xdmac_next_desc(mc);
        if (md)
                milbeaut_chan_start(mc, md);
}
static irqreturn_t milbeaut_xdmac_interrupt(int irq, void *dev_id)
{
        struct milbeaut_xdmac_chan *mc = dev_id;
        struct milbeaut_xdmac_desc *md;
        u32 val;

        // spin_lock(&mc->vc.lock);

        /* Ack and Stop */
        val = FIELD_PREP(M10V_XDDSD_IS_MASK, 0x0);
        writel_relaxed(val, mc->reg_ch_base + M10V_XDDSD);

        md = mc->md;
        if (!md)
                goto out;

        vchan_cookie_complete(&md->vd);

        milbeaut_xdmac_start(mc);

out:
        // spin_unlock(&mc->vc.lock);
        return IRQ_HANDLED;
}
static int milbeaut_xdmac_terminate_all(struct dma_chan *chan)
{
        struct virt_dma_chan *vc = to_virt_chan(chan);
        struct milbeaut_xdmac_chan *mc = to_milbeaut_xdmac_chan(vc);
        unsigned long flags;
        u32 val;
        // LIST_HEAD(head);

        // spin_lock_irqsave(&vc->lock, flags);

        /* Halt the channel */
        val = readl(mc->reg_ch_base + M10V_XDDES);
        val &= ~M10V_XDDES_CE;
        val |= FIELD_PREP(M10V_XDDES_CE, 0);
        writel(val, mc->reg_ch_base + M10V_XDDES);

        if (mc->md) {
                vchan_terminate_vdesc(&mc->md->vd);
                mc->md = NULL;
        }

        // vchan_get_all_descriptors(vc, &head);
        // spin_unlock_irqrestore(&vc->lock, flags);

        // vchan_dma_desc_free_list(vc, &head);

        return 0;
}
static void enable_xdmac(struct milbeaut_xdmac_device *mdev)
{
        unsigned int val;

        val = readl(mdev->reg_base + M10V_XDACS);
        val |= M10V_XDACS_XE;
        writel(val, mdev->reg_base + M10V_XDACS);
}

static void disable_xdmac(struct milbeaut_xdmac_device *mdev)
{
        unsigned int val;

        val = readl(mdev->reg_base + M10V_XDACS);
        val &= ~M10V_XDACS_XE;
        writel(val, mdev->reg_base + M10V_XDACS);
}