互斥体(mutex)的基本使用:
1). 初始化互斥体 -- mutex_init();
2). 获得互斥体 -- mutex_lock();
3). 释放互斥体 -- mutex_unlock();
1.mutex_init(), 注意mutex使用之前都需要先init
/*
 * Core mutex initializer: puts the mutex into the unlocked state.
 * Normally reached through the mutex_init() macro, which supplies the
 * call-site name and the lockdep class key.
 */
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
atomic_long_set(&lock->owner, 0); /* owner == 0 means "unlocked" */
spin_lock_init(&lock->wait_lock); /* protects the waiter list below */
INIT_LIST_HEAD(&lock->wait_list); /* tasks blocked on the mutex queue here */
#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
osq_lock_init(&lock->osq); /* optimistic-spin queue used before sleeping */
#endif
debug_mutex_init(lock, name, key); /* debug/lockdep bookkeeping only */
}
EXPORT_SYMBOL(__mutex_init);
/**
 * mutex_init - initialize the mutex
 * @mutex: the mutex to be initialized
 *
 * Initialize the mutex to unlocked state.
 *
 * It is not allowed to initialize an already locked mutex.
 */
/*
 * The static __key gives each mutex_init() call-site its own lockdep
 * class; the stringified #mutex argument becomes the lock's debug name.
 */
#define mutex_init(mutex) \
do { \
static struct lock_class_key __key; \
\
__mutex_init((mutex), #mutex, &__key); \
} while (0)
2. mutex_lock(), 注意看注释中的使用约束:
a). 如果mutex已经被其他task获取,那么目前的task先sleep直到获取;
b). mutex不能被嵌套获取;上一个task释放mutex之后,才能被其他的task获取;
c). mutex要先被初始化才能使用;mutex正在使用过程中,其内存不能被释放;
//------kernel4.4------
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
might_sleep(); /* debug annotation: this call may sleep (see kernel-doc) */
/*
 * The locking fastpath is the 1->0 transition from
 * 'unlocked' into 'locked' state.
 */
/* falls back to __mutex_lock_slowpath if the atomic transition fails */
__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
mutex_set_owner(lock); /* record current task as owner */
}
//======kernel4.14======
/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, kernel
 * memory where the mutex resides must not be freed with
 * the mutex still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * (The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 * checks that will enforce the restrictions and will also do
 * deadlock debugging)
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
might_sleep(); /* debug annotation: this call may sleep (see kernel-doc) */
/* 4.14 rewrite: single trylock fastpath, slowpath only on contention */
if (!__mutex_trylock_fast(lock))
__mutex_lock_slowpath(lock);
}
EXPORT_SYMBOL(mutex_lock);
3. mutex_unlock(), 释放互斥体
a). 释放之前获得的mutex;
b). mutex只有被获得,才能调用这个函数来释放;换言之,释放一个未被获得的mutex是不允许的(见下方注释 "Unlocking of a not locked mutex is not allowed");
//======kernel 4.4======
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
/*
 * The unlocking fastpath is the 0->1 transition from 'locked'
 * into 'unlocked' state:
 */
#ifndef CONFIG_DEBUG_MUTEXES
/*
 * When debugging is enabled we must not clear the owner before time,
 * the slow path will always be taken, and that clears the owner field
 * after verifying that it was indeed current.
 */
/* non-debug build: safe to clear the owner here, before the fastpath */
mutex_clear_owner(lock);
#endif
/* falls back to __mutex_unlock_slowpath to wake waiters if needed */
__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);
//=======kernel 4.14======
/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * of a not locked mutex is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/* fastpath: done if the unlock succeeded with no waiters to wake */
if (__mutex_unlock_fast(lock))
return;
#endif
/* slowpath (always taken with lockdep): wakes up waiters */
__mutex_unlock_slowpath(lock, _RET_IP_);
}
EXPORT_SYMBOL(mutex_unlock);
如果lock只在这个interrupt handler里使用,则没有必要加锁。同一个中断的handler本身不会被重入,handler内部对共享数据的访问不存在竞争。
/*
 * Threaded IRQ handler for the type-C chip: read the pending event,
 * latch the current connector state, and register/unregister the
 * partner accordingly.
 *
 * The mutex is intentionally left commented out: sc->state and the
 * registers are only touched from this handler, and a handler for the
 * same IRQ is not re-entered, so there is nothing to race against.
 */
static irqreturn_t xxxx_typec_interrupt(int irq, void *data)
{
	struct xxxx_typec *sc = data;
	/*
	 * Must be initialized: the dev_info() at "unlock:" prints event
	 * even when the first regmap_read() fails and leaves it unwritten.
	 */
	u32 event = 0;
	int ret;

	//mutex_lock(&sc->lock);
	ret = regmap_read(sc->regmap, XXXX_INT_MASK, &event);
	if (ret)
		goto unlock;

	event &= XXXX_EVENT_MASK;

	ret = regmap_read(sc->regmap, XXXX_STATUS, &sc->state);
	if (ret)
		goto clear_ints;

	sc->state &= XXXX_STATE_MASK;

	if (event & XXXX_ATTACH_INT) {
		/* casing fixed: was XXxx_typec_connect, inconsistent with disconnect */
		ret = xxxx_typec_connect(sc, sc->state);
		if (ret)
			dev_warn(sc->dev, "failed to register partner\n");
	} else if (event & XXXX_DETACH_INT) {
		xxxx_typec_disconnect(sc, sc->state);
	}

clear_ints:
	/* acknowledge whatever we handled (0 if nothing was read) */
	regmap_write(sc->regmap, XXXX_INT_CLR, event);
unlock:
	//mutex_unlock(&sc->lock);
	/* %u: both values come from regmap_read(unsigned int *), not signed */
	dev_info(sc->dev, "now works as DRP and is in %u state, event %u\n",
		 sc->state, event);

	return IRQ_HANDLED;
}
/*
 * Probe: allocate driver state, register the extcon device, then
 * request the threaded oneshot interrupt.  The "..." lines are
 * elisions carried over from the original notes.
 */
static int xxxx_typec_probe(struct platform_device *pdev)
{
	struct xxxx_typec *sc;
	int ret;
	struct device *dev = &pdev->dev;
	struct device_node *node = pdev->dev.of_node;
	u32 value = 0;

	sc = devm_kzalloc(&pdev->dev, sizeof(*sc), GFP_KERNEL);
	if (!sc)
		return -ENOMEM;

	sc->edev = devm_extcon_dev_allocate(&pdev->dev, xxxx_typec_cable);
	if (IS_ERR(sc->edev)) {
		dev_err(&pdev->dev, "failed to allocate extcon device\n");
		/* fix: PTR_ERR() was called with no argument (compile error) */
		return PTR_ERR(sc->edev);
	}

	ret = devm_extcon_dev_register(&pdev->dev, sc->edev);
	if (ret < 0) {
		dev_err(&pdev->dev, "can't register extcon device: %d\n", ret);
		return ret;
	}

	//mutex_init(&sc->lock);
	sc->dev = &pdev->dev;
	sc->irq = platform_get_irq(pdev, 0);
	...
	/* threaded handler only (hard-IRQ part is NULL); ONESHOT keeps the
	 * line masked until the thread finishes */
	ret = devm_request_threaded_irq(sc->dev, sc->irq, NULL,
					xxxx_typec_interrupt,
					IRQF_EARLY_RESUME | IRQF_ONESHOT,
					dev_name(sc->dev), sc);
	...
}